Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/nfit/core.c | 157
-rw-r--r--  drivers/acpi/nfit/intel.c | 386
-rw-r--r--  drivers/acpi/nfit/intel.h | 61
-rw-r--r--  drivers/acpi/nfit/nfit.h | 38
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_core.c | 46
-rw-r--r--  drivers/dax/super.c | 13
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 10
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c | 16
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 4
-rw-r--r--  drivers/hwspinlock/Kconfig | 10
-rw-r--r--  drivers/hwspinlock/qcom_hwspinlock.c | 70
-rw-r--r--  drivers/iommu/Kconfig | 146
-rw-r--r--  drivers/iommu/Makefile | 15
-rw-r--r--  drivers/iommu/amd/Kconfig | 44
-rw-r--r--  drivers/iommu/amd/Makefile | 4
-rw-r--r--  drivers/iommu/amd/init.c | 13
-rw-r--r--  drivers/iommu/amd/iommu.c | 31
-rw-r--r--  drivers/iommu/arm/Makefile | 2
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/Makefile | 2
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c (renamed from drivers/iommu/arm-smmu-v3.c) | 4
-rw-r--r--  drivers/iommu/arm/arm-smmu/Makefile | 4
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-impl.c (renamed from drivers/iommu/arm-smmu-impl.c) | 60
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c | 278
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c (renamed from drivers/iommu/arm-smmu-qcom.c) | 0
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c (renamed from drivers/iommu/arm-smmu.c) | 42
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.h (renamed from drivers/iommu/arm-smmu.h) | 6
-rw-r--r--  drivers/iommu/arm/arm-smmu/qcom_iommu.c (renamed from drivers/iommu/qcom_iommu.c) | 66
-rw-r--r--  drivers/iommu/exynos-iommu.c | 32
-rw-r--r--  drivers/iommu/fsl_pamu.c | 5
-rw-r--r--  drivers/iommu/fsl_pamu_domain.c | 8
-rw-r--r--  drivers/iommu/intel/Kconfig | 87
-rw-r--r--  drivers/iommu/intel/Makefile | 7
-rw-r--r--  drivers/iommu/intel/debugfs.c | 2
-rw-r--r--  drivers/iommu/intel/dmar.c | 26
-rw-r--r--  drivers/iommu/intel/iommu.c | 141
-rw-r--r--  drivers/iommu/intel/pasid.c | 13
-rw-r--r--  drivers/iommu/intel/pasid.h (renamed from drivers/iommu/intel/intel-pasid.h) | 2
-rw-r--r--  drivers/iommu/intel/svm.c | 335
-rw-r--r--  drivers/iommu/io-pgtable-arm-v7s.c | 18
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 21
-rw-r--r--  drivers/iommu/iommu.c | 37
-rw-r--r--  drivers/iommu/iova.c | 4
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 14
-rw-r--r--  drivers/iommu/msm_iommu.c | 6
-rw-r--r--  drivers/iommu/mtk_iommu.c | 112
-rw-r--r--  drivers/iommu/mtk_iommu.h | 23
-rw-r--r--  drivers/iommu/mtk_iommu_v1.c | 10
-rw-r--r--  drivers/iommu/omap-iommu-debug.c | 3
-rw-r--r--  drivers/iommu/omap-iommu.c | 22
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 8
-rw-r--r--  drivers/iommu/tegra-gart.c | 8
-rw-r--r--  drivers/iommu/tegra-smmu.c | 8
-rw-r--r--  drivers/iommu/virtio-iommu.c | 34
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h | 4
-rw-r--r--  drivers/memory/mtk-smi.c | 22
-rw-r--r--  drivers/mfd/sky81452.c | 2
-rw-r--r--  drivers/net/ipa/ipa.h | 3
-rw-r--r--  drivers/net/ipa/ipa_modem.c | 56
-rw-r--r--  drivers/net/virtio_net.c | 9
-rw-r--r--  drivers/nvdimm/bus.c | 16
-rw-r--r--  drivers/nvdimm/core.c | 149
-rw-r--r--  drivers/nvdimm/dimm_devs.c | 123
-rw-r--r--  drivers/nvdimm/namespace_devs.c | 2
-rw-r--r--  drivers/nvdimm/nd-core.h | 1
-rw-r--r--  drivers/nvdimm/pfn_devs.c | 2
-rw-r--r--  drivers/nvdimm/region_devs.c | 2
-rw-r--r--  drivers/nvdimm/security.c | 13
-rw-r--r--  drivers/nvdimm/virtio_pmem.c | 4
-rw-r--r--  drivers/platform/chrome/Kconfig | 1
-rw-r--r--  drivers/platform/chrome/cros_ec_debugfs.c | 24
-rw-r--r--  drivers/platform/chrome/cros_ec_ishtp.c | 4
-rw-r--r--  drivers/platform/chrome/cros_ec_proto.c | 42
-rw-r--r--  drivers/platform/chrome/cros_ec_rpmsg.c | 3
-rw-r--r--  drivers/platform/chrome/cros_ec_sensorhub_ring.c | 98
-rw-r--r--  drivers/platform/chrome/cros_ec_spi.c | 4
-rw-r--r--  drivers/platform/chrome/cros_ec_typec.c | 399
-rw-r--r--  drivers/platform/mellanox/mlxbf-tmfifo.c | 13
-rw-r--r--  drivers/remoteproc/Kconfig | 34
-rw-r--r--  drivers/remoteproc/Makefile | 5
-rw-r--r--  drivers/remoteproc/ingenic_rproc.c | 84
-rw-r--r--  drivers/remoteproc/qcom_common.c | 133
-rw-r--r--  drivers/remoteproc/qcom_common.h | 5
-rw-r--r--  drivers/remoteproc/qcom_pil_info.c | 129
-rw-r--r--  drivers/remoteproc/qcom_pil_info.h | 9
-rw-r--r--  drivers/remoteproc/qcom_q6v5.c | 2
-rw-r--r--  drivers/remoteproc/qcom_q6v5_adsp.c | 16
-rw-r--r--  drivers/remoteproc/qcom_q6v5_ipa_notify.c | 85
-rw-r--r--  drivers/remoteproc/qcom_q6v5_mss.c | 157
-rw-r--r--  drivers/remoteproc/qcom_q6v5_pas.c | 15
-rw-r--r--  drivers/remoteproc/qcom_q6v5_wcss.c | 14
-rw-r--r--  drivers/remoteproc/qcom_sysmon.c | 4
-rw-r--r--  drivers/remoteproc/qcom_wcnss.c | 14
-rw-r--r--  drivers/remoteproc/remoteproc_cdev.c | 124
-rw-r--r--  drivers/remoteproc/remoteproc_core.c | 457
-rw-r--r--  drivers/remoteproc/remoteproc_coredump.c | 325
-rw-r--r--  drivers/remoteproc/remoteproc_debugfs.c | 90
-rw-r--r--  drivers/remoteproc/remoteproc_internal.h | 42
-rw-r--r--  drivers/remoteproc/remoteproc_sysfs.c | 17
-rw-r--r--  drivers/remoteproc/stm32_rproc.c | 214
-rw-r--r--  drivers/remoteproc/ti_k3_dsp_remoteproc.c | 787
-rw-r--r--  drivers/remoteproc/ti_sci_proc.h | 104
-rw-r--r--  drivers/rpmsg/virtio_rpmsg_bus.c | 63
-rw-r--r--  drivers/scsi/virtio_scsi.c | 4
-rw-r--r--  drivers/vdpa/Kconfig | 19
-rw-r--r--  drivers/vdpa/Makefile | 1
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.c | 4
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.h | 6
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_main.c | 31
-rw-r--r--  drivers/vdpa/mlx5/Makefile | 4
-rw-r--r--  drivers/vdpa/mlx5/core/mlx5_vdpa.h | 91
-rw-r--r--  drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h | 168
-rw-r--r--  drivers/vdpa/mlx5/core/mr.c | 486
-rw-r--r--  drivers/vdpa/mlx5/core/resources.c | 284
-rw-r--r--  drivers/vdpa/mlx5/net/main.c | 76
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c | 1974
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.h | 24
-rw-r--r--  drivers/vdpa/vdpa.c | 4
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim.c | 124
-rw-r--r--  drivers/vhost/Kconfig | 1
-rw-r--r--  drivers/vhost/net.c | 22
-rw-r--r--  drivers/vhost/vdpa.c | 183
-rw-r--r--  drivers/vhost/vhost.c | 39
-rw-r--r--  drivers/vhost/vhost.h | 11
-rw-r--r--  drivers/video/backlight/88pm860x_bl.c | 13
-rw-r--r--  drivers/video/backlight/Kconfig | 15
-rw-r--r--  drivers/video/backlight/Makefile | 2
-rw-r--r--  drivers/video/backlight/adp5520_bl.c | 10
-rw-r--r--  drivers/video/backlight/adp8860_bl.c | 10
-rw-r--r--  drivers/video/backlight/adp8870_bl.c | 10
-rw-r--r--  drivers/video/backlight/as3711_bl.c | 11
-rw-r--r--  drivers/video/backlight/backlight.c | 206
-rw-r--r--  drivers/video/backlight/bd6107.c | 7
-rw-r--r--  drivers/video/backlight/corgi_lcd.c | 8
-rw-r--r--  drivers/video/backlight/cr_bllcd.c | 26
-rw-r--r--  drivers/video/backlight/da903x_bl.c | 13
-rw-r--r--  drivers/video/backlight/ep93xx_bl.c | 8
-rw-r--r--  drivers/video/backlight/generic_bl.c | 110
-rw-r--r--  drivers/video/backlight/gpio_backlight.c | 17
-rw-r--r--  drivers/video/backlight/hp680_bl.c | 6
-rw-r--r--  drivers/video/backlight/ili922x.c | 8
-rw-r--r--  drivers/video/backlight/jornada720_bl.c | 2
-rw-r--r--  drivers/video/backlight/kb3886_bl.c | 6
-rw-r--r--  drivers/video/backlight/lcd.c | 1
-rw-r--r--  drivers/video/backlight/led_bl.c | 7
-rw-r--r--  drivers/video/backlight/lm3533_bl.c | 10
-rw-r--r--  drivers/video/backlight/lm3630a_bl.c | 4
-rw-r--r--  drivers/video/backlight/lms501kf03.c | 9
-rw-r--r--  drivers/video/backlight/locomolcd.c | 6
-rw-r--r--  drivers/video/backlight/lv5207lp.c | 7
-rw-r--r--  drivers/video/backlight/max8925_bl.c | 13
-rw-r--r--  drivers/video/backlight/ot200_bl.c | 162
-rw-r--r--  drivers/video/backlight/pwm_bl.c | 7
-rw-r--r--  drivers/video/backlight/qcom-wled.c | 15
-rw-r--r--  drivers/video/backlight/sky81452-backlight.c | 52
-rw-r--r--  drivers/video/backlight/tps65217_bl.c | 10
-rw-r--r--  drivers/video/backlight/wm831x_bl.c | 13
-rw-r--r--  drivers/virtio/virtio_balloon.c | 30
-rw-r--r--  drivers/virtio/virtio_input.c | 32
-rw-r--r--  drivers/virtio/virtio_mem.c | 30
-rw-r--r--  drivers/virtio/virtio_pci_modern.c | 1
-rw-r--r--  drivers/virtio/virtio_ring.c | 7
-rw-r--r--  drivers/virtio/virtio_vdpa.c | 9
164 files changed, 8811 insertions, 2101 deletions
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 7c138a4edc03..26dd208a0d63 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -73,6 +73,18 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id)
}
EXPORT_SYMBOL(to_nfit_uuid);
+static const guid_t *to_nfit_bus_uuid(int family)
+{
+ if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT,
+ "only secondary bus families can be translated\n"))
+ return NULL;
+ /*
+ * The index of bus UUIDs starts immediately following the last
+ * NVDIMM/leaf family.
+ */
+ return to_nfit_uuid(family + NVDIMM_FAMILY_MAX);
+}
+
static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
@@ -362,24 +374,8 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
[NVDIMM_FAMILY_INTEL] = {
- [NVDIMM_INTEL_GET_MODES] = 2,
- [NVDIMM_INTEL_GET_FWINFO] = 2,
- [NVDIMM_INTEL_START_FWUPDATE] = 2,
- [NVDIMM_INTEL_SEND_FWUPDATE] = 2,
- [NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
- [NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
- [NVDIMM_INTEL_SET_THRESHOLD] = 2,
- [NVDIMM_INTEL_INJECT_ERROR] = 2,
- [NVDIMM_INTEL_GET_SECURITY_STATE] = 2,
- [NVDIMM_INTEL_SET_PASSPHRASE] = 2,
- [NVDIMM_INTEL_DISABLE_PASSPHRASE] = 2,
- [NVDIMM_INTEL_UNLOCK_UNIT] = 2,
- [NVDIMM_INTEL_FREEZE_LOCK] = 2,
- [NVDIMM_INTEL_SECURE_ERASE] = 2,
- [NVDIMM_INTEL_OVERWRITE] = 2,
- [NVDIMM_INTEL_QUERY_OVERWRITE] = 2,
- [NVDIMM_INTEL_SET_MASTER_PASSPHRASE] = 2,
- [NVDIMM_INTEL_MASTER_SECURE_ERASE] = 2,
+ [NVDIMM_INTEL_GET_MODES ...
+ NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2,
},
};
u8 id;
@@ -406,7 +402,7 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
}
static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
- struct nd_cmd_pkg *call_pkg)
+ struct nd_cmd_pkg *call_pkg, int *family)
{
if (call_pkg) {
int i;
@@ -417,6 +413,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
if (call_pkg->nd_reserved2[i])
return -EINVAL;
+ *family = call_pkg->nd_family;
return call_pkg->nd_command;
}
@@ -450,13 +447,14 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
acpi_handle handle;
const guid_t *guid;
int func, rc, i;
+ int family = 0;
if (cmd_rc)
*cmd_rc = -EINVAL;
if (cmd == ND_CMD_CALL)
call_pkg = buf;
- func = cmd_to_func(nfit_mem, cmd, call_pkg);
+ func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
if (func < 0)
return func;
@@ -478,9 +476,17 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
cmd_name = nvdimm_bus_cmd_name(cmd);
cmd_mask = nd_desc->cmd_mask;
- dsm_mask = nd_desc->bus_dsm_mask;
+ if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
+ family = call_pkg->nd_family;
+ if (!test_bit(family, &nd_desc->bus_family_mask))
+ return -EINVAL;
+ dsm_mask = acpi_desc->family_dsm_mask[family];
+ guid = to_nfit_bus_uuid(family);
+ } else {
+ dsm_mask = acpi_desc->bus_dsm_mask;
+ guid = to_nfit_uuid(NFIT_DEV_BUS);
+ }
desc = nd_cmd_bus_desc(cmd);
- guid = to_nfit_uuid(NFIT_DEV_BUS);
handle = adev->handle;
dimm_name = "bus";
}
@@ -516,8 +522,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
in_buf.buffer.length = call_pkg->nd_size_in;
}
- dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
- dimm_name, cmd, func, in_buf.buffer.length);
+ dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n",
+ dimm_name, cmd, family, func, in_buf.buffer.length);
if (payload_dumpable(nvdimm, func))
print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
in_buf.buffer.pointer,
@@ -1238,8 +1244,9 @@ static ssize_t bus_dsm_mask_show(struct device *dev,
{
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
- return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
+ return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);
@@ -1385,8 +1392,12 @@ static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
struct device *dev = container_of(kobj, struct device, kobj);
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
- if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
- return 0;
+ if (a == &dev_attr_scrub.attr)
+ return ars_supported(nvdimm_bus) ? a->mode : 0;
+
+ if (a == &dev_attr_firmware_activate_noidle.attr)
+ return intel_fwa_supported(nvdimm_bus) ? a->mode : 0;
+
return a->mode;
}
@@ -1395,6 +1406,7 @@ static struct attribute *acpi_nfit_attributes[] = {
&dev_attr_scrub.attr,
&dev_attr_hw_error_scrub.attr,
&dev_attr_bus_dsm_mask.attr,
+ &dev_attr_firmware_activate_noidle.attr,
NULL,
};
@@ -1823,6 +1835,7 @@ static void populate_shutdown_status(struct nfit_mem *nfit_mem)
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, u32 device_handle)
{
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
struct acpi_device *adev, *adev_dimm;
struct device *dev = acpi_desc->dev;
unsigned long dsm_mask, label_mask;
@@ -1834,6 +1847,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
/* nfit test assumes 1:1 relationship between commands and dsms */
nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
nfit_mem->family = NVDIMM_FAMILY_INTEL;
+ set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x",
@@ -1886,10 +1900,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
* Note, that checking for function0 (bit0) tells us if any commands
* are reachable through this GUID.
*/
+ clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
- if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
+ if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) {
+ set_bit(i, &nd_desc->dimm_family_mask);
if (family < 0 || i == default_dsm_family)
family = i;
+ }
/* limit the supported commands to those that are publicly documented */
nfit_mem->family = family;
@@ -2007,6 +2024,26 @@ static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family)
}
}
+static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops(
+ struct nfit_mem *nfit_mem)
+{
+ unsigned long mask;
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+
+ if (!nd_desc->fw_ops)
+ return NULL;
+
+ if (nfit_mem->family != NVDIMM_FAMILY_INTEL)
+ return NULL;
+
+ mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK;
+ if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
+ return NULL;
+
+ return intel_fw_ops;
+}
+
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
struct nfit_mem *nfit_mem;
@@ -2083,7 +2120,8 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
acpi_nfit_dimm_attribute_groups,
flags, cmd_mask, flush ? flush->hint_count : 0,
nfit_mem->flush_wpq, &nfit_mem->id[0],
- acpi_nfit_get_security_ops(nfit_mem->family));
+ acpi_nfit_get_security_ops(nfit_mem->family),
+ acpi_nfit_get_fw_ops(nfit_mem));
if (!nvdimm)
return -ENOMEM;
@@ -2147,12 +2185,23 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
+ unsigned long dsm_mask, *mask;
struct acpi_device *adev;
- unsigned long dsm_mask;
int i;
- nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
- nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
+ set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
+ set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask);
+
+ /* enable nfit_test to inject bus command emulation */
+ if (acpi_desc->bus_cmd_force_en) {
+ nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
+ mask = &nd_desc->bus_family_mask;
+ if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) {
+ set_bit(NVDIMM_BUS_FAMILY_INTEL, mask);
+ nd_desc->fw_ops = intel_bus_fw_ops;
+ }
+ }
+
adev = to_acpi_dev(acpi_desc);
if (!adev)
return;
@@ -2160,7 +2209,6 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
set_bit(i, &nd_desc->cmd_mask);
- set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
dsm_mask =
(1 << ND_CMD_ARS_CAP) |
@@ -2173,7 +2221,20 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
(1 << NFIT_CMD_ARS_INJECT_GET);
for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
- set_bit(i, &nd_desc->bus_dsm_mask);
+ set_bit(i, &acpi_desc->bus_dsm_mask);
+
+ /* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */
+ dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
+ guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL);
+ mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
+ for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
+ if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
+ set_bit(i, mask);
+
+ if (*mask == dsm_mask) {
+ set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask);
+ nd_desc->fw_ops = intel_bus_fw_ops;
+ }
}
static ssize_t range_index_show(struct device *dev,
@@ -3273,7 +3334,7 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
struct nfit_spa *nfit_spa;
- int rc;
+ int rc, do_sched_ars = 0;
set_bit(ARS_VALID, &acpi_desc->scrub_flags);
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
@@ -3285,7 +3346,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
}
}
- list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
switch (nfit_spa_type(nfit_spa->spa)) {
case NFIT_SPA_VOLATILE:
case NFIT_SPA_PM:
@@ -3293,6 +3354,13 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
rc = ars_register(acpi_desc, nfit_spa);
if (rc)
return rc;
+
+ /*
+ * Kick off background ARS if at least one
+ * region successfully registered ARS
+ */
+ if (!test_bit(ARS_FAILED, &nfit_spa->ars_state))
+ do_sched_ars++;
break;
case NFIT_SPA_BDW:
/* nothing to register */
@@ -3311,8 +3379,10 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
/* don't register unknown regions */
break;
}
+ }
- sched_ars(acpi_desc);
+ if (do_sched_ars)
+ sched_ars(acpi_desc);
return 0;
}
@@ -3485,7 +3555,10 @@ static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
return 0;
}
-/* prevent security commands from being issued via ioctl */
+/*
+ * Prevent security and firmware activate commands from being issued via
+ * ioctl.
+ */
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf)
{
@@ -3496,10 +3569,15 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
func = call_pkg->nd_command;
if (func > NVDIMM_CMD_MAX ||
- (1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
+ (1 << func) & NVDIMM_INTEL_DENY_CMDMASK)
return -EOPNOTSUPP;
}
+ /* block all non-nfit bus commands */
+ if (!nvdimm && cmd == ND_CMD_CALL &&
+ call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT)
+ return -EOPNOTSUPP;
+
return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
}
@@ -3791,6 +3869,7 @@ static __init int nfit_init(void)
guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
+ guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]);
nfit_wq = create_singlethread_workqueue("nfit");
if (!nfit_wq)
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 1113b679cd7b..8dd792a55730 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -7,6 +7,48 @@
#include "intel.h"
#include "nfit.h"
+static ssize_t firmware_activate_noidle_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+ return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? "Y" : "N");
+}
+
+static ssize_t firmware_activate_noidle_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ ssize_t rc;
+ bool val;
+
+ rc = kstrtobool(buf, &val);
+ if (rc)
+ return rc;
+ if (val != acpi_desc->fwa_noidle)
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID;
+ acpi_desc->fwa_noidle = val;
+ return size;
+}
+DEVICE_ATTR_RW(firmware_activate_noidle);
+
+bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus)
+{
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ unsigned long *mask;
+
+ if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask))
+ return false;
+
+ mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
+ return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
+}
+
static unsigned long intel_security_flags(struct nvdimm *nvdimm,
enum nvdimm_passphrase_type ptype)
{
@@ -389,3 +431,347 @@ static const struct nvdimm_security_ops __intel_security_ops = {
};
const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;
+
+static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
+ struct nd_intel_bus_fw_activate_businfo *info)
+{
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_bus_fw_activate_businfo cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
+ .nd_family = NVDIMM_BUS_FAMILY_INTEL,
+ .nd_size_out =
+ sizeof(struct nd_intel_bus_fw_activate_businfo),
+ .nd_fw_size =
+ sizeof(struct nd_intel_bus_fw_activate_businfo),
+ },
+ };
+ int rc;
+
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
+ NULL);
+ *info = nd_cmd.cmd;
+ return rc;
+}
+
+/* The fw_ops expect to be called with the nvdimm_bus_lock() held */
+static enum nvdimm_fwa_state intel_bus_fwa_state(
+ struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ struct nd_intel_bus_fw_activate_businfo info;
+ struct device *dev = acpi_desc->dev;
+ enum nvdimm_fwa_state state;
+ int rc;
+
+ /*
+ * It should not be possible for platform firmware to return
+ * busy because activate is a synchronous operation. Treat it
+ * similar to invalid, i.e. always refresh / poll the status.
+ */
+ switch (acpi_desc->fwa_state) {
+ case NVDIMM_FWA_INVALID:
+ case NVDIMM_FWA_BUSY:
+ break;
+ default:
+ /* check if capability needs to be refreshed */
+ if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID)
+ break;
+ return acpi_desc->fwa_state;
+ }
+
+ /* Refresh with platform firmware */
+ rc = intel_bus_fwa_businfo(nd_desc, &info);
+ if (rc)
+ return NVDIMM_FWA_INVALID;
+
+ switch (info.state) {
+ case ND_INTEL_FWA_IDLE:
+ state = NVDIMM_FWA_IDLE;
+ break;
+ case ND_INTEL_FWA_BUSY:
+ state = NVDIMM_FWA_BUSY;
+ break;
+ case ND_INTEL_FWA_ARMED:
+ if (info.activate_tmo > info.max_quiesce_tmo)
+ state = NVDIMM_FWA_ARM_OVERFLOW;
+ else
+ state = NVDIMM_FWA_ARMED;
+ break;
+ default:
+ dev_err_once(dev, "invalid firmware activate state %d\n",
+ info.state);
+ return NVDIMM_FWA_INVALID;
+ }
+
+ /*
+ * Capability data is available in the same payload as state. It
+ * is expected to be static.
+ */
+ if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) {
+ if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE)
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE;
+ else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) {
+ /*
+ * Skip hibernate cycle by default if platform
+ * indicates that it does not need devices to be
+ * quiesced.
+ */
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE;
+ } else
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE;
+ }
+
+ acpi_desc->fwa_state = state;
+
+ return state;
+}
+
+static enum nvdimm_fwa_capability intel_bus_fwa_capability(
+ struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+ if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID)
+ return acpi_desc->fwa_cap;
+
+ if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID)
+ return acpi_desc->fwa_cap;
+
+ return NVDIMM_FWA_CAP_INVALID;
+}
+
+static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_bus_fw_activate cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
+ .nd_family = NVDIMM_BUS_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
+ .nd_size_out =
+ sizeof(struct nd_intel_bus_fw_activate),
+ .nd_fw_size =
+ sizeof(struct nd_intel_bus_fw_activate),
+ },
+ /*
+ * Even though activate is run from a suspended context,
+ * for safety, still ask platform firmware to force
+ * quiesce devices by default. Let a module
+ * parameter override that policy.
+ */
+ .cmd = {
+ .iodev_state = acpi_desc->fwa_noidle
+ ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
+ : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
+ },
+ };
+ int rc;
+
+ switch (intel_bus_fwa_state(nd_desc)) {
+ case NVDIMM_FWA_ARMED:
+ case NVDIMM_FWA_ARM_OVERFLOW:
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
+ NULL);
+
+ /*
+ * Whether the command succeeded, or failed, the agent checking
+ * for the result needs to query the DIMMs individually.
+ * Increment the activation count to invalidate all the DIMM
+ * states at once (it's otherwise not possible to take
+ * acpi_desc->init_mutex in this context)
+ */
+ acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
+ acpi_desc->fwa_count++;
+
+ dev_dbg(acpi_desc->dev, "result: %d\n", rc);
+
+ return rc;
+}
+
+static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = {
+ .activate_state = intel_bus_fwa_state,
+ .capability = intel_bus_fwa_capability,
+ .activate = intel_bus_fwa_activate,
+};
+
+const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;
+
+static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
+ struct nd_intel_fw_activate_dimminfo *info)
+{
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_fw_activate_dimminfo cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
+ .nd_family = NVDIMM_FAMILY_INTEL,
+ .nd_size_out =
+ sizeof(struct nd_intel_fw_activate_dimminfo),
+ .nd_fw_size =
+ sizeof(struct nd_intel_fw_activate_dimminfo),
+ },
+ };
+ int rc;
+
+ rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
+ *info = nd_cmd.cmd;
+ return rc;
+}
+
+static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm)
+{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+ struct nd_intel_fw_activate_dimminfo info;
+ int rc;
+
+ /*
+ * Similar to the bus state, since activate is synchronous the
+ * busy state should resolve within the context of 'activate'.
+ */
+ switch (nfit_mem->fwa_state) {
+ case NVDIMM_FWA_INVALID:
+ case NVDIMM_FWA_BUSY:
+ break;
+ default:
+ /* If no activations occurred the old state is still valid */
+ if (nfit_mem->fwa_count == acpi_desc->fwa_count)
+ return nfit_mem->fwa_state;
+ }
+
+ rc = intel_fwa_dimminfo(nvdimm, &info);
+ if (rc)
+ return NVDIMM_FWA_INVALID;
+
+ switch (info.state) {
+ case ND_INTEL_FWA_IDLE:
+ nfit_mem->fwa_state = NVDIMM_FWA_IDLE;
+ break;
+ case ND_INTEL_FWA_BUSY:
+ nfit_mem->fwa_state = NVDIMM_FWA_BUSY;
+ break;
+ case ND_INTEL_FWA_ARMED:
+ nfit_mem->fwa_state = NVDIMM_FWA_ARMED;
+ break;
+ default:
+ nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
+ break;
+ }
+
+ switch (info.result) {
+ case ND_INTEL_DIMM_FWA_NONE:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE;
+ break;
+ case ND_INTEL_DIMM_FWA_SUCCESS:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS;
+ break;
+ case ND_INTEL_DIMM_FWA_NOTSTAGED:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED;
+ break;
+ case ND_INTEL_DIMM_FWA_NEEDRESET:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET;
+ break;
+ case ND_INTEL_DIMM_FWA_MEDIAFAILED:
+ case ND_INTEL_DIMM_FWA_ABORT:
+ case ND_INTEL_DIMM_FWA_NOTSUPP:
+ case ND_INTEL_DIMM_FWA_ERROR:
+ default:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL;
+ break;
+ }
+
+ nfit_mem->fwa_count = acpi_desc->fwa_count;
+
+ return nfit_mem->fwa_state;
+}
+
+static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm)
+{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+
+ if (nfit_mem->fwa_count == acpi_desc->fwa_count
+ && nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID)
+ return nfit_mem->fwa_result;
+
+ if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID)
+ return nfit_mem->fwa_result;
+
+ return NVDIMM_FWA_RESULT_INVALID;
+}
+
+static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
+{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_fw_activate_arm cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
+ .nd_family = NVDIMM_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
+ .nd_size_out =
+ sizeof(struct nd_intel_fw_activate_arm),
+ .nd_fw_size =
+ sizeof(struct nd_intel_fw_activate_arm),
+ },
+ .cmd = {
+ .activate_arm = arm == NVDIMM_FWA_ARM
+ ? ND_INTEL_DIMM_FWA_ARM
+ : ND_INTEL_DIMM_FWA_DISARM,
+ },
+ };
+ int rc;
+
+ switch (intel_fwa_state(nvdimm)) {
+ case NVDIMM_FWA_INVALID:
+ return -ENXIO;
+ case NVDIMM_FWA_BUSY:
+ return -EBUSY;
+ case NVDIMM_FWA_IDLE:
+ if (arm == NVDIMM_FWA_DISARM)
+ return 0;
+ break;
+ case NVDIMM_FWA_ARMED:
+ if (arm == NVDIMM_FWA_ARM)
+ return 0;
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ /*
+ * Invalidate the bus-level state, now that we're committed to
+ * changing the 'arm' state.
+ */
+ acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
+ nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
+
+ rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
+
+ dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM
+ ? "arm" : "disarm", rc);
+ return rc;
+}
+
+static const struct nvdimm_fw_ops __intel_fw_ops = {
+ .activate_state = intel_fwa_state,
+ .activate_result = intel_fwa_result,
+ .arm = intel_fwa_arm,
+};
+
+const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;
diff --git a/drivers/acpi/nfit/intel.h b/drivers/acpi/nfit/intel.h
index 0aca682ab9d7..b768234ccebc 100644
--- a/drivers/acpi/nfit/intel.h
+++ b/drivers/acpi/nfit/intel.h
@@ -111,4 +111,65 @@ struct nd_intel_master_secure_erase {
u8 passphrase[ND_INTEL_PASSPHRASE_SIZE];
u32 status;
} __packed;
+
+#define ND_INTEL_FWA_IDLE 0
+#define ND_INTEL_FWA_ARMED 1
+#define ND_INTEL_FWA_BUSY 2
+
+#define ND_INTEL_DIMM_FWA_NONE 0
+#define ND_INTEL_DIMM_FWA_NOTSTAGED 1
+#define ND_INTEL_DIMM_FWA_SUCCESS 2
+#define ND_INTEL_DIMM_FWA_NEEDRESET 3
+#define ND_INTEL_DIMM_FWA_MEDIAFAILED 4
+#define ND_INTEL_DIMM_FWA_ABORT 5
+#define ND_INTEL_DIMM_FWA_NOTSUPP 6
+#define ND_INTEL_DIMM_FWA_ERROR 7
+
+struct nd_intel_fw_activate_dimminfo {
+ u32 status;
+ u16 result;
+ u8 state;
+ u8 reserved[7];
+} __packed;
+
+#define ND_INTEL_DIMM_FWA_ARM 1
+#define ND_INTEL_DIMM_FWA_DISARM 0
+
+struct nd_intel_fw_activate_arm {
+ u8 activate_arm;
+ u32 status;
+} __packed;
+
+/* Root device command payloads */
+#define ND_INTEL_BUS_FWA_CAP_FWQUIESCE (1 << 0)
+#define ND_INTEL_BUS_FWA_CAP_OSQUIESCE (1 << 1)
+#define ND_INTEL_BUS_FWA_CAP_RESET (1 << 2)
+
+struct nd_intel_bus_fw_activate_businfo {
+ u32 status;
+ u16 reserved;
+ u8 state;
+ u8 capability;
+ u64 activate_tmo;
+ u64 cpu_quiesce_tmo;
+ u64 io_quiesce_tmo;
+ u64 max_quiesce_tmo;
+} __packed;
+
+#define ND_INTEL_BUS_FWA_STATUS_NOARM (6 | 1 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_BUSY (6 | 2 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_NOFW (6 | 3 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_TMO (6 | 4 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_NOIDLE (6 | 5 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_ABORT (6 | 6 << 16)
+
+#define ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE (0)
+#define ND_INTEL_BUS_FWA_IODEV_OS_IDLE (1)
+struct nd_intel_bus_fw_activate {
+ u8 iodev_state;
+ u32 status;
+} __packed;
+
+extern const struct nvdimm_fw_ops *intel_fw_ops;
+extern const struct nvdimm_bus_fw_ops *intel_bus_fw_ops;
#endif
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index a303f0123394..c674f3df9be7 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -18,6 +18,7 @@
/* https://pmem.io/documents/NVDIMM_DSM_Interface-V1.6.pdf */
#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66"
+#define UUID_INTEL_BUS "c7d8acd4-2df8-4b82-9f65-a325335af149"
/* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */
#define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6"
@@ -33,7 +34,6 @@
| ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
| ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
-#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV
#define NVDIMM_CMD_MAX 31
#define NVDIMM_STANDARD_CMDMASK \
@@ -66,6 +66,13 @@ enum nvdimm_family_cmds {
NVDIMM_INTEL_QUERY_OVERWRITE = 26,
NVDIMM_INTEL_SET_MASTER_PASSPHRASE = 27,
NVDIMM_INTEL_MASTER_SECURE_ERASE = 28,
+ NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO = 29,
+ NVDIMM_INTEL_FW_ACTIVATE_ARM = 30,
+};
+
+enum nvdimm_bus_family_cmds {
+ NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO = 1,
+ NVDIMM_BUS_INTEL_FW_ACTIVATE = 2,
};
#define NVDIMM_INTEL_SECURITY_CMDMASK \
@@ -76,13 +83,22 @@ enum nvdimm_family_cmds {
| 1 << NVDIMM_INTEL_SET_MASTER_PASSPHRASE \
| 1 << NVDIMM_INTEL_MASTER_SECURE_ERASE)
+#define NVDIMM_INTEL_FW_ACTIVATE_CMDMASK \
+(1 << NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO | 1 << NVDIMM_INTEL_FW_ACTIVATE_ARM)
+
+#define NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK \
+(1 << NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO | 1 << NVDIMM_BUS_INTEL_FW_ACTIVATE)
+
#define NVDIMM_INTEL_CMDMASK \
(NVDIMM_STANDARD_CMDMASK | 1 << NVDIMM_INTEL_GET_MODES \
| 1 << NVDIMM_INTEL_GET_FWINFO | 1 << NVDIMM_INTEL_START_FWUPDATE \
| 1 << NVDIMM_INTEL_SEND_FWUPDATE | 1 << NVDIMM_INTEL_FINISH_FWUPDATE \
| 1 << NVDIMM_INTEL_QUERY_FWUPDATE | 1 << NVDIMM_INTEL_SET_THRESHOLD \
| 1 << NVDIMM_INTEL_INJECT_ERROR | 1 << NVDIMM_INTEL_LATCH_SHUTDOWN \
- | NVDIMM_INTEL_SECURITY_CMDMASK)
+ | NVDIMM_INTEL_SECURITY_CMDMASK | NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
+
+#define NVDIMM_INTEL_DENY_CMDMASK \
+(NVDIMM_INTEL_SECURITY_CMDMASK | NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
enum nfit_uuids {
/* for simplicity alias the uuid index with the family id */
@@ -91,6 +107,11 @@ enum nfit_uuids {
NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2,
NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT,
NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV,
+ /*
+ * to_nfit_bus_uuid() expects to translate bus uuid family ids
+ * to a UUID index using NVDIMM_FAMILY_MAX as an offset
+ */
+ NFIT_BUS_INTEL = NVDIMM_FAMILY_MAX + NVDIMM_BUS_FAMILY_INTEL,
NFIT_SPA_VOLATILE,
NFIT_SPA_PM,
NFIT_SPA_DCR,
@@ -199,6 +220,9 @@ struct nfit_mem {
struct list_head list;
struct acpi_device *adev;
struct acpi_nfit_desc *acpi_desc;
+ enum nvdimm_fwa_state fwa_state;
+ enum nvdimm_fwa_result fwa_result;
+ int fwa_count;
char id[NFIT_DIMM_ID_LEN+1];
struct resource *flush_wpq;
unsigned long dsm_mask;
@@ -238,11 +262,17 @@ struct acpi_nfit_desc {
unsigned long scrub_flags;
unsigned long dimm_cmd_force_en;
unsigned long bus_cmd_force_en;
- unsigned long bus_nfit_cmd_force_en;
+ unsigned long bus_dsm_mask;
+ unsigned long family_dsm_mask[NVDIMM_BUS_FAMILY_MAX + 1];
unsigned int platform_cap;
unsigned int scrub_tmo;
int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw);
+ enum nvdimm_fwa_state fwa_state;
+ enum nvdimm_fwa_capability fwa_cap;
+ int fwa_count;
+ bool fwa_noidle;
+ bool fwa_nosuspend;
};
enum scrub_mode {
@@ -345,4 +375,6 @@ void __acpi_nvdimm_notify(struct device *dev, u32 event);
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc);
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
+bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus);
+extern struct device_attribute dev_attr_firmware_activate_noidle;
#endif /* __NFIT_H__ */
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 0c66d6193ca2..080955a1dd9c 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -204,8 +204,8 @@ static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
u32 status;
int err;
- virtio_cread(vcrypto->vdev,
- struct virtio_crypto_config, status, &status);
+ virtio_cread_le(vcrypto->vdev,
+ struct virtio_crypto_config, status, &status);
/*
* Unknown status bits would be a host error and the driver
@@ -323,31 +323,31 @@ static int virtcrypto_probe(struct virtio_device *vdev)
if (!vcrypto)
return -ENOMEM;
- virtio_cread(vdev, struct virtio_crypto_config,
+ virtio_cread_le(vdev, struct virtio_crypto_config,
max_dataqueues, &max_data_queues);
if (max_data_queues < 1)
max_data_queues = 1;
- virtio_cread(vdev, struct virtio_crypto_config,
- max_cipher_key_len, &max_cipher_key_len);
- virtio_cread(vdev, struct virtio_crypto_config,
- max_auth_key_len, &max_auth_key_len);
- virtio_cread(vdev, struct virtio_crypto_config,
- max_size, &max_size);
- virtio_cread(vdev, struct virtio_crypto_config,
- crypto_services, &crypto_services);
- virtio_cread(vdev, struct virtio_crypto_config,
- cipher_algo_l, &cipher_algo_l);
- virtio_cread(vdev, struct virtio_crypto_config,
- cipher_algo_h, &cipher_algo_h);
- virtio_cread(vdev, struct virtio_crypto_config,
- hash_algo, &hash_algo);
- virtio_cread(vdev, struct virtio_crypto_config,
- mac_algo_l, &mac_algo_l);
- virtio_cread(vdev, struct virtio_crypto_config,
- mac_algo_h, &mac_algo_h);
- virtio_cread(vdev, struct virtio_crypto_config,
- aead_algo, &aead_algo);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ max_cipher_key_len, &max_cipher_key_len);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ max_auth_key_len, &max_auth_key_len);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ max_size, &max_size);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ crypto_services, &crypto_services);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ cipher_algo_l, &cipher_algo_l);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ cipher_algo_h, &cipher_algo_h);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ hash_algo, &hash_algo);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ mac_algo_l, &mac_algo_l);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ mac_algo_h, &mac_algo_h);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ aead_algo, &aead_algo);
/* Add virtio crypto device to global table */
err = virtcrypto_devmgr_add_dev(vcrypto);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index f50828526331..c82cbcb64202 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -80,14 +80,14 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
int err, id;
if (blocksize != PAGE_SIZE) {
- pr_debug("%s: error: unsupported blocksize for dax\n",
+ pr_info("%s: error: unsupported blocksize for dax\n",
bdevname(bdev, buf));
return false;
}
err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
if (err) {
- pr_debug("%s: error: unaligned partition for dax\n",
+ pr_info("%s: error: unaligned partition for dax\n",
bdevname(bdev, buf));
return false;
}
@@ -95,7 +95,7 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
if (err) {
- pr_debug("%s: error: unaligned partition for dax\n",
+ pr_info("%s: error: unaligned partition for dax\n",
bdevname(bdev, buf));
return false;
}
@@ -103,11 +103,11 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
id = dax_read_lock();
len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
- dax_read_unlock(id);
if (len < 1 || len2 < 1) {
- pr_debug("%s: error: dax access failed (%ld)\n",
+ pr_info("%s: error: dax access failed (%ld)\n",
bdevname(bdev, buf), len < 1 ? len : len2);
+ dax_read_unlock(id);
return false;
}
@@ -137,9 +137,10 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
put_dev_pagemap(end_pgmap);
}
+ dax_read_unlock(id);
if (!dax_enabled) {
- pr_debug("%s: error: dax support not enabled\n",
+ pr_info("%s: error: dax support not enabled\n",
bdevname(bdev, buf));
return false;
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 9a46be05425a..b9810bf156c3 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -24,6 +24,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/iommu.h>
#include <drm/drm_managed.h>
@@ -118,6 +119,9 @@ struct drm_i915_private *mock_gem_device(void)
{
struct drm_i915_private *i915;
struct pci_dev *pdev;
+#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
+ struct dev_iommu iommu;
+#endif
int err;
pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
@@ -136,8 +140,10 @@ struct drm_i915_private *mock_gem_device(void)
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
- /* hack to disable iommu for the fake device; force identity mapping */
- pdev->dev.archdata.iommu = (void *)-1;
+ /* HACK HACK HACK to disable iommu for the fake device; force identity mapping */
+ memset(&iommu, 0, sizeof(iommu));
+ iommu.priv = (void *)-1;
+ pdev->dev.iommu = &iommu;
#endif
pci_set_drvdata(pdev, i915);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 1a49e619aacf..e8f7b11352d2 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -262,7 +262,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
while (len) {
size_t pgsize = get_pgsize(iova | paddr, len);
- ops->map(ops, iova, paddr, pgsize, prot);
+ ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
iova += pgsize;
paddr += pgsize;
len -= pgsize;
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 0a5c8cf409fb..4d944a0dff3e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -39,8 +39,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
u32 events_read, events_clear = 0;
/* read the config space */
- virtio_cread(vgdev->vdev, struct virtio_gpu_config,
- events_read, &events_read);
+ virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
+ events_read, &events_read);
if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
if (vgdev->has_edid)
virtio_gpu_cmd_get_edids(vgdev);
@@ -49,8 +49,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(vgdev->ddev);
events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
}
- virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
- events_clear, &events_clear);
+ virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
+ events_clear, &events_clear);
}
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
@@ -165,8 +165,8 @@ int virtio_gpu_init(struct drm_device *dev)
}
/* get display info */
- virtio_cread(vgdev->vdev, struct virtio_gpu_config,
- num_scanouts, &num_scanouts);
+ virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
+ num_scanouts, &num_scanouts);
vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
VIRTIO_GPU_MAX_SCANOUTS);
if (!vgdev->num_scanouts) {
@@ -176,8 +176,8 @@ int virtio_gpu_init(struct drm_device *dev)
}
DRM_INFO("number of scanouts: %d\n", num_scanouts);
- virtio_cread(vgdev->vdev, struct virtio_gpu_config,
- num_capsets, &num_capsets);
+ virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
+ num_capsets, &num_capsets);
DRM_INFO("number of cap sets: %d\n", num_capsets);
virtio_gpu_modeset_init(vgdev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 346cef5ce251..2cdd3cd9ce75 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -141,7 +141,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
struct virtio_gpu_mem_entry **ents,
unsigned int *nents)
{
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
struct scatterlist *sg;
int si, ret;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 9e663a5d9952..53af60d484a4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -599,7 +599,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
@@ -1015,7 +1015,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 826a1054100d..32cd26352f38 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -6,9 +6,10 @@
menuconfig HWSPINLOCK
bool "Hardware Spinlock drivers"
+if HWSPINLOCK
+
config HWSPINLOCK_OMAP
tristate "OMAP Hardware Spinlock device"
- depends on HWSPINLOCK
depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX || ARCH_K3 || COMPILE_TEST
help
Say y here to support the OMAP Hardware Spinlock device (firstly
@@ -18,7 +19,6 @@ config HWSPINLOCK_OMAP
config HWSPINLOCK_QCOM
tristate "Qualcomm Hardware Spinlock device"
- depends on HWSPINLOCK
depends on ARCH_QCOM || COMPILE_TEST
select MFD_SYSCON
help
@@ -30,7 +30,6 @@ config HWSPINLOCK_QCOM
config HWSPINLOCK_SIRF
tristate "SIRF Hardware Spinlock device"
- depends on HWSPINLOCK
depends on ARCH_SIRF || COMPILE_TEST
help
Say y here to support the SIRF Hardware Spinlock device, which
@@ -43,7 +42,6 @@ config HWSPINLOCK_SIRF
config HWSPINLOCK_SPRD
tristate "SPRD Hardware Spinlock device"
depends on ARCH_SPRD || COMPILE_TEST
- depends on HWSPINLOCK
help
Say y here to support the SPRD Hardware Spinlock device.
@@ -52,7 +50,6 @@ config HWSPINLOCK_SPRD
config HWSPINLOCK_STM32
tristate "STM32 Hardware Spinlock device"
depends on MACH_STM32MP157 || COMPILE_TEST
- depends on HWSPINLOCK
help
Say y here to support the STM32 Hardware Spinlock device.
@@ -60,7 +57,6 @@ config HWSPINLOCK_STM32
config HSEM_U8500
tristate "STE Hardware Semaphore functionality"
- depends on HWSPINLOCK
depends on ARCH_U8500 || COMPILE_TEST
help
Say y here to support the STE Hardware Semaphore functionality, which
@@ -68,3 +64,5 @@ config HSEM_U8500
SoC.
If unsure, say N.
+
+endif # HWSPINLOCK
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index f0da544b14d2..364710966665 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -70,41 +70,79 @@ static const struct of_device_id qcom_hwspinlock_of_match[] = {
};
MODULE_DEVICE_TABLE(of, qcom_hwspinlock_of_match);
-static int qcom_hwspinlock_probe(struct platform_device *pdev)
+static struct regmap *qcom_hwspinlock_probe_syscon(struct platform_device *pdev,
+ u32 *base, u32 *stride)
{
- struct hwspinlock_device *bank;
struct device_node *syscon;
- struct reg_field field;
struct regmap *regmap;
- size_t array_size;
- u32 stride;
- u32 base;
int ret;
- int i;
syscon = of_parse_phandle(pdev->dev.of_node, "syscon", 0);
- if (!syscon) {
- dev_err(&pdev->dev, "no syscon property\n");
- return -ENODEV;
- }
+ if (!syscon)
+ return ERR_PTR(-ENODEV);
regmap = syscon_node_to_regmap(syscon);
of_node_put(syscon);
if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ return regmap;
- ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1, &base);
+ ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1, base);
if (ret < 0) {
dev_err(&pdev->dev, "no offset in syscon\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
- ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 2, &stride);
+ ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 2, stride);
if (ret < 0) {
dev_err(&pdev->dev, "no stride syscon\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
+ return regmap;
+}
+
+static const struct regmap_config tcsr_mutex_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x40000,
+ .fast_io = true,
+};
+
+static struct regmap *qcom_hwspinlock_probe_mmio(struct platform_device *pdev,
+ u32 *offset, u32 *stride)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+
+ /* All modern platforms have offset 0 and a stride of 4k */
+ *offset = 0;
+ *stride = 0x1000;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ return devm_regmap_init_mmio(dev, base, &tcsr_mutex_config);
+}
+
+static int qcom_hwspinlock_probe(struct platform_device *pdev)
+{
+ struct hwspinlock_device *bank;
+ struct reg_field field;
+ struct regmap *regmap;
+ size_t array_size;
+ u32 stride;
+ u32 base;
+ int i;
+
+ regmap = qcom_hwspinlock_probe_syscon(pdev, &base, &stride);
+ if (IS_ERR(regmap) && PTR_ERR(regmap) == -ENODEV)
+ regmap = qcom_hwspinlock_probe_mmio(pdev, &base, &stride);
+
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
array_size = QCOM_MUTEX_NUM_LOCKS * sizeof(struct hwspinlock);
bank = devm_kzalloc(&pdev->dev, sizeof(*bank) + array_size, GFP_KERNEL);
if (!bank)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index b622af72448f..bef5d75e306b 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -129,140 +129,8 @@ config MSM_IOMMU
If unsure, say N here.
-config IOMMU_PGTABLES_L2
- def_bool y
- depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
-
-# AMD IOMMU support
-config AMD_IOMMU
- bool "AMD IOMMU support"
- select SWIOTLB
- select PCI_MSI
- select PCI_ATS
- select PCI_PRI
- select PCI_PASID
- select IOMMU_API
- select IOMMU_IOVA
- select IOMMU_DMA
- depends on X86_64 && PCI && ACPI
- help
- With this option you can enable support for AMD IOMMU hardware in
- your system. An IOMMU is a hardware component which provides
- remapping of DMA memory accesses from devices. With an AMD IOMMU you
- can isolate the DMA memory of different devices and protect the
- system from misbehaving device drivers or hardware.
-
- You can find out if your system has an AMD IOMMU if you look into
- your BIOS for an option to enable it or if you have an IVRS ACPI
- table.
-
-config AMD_IOMMU_V2
- tristate "AMD IOMMU Version 2 driver"
- depends on AMD_IOMMU
- select MMU_NOTIFIER
- help
- This option enables support for the AMD IOMMUv2 features of the IOMMU
- hardware. Select this option if you want to use devices that support
- the PCI PRI and PASID interface.
-
-config AMD_IOMMU_DEBUGFS
- bool "Enable AMD IOMMU internals in DebugFS"
- depends on AMD_IOMMU && IOMMU_DEBUGFS
- help
- !!!WARNING!!! !!!WARNING!!! !!!WARNING!!! !!!WARNING!!!
-
- DO NOT ENABLE THIS OPTION UNLESS YOU REALLY, -REALLY- KNOW WHAT YOU ARE DOING!!!
- Exposes AMD IOMMU device internals in DebugFS.
-
- This option is -NOT- intended for production environments, and should
- not generally be enabled.
-
-# Intel IOMMU support
-config DMAR_TABLE
- bool
-
-config INTEL_IOMMU
- bool "Support for Intel IOMMU using DMA Remapping Devices"
- depends on PCI_MSI && ACPI && (X86 || IA64)
- select DMA_OPS
- select IOMMU_API
- select IOMMU_IOVA
- select NEED_DMA_MAP_STATE
- select DMAR_TABLE
- select SWIOTLB
- select IOASID
- help
- DMA remapping (DMAR) devices support enables independent address
- translations for Direct Memory Access (DMA) from devices.
- These DMA remapping devices are reported via ACPI tables
- and include PCI device scope covered by these DMA
- remapping devices.
-
-config INTEL_IOMMU_DEBUGFS
- bool "Export Intel IOMMU internals in Debugfs"
- depends on INTEL_IOMMU && IOMMU_DEBUGFS
- help
- !!!WARNING!!!
-
- DO NOT ENABLE THIS OPTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!!
-
- Expose Intel IOMMU internals in Debugfs.
-
- This option is -NOT- intended for production environments, and should
- only be enabled for debugging Intel IOMMU.
-
-config INTEL_IOMMU_SVM
- bool "Support for Shared Virtual Memory with Intel IOMMU"
- depends on INTEL_IOMMU && X86_64
- select PCI_PASID
- select PCI_PRI
- select MMU_NOTIFIER
- select IOASID
- help
- Shared Virtual Memory (SVM) provides a facility for devices
- to access DMA resources through process address space by
- means of a Process Address Space ID (PASID).
-
-config INTEL_IOMMU_DEFAULT_ON
- def_bool y
- prompt "Enable Intel DMA Remapping Devices by default"
- depends on INTEL_IOMMU
- help
- Selecting this option will enable a DMAR device at boot time if
- one is found. If this option is not selected, DMAR support can
- be enabled by passing intel_iommu=on to the kernel.
-
-config INTEL_IOMMU_BROKEN_GFX_WA
- bool "Workaround broken graphics drivers (going away soon)"
- depends on INTEL_IOMMU && BROKEN && X86
- help
- Current Graphics drivers tend to use physical address
- for DMA and avoid using DMA APIs. Setting this config
- option permits the IOMMU driver to set a unity map for
- all the OS-visible memory. Hence the driver can continue
- to use physical addresses for DMA, at least until this
- option is removed in the 2.6.32 kernel.
-
-config INTEL_IOMMU_FLOPPY_WA
- def_bool y
- depends on INTEL_IOMMU && X86
- help
- Floppy disk drivers are known to bypass DMA API calls
- thereby failing to work when IOMMU is enabled. This
- workaround will setup a 1:1 mapping for the first
- 16MiB to make floppy (an ISA device) work.
-
-config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
- bool "Enable Intel IOMMU scalable mode by default"
- depends on INTEL_IOMMU
- help
- Selecting this option will enable by default the scalable mode if
- hardware presents the capability. The scalable mode is defined in
- VT-d 3.0. The scalable mode capability could be checked by reading
- /sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option
- is not selected, scalable mode support could also be enabled by
- passing intel_iommu=sm_on to the kernel. If not sure, please use
- the default value.
+source "drivers/iommu/amd/Kconfig"
+source "drivers/iommu/intel/Kconfig"
config IRQ_REMAP
bool "Support for Interrupt Remapping"
@@ -276,7 +144,6 @@ config IRQ_REMAP
# OMAP IOMMU support
config OMAP_IOMMU
bool "OMAP IOMMU Support"
- depends on ARM && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
depends on ARCH_OMAP2PLUS || COMPILE_TEST
select IOMMU_API
help
@@ -294,7 +161,6 @@ config OMAP_IOMMU_DEBUG
config ROCKCHIP_IOMMU
bool "Rockchip IOMMU Support"
- depends on ARM || ARM64 || (COMPILE_TEST && (ARM64 || IA64 || SPARC))
depends on ARCH_ROCKCHIP || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
@@ -311,7 +177,6 @@ config SUN50I_IOMMU
depends on ARCH_SUNXI || COMPILE_TEST
select ARM_DMA_USE_IOMMU
select IOMMU_API
- select IOMMU_DMA
help
Support for the IOMMU introduced in the Allwinner H6 SoCs.
@@ -338,7 +203,7 @@ config TEGRA_IOMMU_SMMU
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
- depends on ARCH_EXYNOS && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
+ depends on ARCH_EXYNOS || COMPILE_TEST
depends on !CPU_BIG_ENDIAN # revisit driver if we can enable big-endian ptes
select IOMMU_API
select ARM_DMA_USE_IOMMU
@@ -361,7 +226,6 @@ config EXYNOS_IOMMU_DEBUG
config IPMMU_VMSA
bool "Renesas VMSA-compatible IPMMU"
- depends on ARM || IOMMU_DMA
depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64)
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
@@ -383,7 +247,7 @@ config SPAPR_TCE_IOMMU
# ARM IOMMU support
config ARM_SMMU
tristate "ARM Ltd. System MMU (SMMU) Support"
- depends on (ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)) && MMU
+ depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU if ARM
@@ -469,11 +333,9 @@ config S390_AP_IOMMU
config MTK_IOMMU
bool "MTK IOMMU Support"
- depends on HAS_DMA
depends on ARCH_MEDIATEK || COMPILE_TEST
select ARM_DMA_USE_IOMMU
select IOMMU_API
- select IOMMU_DMA
select IOMMU_IO_PGTABLE_ARMV7S
select MEMORY
select MTK_SMI
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 342190196dfb..11f1771104f3 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-y += amd/ intel/ arm/
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
@@ -11,19 +12,8 @@ obj-$(CONFIG_IOASID) += ioasid.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
-obj-$(CONFIG_AMD_IOMMU) += amd/iommu.o amd/init.o amd/quirks.o
-obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd/debugfs.o
-obj-$(CONFIG_AMD_IOMMU_V2) += amd/iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
-arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
-obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
-obj-$(CONFIG_DMAR_TABLE) += intel/dmar.o
-obj-$(CONFIG_INTEL_IOMMU) += intel/iommu.o intel/pasid.o
-obj-$(CONFIG_INTEL_IOMMU) += intel/trace.o
-obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel/debugfs.o
-obj-$(CONFIG_INTEL_IOMMU_SVM) += intel/svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
-obj-$(CONFIG_IRQ_REMAP) += intel/irq_remapping.o irq_remapping.o
+obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
obj-$(CONFIG_MTK_IOMMU) += mtk_iommu.o
obj-$(CONFIG_MTK_IOMMU_V1) += mtk_iommu_v1.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
@@ -35,6 +25,5 @@ obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
-obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
new file mode 100644
index 000000000000..1f061d91e0b8
--- /dev/null
+++ b/drivers/iommu/amd/Kconfig
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# AMD IOMMU support
+config AMD_IOMMU
+ bool "AMD IOMMU support"
+ select SWIOTLB
+ select PCI_MSI
+ select PCI_ATS
+ select PCI_PRI
+ select PCI_PASID
+ select IOMMU_API
+ select IOMMU_IOVA
+ select IOMMU_DMA
+ depends on X86_64 && PCI && ACPI
+ help
+ With this option you can enable support for AMD IOMMU hardware in
+ your system. An IOMMU is a hardware component which provides
+ remapping of DMA memory accesses from devices. With an AMD IOMMU you
+ can isolate the DMA memory of different devices and protect the
+ system from misbehaving device drivers or hardware.
+
+ You can find out if your system has an AMD IOMMU if you look into
+ your BIOS for an option to enable it or if you have an IVRS ACPI
+ table.
+
+config AMD_IOMMU_V2
+ tristate "AMD IOMMU Version 2 driver"
+ depends on AMD_IOMMU
+ select MMU_NOTIFIER
+ help
+ This option enables support for the AMD IOMMUv2 features of the IOMMU
+ hardware. Select this option if you want to use devices that support
+ the PCI PRI and PASID interface.
+
+config AMD_IOMMU_DEBUGFS
+ bool "Enable AMD IOMMU internals in DebugFS"
+ depends on AMD_IOMMU && IOMMU_DEBUGFS
+ help
+ !!!WARNING!!! !!!WARNING!!! !!!WARNING!!! !!!WARNING!!!
+
+ DO NOT ENABLE THIS OPTION UNLESS YOU REALLY, -REALLY- KNOW WHAT YOU ARE DOING!!!
+ Exposes AMD IOMMU device internals in DebugFS.
+
+ This option is -NOT- intended for production environments, and should
+ not generally be enabled.
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
new file mode 100644
index 000000000000..dc5a2fa4fd37
--- /dev/null
+++ b/drivers/iommu/amd/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o
+obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
+obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 6ebd4825e320..958050c213f9 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -720,21 +720,14 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
static void __init free_ppr_log(struct amd_iommu *iommu)
{
- if (iommu->ppr_log == NULL)
- return;
-
free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
- if (iommu->ga_log)
- free_pages((unsigned long)iommu->ga_log,
- get_order(GA_LOG_SIZE));
- if (iommu->ga_log_tail)
- free_pages((unsigned long)iommu->ga_log_tail,
- get_order(8));
+ free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
+ free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}
@@ -1842,7 +1835,7 @@ static void print_iommu_info(void)
pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
- pci_info(pdev, "Extended features (%#llx):\n",
+ pci_info(pdev, "Extended features (%#llx):",
iommu->features);
for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
if (iommu_feature(iommu, (1ULL << i)))
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 2f22326ee4df..ba9f3dbc5b94 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -162,7 +162,18 @@ static void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */
}
-static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode)
+static void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
+{
+ atomic64_set(&domain->pt_root, root);
+}
+
+static void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
+{
+ amd_iommu_domain_set_pt_root(domain, 0);
+}
+
+static void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
+ u64 *root, int mode)
{
u64 pt_root;
@@ -170,7 +181,7 @@ static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode)
pt_root = mode & 7;
pt_root |= (u64)root;
- return pt_root;
+ amd_iommu_domain_set_pt_root(domain, pt_root);
}
static struct iommu_dev_data *alloc_dev_data(u16 devid)
@@ -1410,7 +1421,7 @@ static bool increase_address_space(struct protection_domain *domain,
struct domain_pgtable pgtable;
unsigned long flags;
bool ret = true;
- u64 *pte, root;
+ u64 *pte;
spin_lock_irqsave(&domain->lock, flags);
@@ -1438,8 +1449,7 @@ static bool increase_address_space(struct protection_domain *domain,
* Device Table needs to be updated and flushed before the new root can
* be published.
*/
- root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode);
- atomic64_set(&domain->pt_root, root);
+ amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode);
ret = true;
@@ -2319,7 +2329,7 @@ static void protection_domain_free(struct protection_domain *domain)
domain_id_free(domain->id);
amd_iommu_domain_get_pgtable(domain, &pgtable);
- atomic64_set(&domain->pt_root, 0);
+ amd_iommu_domain_clr_pt_root(domain);
free_pagetable(&pgtable);
kfree(domain);
@@ -2327,7 +2337,7 @@ static void protection_domain_free(struct protection_domain *domain)
static int protection_domain_init(struct protection_domain *domain, int mode)
{
- u64 *pt_root = NULL, root;
+ u64 *pt_root = NULL;
BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
@@ -2343,8 +2353,7 @@ static int protection_domain_init(struct protection_domain *domain, int mode)
return -ENOMEM;
}
- root = amd_iommu_domain_encode_pgtable(pt_root, mode);
- atomic64_set(&domain->pt_root, root);
+ amd_iommu_domain_set_pgtable(domain, pt_root, mode);
return 0;
}
@@ -2713,8 +2722,8 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
/* First save pgtable configuration*/
amd_iommu_domain_get_pgtable(domain, &pgtable);
- /* Update data structure */
- atomic64_set(&domain->pt_root, 0);
+ /* Remove page-table from domain */
+ amd_iommu_domain_clr_pt_root(domain);
/* Make changes visible to IOMMUs */
update_domain(domain);
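
In the hunks above, the paging mode and the page-table root pointer are folded into the single atomic64_t domain->pt_root: the lowest three bits carry the mode and the remaining bits the page-aligned root pointer. A minimal user-space sketch of that packing, with hypothetical encode/decode helpers standing in for the driver's set/get functions:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's packing of domain->pt_root. */
static uint64_t encode_pt_root(uint64_t root_pa, int mode)
{
	/* the root is page aligned, so the low 3 bits are free for the mode */
	return (root_pa & ~0x7ULL) | (mode & 7);
}

static void decode_pt_root(uint64_t pt_root, uint64_t *root_pa, int *mode)
{
	*root_pa = pt_root & ~0x7ULL;	/* mask off the mode bits */
	*mode    = pt_root & 7;		/* lowest 3 bits encode pgtable mode */
}

int main(void)
{
	uint64_t root, pt = encode_pt_root(0x12345000, 3);
	int mode;

	decode_pt_root(pt, &root, &mode);
	printf("root=%#llx mode=%d\n", (unsigned long long)root, mode);
	return 0;
}
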
diff --git a/drivers/iommu/arm/Makefile b/drivers/iommu/arm/Makefile
new file mode 100644
index 000000000000..0f9efeab709f
--- /dev/null
+++ b/drivers/iommu/arm/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += arm-smmu/ arm-smmu-v3/
diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile
new file mode 100644
index 000000000000..569e24e9f162
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu-v3/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index f578677a5c41..7196207be7ea 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1479,7 +1479,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
}
/*
- * Try to unlock the cmq lock. This will fail if we're the last
+ * Try to unlock the cmdq lock. This will fail if we're the last
* reader, in which case we can safely update cmdq->q.llq.cons
*/
if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
@@ -2850,7 +2850,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
if (!ops)
return -ENODEV;
- return ops->map(ops, iova, paddr, size, prot);
+ return ops->map(ops, iova, paddr, size, prot, gfp);
}
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
diff --git a/drivers/iommu/arm/arm-smmu/Makefile b/drivers/iommu/arm/arm-smmu/Makefile
new file mode 100644
index 000000000000..e240a7bcf310
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
+obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
+arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o arm-smmu-qcom.o
diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index c75b9d957b70..f4ff124a1967 100644
--- a/drivers/iommu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -147,16 +147,57 @@ static const struct arm_smmu_impl arm_mmu500_impl = {
.reset = arm_mmu500_reset,
};
+static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off)
+{
+ /*
+ * Marvell Armada-AP806 erratum #582743.
+ * Split all the readq to double readl
+ */
+ return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off);
+}
+
+static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, int off,
+ u64 val)
+{
+ /*
+ * Marvell Armada-AP806 erratum #582743.
+ * Split all the writeq to double writel
+ */
+ hi_lo_writeq_relaxed(val, arm_smmu_page(smmu, page) + off);
+}
+
+static int mrvl_mmu500_cfg_probe(struct arm_smmu_device *smmu)
+{
+
+ /*
+ * Armada-AP806 erratum #582743.
+ * Hide the SMMU_IDR2.PTFSv8 fields to sidestep the AArch64
+ * formats altogether and allow using 32 bits access on the
+ * interconnect.
+ */
+ smmu->features &= ~(ARM_SMMU_FEAT_FMT_AARCH64_4K |
+ ARM_SMMU_FEAT_FMT_AARCH64_16K |
+ ARM_SMMU_FEAT_FMT_AARCH64_64K);
+
+ return 0;
+}
+
+static const struct arm_smmu_impl mrvl_mmu500_impl = {
+ .read_reg64 = mrvl_mmu500_readq,
+ .write_reg64 = mrvl_mmu500_writeq,
+ .cfg_probe = mrvl_mmu500_cfg_probe,
+ .reset = arm_mmu500_reset,
+};
+
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
{
const struct device_node *np = smmu->dev->of_node;
/*
- * We will inevitably have to combine model-specific implementation
- * quirks with platform-specific integration quirks, but everything
- * we currently support happens to work out as straightforward
- * mutually-exclusive assignments.
+ * Set the impl for model-specific implementation quirks first,
+ * such that platform integration quirks can pick it up and
+ * inherit from it if necessary.
*/
switch (smmu->model) {
case ARM_MMU500:
@@ -168,12 +209,21 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
break;
}
+ /* This is implicitly MMU-400 */
if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
smmu->impl = &calxeda_impl;
+ if (of_device_is_compatible(np, "nvidia,tegra194-smmu"))
+ return nvidia_smmu_impl_init(smmu);
+
if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") ||
- of_device_is_compatible(np, "qcom,sc7180-smmu-500"))
+ of_device_is_compatible(np, "qcom,sc7180-smmu-500") ||
+ of_device_is_compatible(np, "qcom,sm8150-smmu-500") ||
+ of_device_is_compatible(np, "qcom,sm8250-smmu-500"))
return qcom_smmu_impl_init(smmu);
+ if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
+ smmu->impl = &mrvl_mmu500_impl;
+
return smmu;
}
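
The hi_lo_* accessors used for the Marvell erratum come from <linux/io-64-nonatomic-hi-lo.h> and split each 64-bit register access into two 32-bit accesses, upper half first. A rough user-space sketch of that behaviour (not the kernel's exact implementation, and assuming the low half of the register sits at offset 0):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: split a 64-bit register access into two 32-bit
 * accesses, upper half first, as the hi_lo_* helpers do for MMIO. */
static uint64_t hi_lo_read64(const volatile uint32_t *reg)
{
	uint64_t hi = reg[1];	/* upper 32 bits at offset +4 */
	uint64_t lo = reg[0];	/* lower 32 bits at offset +0 */

	return (hi << 32) | lo;
}

static void hi_lo_write64(volatile uint32_t *reg, uint64_t val)
{
	reg[1] = val >> 32;		/* write the upper half first */
	reg[0] = (uint32_t)val;		/* then the lower half */
}

int main(void)
{
	uint32_t fake_reg[2] = { 0, 0 };

	hi_lo_write64(fake_reg, 0x1122334455667788ULL);
	printf("%#llx\n", (unsigned long long)hi_lo_read64(fake_reg));
	return 0;
}
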
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
new file mode 100644
index 000000000000..31368057e9be
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019-2020 NVIDIA CORPORATION. All rights reserved.
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "arm-smmu.h"
+
+/*
+ * Tegra194 has three ARM MMU-500 instances.
+ * Two of them are used together and must be programmed identically to
+ * interleave IOVA accesses across them and to translate accesses from
+ * non-isochronous HW devices.
+ * The third one is used for translating accesses from isochronous HW devices.
+ * This implementation supports programming of the two instances that must
+ * be programmed identically.
+ * The third instance is used through the standard arm-smmu driver itself and
+ * is out of scope of this implementation.
+ */
+#define NUM_SMMU_INSTANCES 2
+
+struct nvidia_smmu {
+ struct arm_smmu_device smmu;
+ void __iomem *bases[NUM_SMMU_INSTANCES];
+};
+
+static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
+ unsigned int inst, int page)
+{
+ struct nvidia_smmu *nvidia_smmu;
+
+ nvidia_smmu = container_of(smmu, struct nvidia_smmu, smmu);
+ return nvidia_smmu->bases[inst] + (page << smmu->pgshift);
+}
+
+static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
+ int page, int offset)
+{
+ void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;
+
+ return readl_relaxed(reg);
+}
+
+static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
+ int page, int offset, u32 val)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
+
+ writel_relaxed(val, reg);
+ }
+}
+
+static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
+ int page, int offset)
+{
+ void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;
+
+ return readq_relaxed(reg);
+}
+
+static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
+ int page, int offset, u64 val)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
+
+ writeq_relaxed(val, reg);
+ }
+}
+
+static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
+ int sync, int status)
+{
+ unsigned int delay;
+
+ arm_smmu_writel(smmu, page, sync, 0);
+
+ for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
+ unsigned int spin_cnt;
+
+ for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
+ u32 val = 0;
+ unsigned int i;
+
+ for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ void __iomem *reg;
+
+ reg = nvidia_smmu_page(smmu, i, page) + status;
+ val |= readl_relaxed(reg);
+ }
+
+ if (!(val & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
+ return;
+
+ cpu_relax();
+ }
+
+ udelay(delay);
+ }
+
+ dev_err_ratelimited(smmu->dev,
+ "TLB sync timed out -- SMMU may be deadlocked\n");
+}
+
+static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ u32 val;
+ void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
+ ARM_SMMU_GR0_sGFSR;
+
+ /* clear global FSR */
+ val = readl_relaxed(reg);
+ writel_relaxed(val, reg);
+ }
+
+ return 0;
+}
+
+static irqreturn_t nvidia_smmu_global_fault_inst(int irq,
+ struct arm_smmu_device *smmu,
+ int inst)
+{
+ u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
+ void __iomem *gr0_base = nvidia_smmu_page(smmu, inst, 0);
+
+ gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+ if (!gfsr)
+ return IRQ_NONE;
+
+ gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
+ gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
+ gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+
+ dev_err_ratelimited(smmu->dev,
+ "Unexpected global fault, this could be serious\n");
+ dev_err_ratelimited(smmu->dev,
+ "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+ gfsr, gfsynr0, gfsynr1, gfsynr2);
+
+ writel_relaxed(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
+{
+ unsigned int inst;
+ irqreturn_t ret = IRQ_NONE;
+ struct arm_smmu_device *smmu = dev;
+
+ for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+ irqreturn_t irq_ret;
+
+ irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
+ if (irq_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static irqreturn_t nvidia_smmu_context_fault_bank(int irq,
+ struct arm_smmu_device *smmu,
+ int idx, int inst)
+{
+ u32 fsr, fsynr, cbfrsynra;
+ unsigned long iova;
+ void __iomem *gr1_base = nvidia_smmu_page(smmu, inst, 1);
+ void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx);
+
+ fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+ if (!(fsr & ARM_SMMU_FSR_FAULT))
+ return IRQ_NONE;
+
+ fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+ iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
+ cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(idx));
+
+ dev_err_ratelimited(smmu->dev,
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cbfrsynra, idx);
+
+ writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
+{
+ int idx;
+ unsigned int inst;
+ irqreturn_t ret = IRQ_NONE;
+ struct arm_smmu_device *smmu;
+ struct iommu_domain *domain = dev;
+ struct arm_smmu_domain *smmu_domain;
+
+ smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
+ smmu = smmu_domain->smmu;
+
+ for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+ irqreturn_t irq_ret;
+
+ /*
+ * Interrupt line is shared between all contexts.
+ * Check for faults across all contexts.
+ */
+ for (idx = 0; idx < smmu->num_context_banks; idx++) {
+ irq_ret = nvidia_smmu_context_fault_bank(irq, smmu,
+ idx, inst);
+ if (irq_ret == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+static const struct arm_smmu_impl nvidia_smmu_impl = {
+ .read_reg = nvidia_smmu_read_reg,
+ .write_reg = nvidia_smmu_write_reg,
+ .read_reg64 = nvidia_smmu_read_reg64,
+ .write_reg64 = nvidia_smmu_write_reg64,
+ .reset = nvidia_smmu_reset,
+ .tlb_sync = nvidia_smmu_tlb_sync,
+ .global_fault = nvidia_smmu_global_fault,
+ .context_fault = nvidia_smmu_context_fault,
+};
+
+struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+ struct resource *res;
+ struct device *dev = smmu->dev;
+ struct nvidia_smmu *nvidia_smmu;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ nvidia_smmu = devm_kzalloc(dev, sizeof(*nvidia_smmu), GFP_KERNEL);
+ if (!nvidia_smmu)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Copy the data from struct arm_smmu_device *smmu allocated in
+ * arm-smmu.c. The smmu from struct nvidia_smmu replaces the smmu
+ * pointer used in arm-smmu.c once this function returns.
+ * This is necessary so that nvidia_smmu can later be derived from the
+ * smmu pointer passed through the arm_smmu_impl function calls.
+ */
+ nvidia_smmu->smmu = *smmu;
+ /* Instance 0 is ioremapped by arm-smmu.c. */
+ nvidia_smmu->bases[0] = smmu->base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ return ERR_PTR(-ENODEV);
+
+ nvidia_smmu->bases[1] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nvidia_smmu->bases[1]))
+ return ERR_CAST(nvidia_smmu->bases[1]);
+
+ nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
+
+ /*
+ * Free the struct arm_smmu_device *smmu allocated in arm-smmu.c.
+ * Once this function returns, arm-smmu.c will use the arm_smmu_device
+ * allocated as part of struct nvidia_smmu.
+ */
+ devm_kfree(dev, smmu);
+
+ return &nvidia_smmu->smmu;
+}
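
nvidia_smmu_impl_init() relies on the common pattern of embedding the generic device structure inside a larger private one and recovering the wrapper with container_of() in the implementation hooks. A stripped-down, user-space illustration of that pattern with invented names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_dev {		/* stands in for struct arm_smmu_device */
	int id;
};

struct wrapped_dev {		/* stands in for struct nvidia_smmu */
	struct generic_dev dev;	/* must stay embedded, not a pointer */
	int extra_base;
};

int main(void)
{
	struct wrapped_dev w = { .dev = { .id = 1 }, .extra_base = 42 };
	struct generic_dev *g = &w.dev;	/* what the core code passes around */

	/* an implementation hook gets 'g' back and recovers the wrapper */
	struct wrapped_dev *again = container_of(g, struct wrapped_dev, dev);

	printf("extra_base=%d\n", again->extra_base);
	return 0;
}
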
diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index be4318044f96..be4318044f96 100644
--- a/drivers/iommu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 243bc4cb2705..09c42af9f31e 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -52,9 +52,6 @@
*/
#define QCOM_DUMMY_VAL -1
-#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
-#define TLB_SPIN_COUNT 10
-
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
@@ -673,6 +670,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
enum io_pgtable_fmt fmt;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ irqreturn_t (*context_fault)(int irq, void *dev);
mutex_lock(&smmu_domain->init_mutex);
if (smmu_domain->smmu)
@@ -835,7 +833,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
* handler seeing a half-initialised domain state.
*/
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
- ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
+
+ if (smmu->impl && smmu->impl->context_fault)
+ context_fault = smmu->impl->context_fault;
+ else
+ context_fault = arm_smmu_context_fault;
+
+ ret = devm_request_irq(smmu->dev, irq, context_fault,
IRQF_SHARED, "arm-smmu-context-fault", domain);
if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
@@ -1227,7 +1231,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
arm_smmu_rpm_get(smmu);
- ret = ops->map(ops, iova, paddr, size, prot);
+ ret = ops->map(ops, iova, paddr, size, prot, gfp);
arm_smmu_rpm_put(smmu);
return ret;
@@ -1728,7 +1732,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
unsigned int size;
u32 id;
bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
- int i;
+ int i, ret;
dev_notice(smmu->dev, "probing hardware configuration...\n");
dev_notice(smmu->dev, "SMMUv%d with:\n",
@@ -1891,6 +1895,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
}
+ if (smmu->impl && smmu->impl->cfg_probe) {
+ ret = smmu->impl->cfg_probe(smmu);
+ if (ret)
+ return ret;
+ }
+
/* Now we've corralled the various formats, what'll it do? */
if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
@@ -1918,9 +1928,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
smmu->ipa_size, smmu->pa_size);
- if (smmu->impl && smmu->impl->cfg_probe)
- return smmu->impl->cfg_probe(smmu);
-
return 0;
}
@@ -1946,6 +1953,7 @@ static const struct of_device_id arm_smmu_of_match[] = {
{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
+ { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
{ },
};
@@ -2107,6 +2115,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
+ irqreturn_t (*global_fault)(int irq, void *dev);
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
@@ -2123,10 +2132,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (err)
return err;
- smmu = arm_smmu_impl_init(smmu);
- if (IS_ERR(smmu))
- return PTR_ERR(smmu);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
@@ -2138,6 +2143,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
*/
smmu->numpage = resource_size(res);
+ smmu = arm_smmu_impl_init(smmu);
+ if (IS_ERR(smmu))
+ return PTR_ERR(smmu);
+
num_irqs = 0;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
num_irqs++;
@@ -2193,9 +2202,14 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
smmu->num_context_irqs = smmu->num_context_banks;
}
+ if (smmu->impl && smmu->impl->global_fault)
+ global_fault = smmu->impl->global_fault;
+ else
+ global_fault = arm_smmu_global_fault;
+
for (i = 0; i < smmu->num_global_irqs; ++i) {
err = devm_request_irq(smmu->dev, smmu->irqs[i],
- arm_smmu_global_fault,
+ global_fault,
IRQF_SHARED,
"arm-smmu global fault",
smmu);
diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index d172c024be61..d890a4a968e8 100644
--- a/drivers/iommu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -18,6 +18,7 @@
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
+#include <linux/irqreturn.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -236,6 +237,8 @@ enum arm_smmu_cbar_type {
/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS 128
+#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
+#define TLB_SPIN_COUNT 10
/* Shared driver definitions */
enum arm_smmu_arch_version {
@@ -387,6 +390,8 @@ struct arm_smmu_impl {
void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
int status);
int (*def_domain_type)(struct device *dev);
+ irqreturn_t (*global_fault)(int irq, void *dev);
+ irqreturn_t (*context_fault)(int irq, void *dev);
};
static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
@@ -450,6 +455,7 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v))
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
+struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu);
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
int arm_mmu500_reset(struct arm_smmu_device *smmu);
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index d176df569af8..af6bec3ace00 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -37,14 +37,20 @@
#define SMMU_INTR_SEL_NS 0x2000
+enum qcom_iommu_clk {
+ CLK_IFACE,
+ CLK_BUS,
+ CLK_TBU,
+ CLK_NUM,
+};
+
struct qcom_iommu_ctx;
struct qcom_iommu_dev {
/* IOMMU core code handle */
struct iommu_device iommu;
struct device *dev;
- struct clk *iface_clk;
- struct clk *bus_clk;
+ struct clk_bulk_data clks[CLK_NUM];
void __iomem *local_base;
u32 sec_id;
u8 num_ctxs;
@@ -301,7 +307,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
ARM_SMMU_SCTLR_CFCFG;
- if (IS_ENABLED(CONFIG_BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
reg |= ARM_SMMU_SCTLR_E;
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
@@ -438,7 +444,7 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
- ret = ops->map(ops, iova, paddr, size, prot);
+ ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC);
spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
return ret;
}
@@ -613,32 +619,6 @@ static const struct iommu_ops qcom_iommu_ops = {
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};
-static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu)
-{
- int ret;
-
- ret = clk_prepare_enable(qcom_iommu->iface_clk);
- if (ret) {
- dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n");
- return ret;
- }
-
- ret = clk_prepare_enable(qcom_iommu->bus_clk);
- if (ret) {
- dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n");
- clk_disable_unprepare(qcom_iommu->iface_clk);
- return ret;
- }
-
- return 0;
-}
-
-static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
-{
- clk_disable_unprepare(qcom_iommu->bus_clk);
- clk_disable_unprepare(qcom_iommu->iface_clk);
-}
-
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
size_t psize = 0;
@@ -795,6 +775,7 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
struct qcom_iommu_dev *qcom_iommu;
struct device *dev = &pdev->dev;
struct resource *res;
+ struct clk *clk;
int ret, max_asid = 0;
/* find the max asid (which is 1:1 to ctx bank idx), so we know how
@@ -817,17 +798,26 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
return PTR_ERR(qcom_iommu->local_base);
}
- qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
- if (IS_ERR(qcom_iommu->iface_clk)) {
+ clk = devm_clk_get(dev, "iface");
+ if (IS_ERR(clk)) {
dev_err(dev, "failed to get iface clock\n");
- return PTR_ERR(qcom_iommu->iface_clk);
+ return PTR_ERR(clk);
}
+ qcom_iommu->clks[CLK_IFACE].clk = clk;
- qcom_iommu->bus_clk = devm_clk_get(dev, "bus");
- if (IS_ERR(qcom_iommu->bus_clk)) {
+ clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(clk)) {
dev_err(dev, "failed to get bus clock\n");
- return PTR_ERR(qcom_iommu->bus_clk);
+ return PTR_ERR(clk);
+ }
+ qcom_iommu->clks[CLK_BUS].clk = clk;
+
+ clk = devm_clk_get_optional(dev, "tbu");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get tbu clock\n");
+ return PTR_ERR(clk);
}
+ qcom_iommu->clks[CLK_TBU].clk = clk;
if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
&qcom_iommu->sec_id)) {
@@ -899,14 +889,14 @@ static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
- return qcom_iommu_enable_clocks(qcom_iommu);
+ return clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
}
static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
- qcom_iommu_disable_clocks(qcom_iommu);
+ clk_bulk_disable_unprepare(CLK_NUM, qcom_iommu->clks);
return 0;
}
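
The qcom_iommu conversion above replaces two hand-managed clocks with a clk_bulk_data array so that enabling and disabling can be done in one call; the driver fills the .clk members by hand because the new "tbu" clock is optional. As a sketch of the same clk_bulk pattern in its simplest form (a hypothetical driver, using devm_clk_bulk_get() instead of the per-clock lookups):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical example: grab two named clocks and toggle them as a group. */
static struct clk_bulk_data example_clks[] = {
	{ .id = "iface" },
	{ .id = "bus" },
};

static int example_clocks_on(struct device *dev)
{
	int ret;

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(example_clks), example_clks);
	if (ret)
		return ret;

	/* one call replaces a hand-rolled enable/rollback sequence */
	return clk_bulk_prepare_enable(ARRAY_SIZE(example_clks), example_clks);
}

static void example_clocks_off(void)
{
	clk_bulk_disable_unprepare(ARRAY_SIZE(example_clks), example_clks);
}
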
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 60c8a56e4a3f..bad3c0ce10cb 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -173,7 +173,7 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080
-#define has_sysmmu(dev) (dev->archdata.iommu != NULL)
+#define has_sysmmu(dev) (dev_iommu_priv_get(dev) != NULL)
static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
@@ -226,7 +226,7 @@ static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
};
/*
- * This structure is attached to dev.archdata.iommu of the master device
+ * This structure is attached to dev->iommu->priv of the master device
* on device add, contains a list of SYSMMU controllers defined by device tree,
* which are bound to given master device. It is usually referenced by 'owner'
* pointer.
@@ -670,7 +670,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
struct device *master = data->master;
if (master) {
- struct exynos_iommu_owner *owner = master->archdata.iommu;
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
if (data->domain) {
@@ -688,7 +688,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
struct device *master = data->master;
if (master) {
- struct exynos_iommu_owner *owner = master->archdata.iommu;
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
if (data->domain) {
@@ -721,7 +721,7 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
}
};
-static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
+static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
DMA_TO_DEVICE);
@@ -837,8 +837,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
struct device *dev)
{
- struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
phys_addr_t pagetable = virt_to_phys(domain->pgtable);
struct sysmmu_drvdata *data, *next;
unsigned long flags;
@@ -875,8 +875,8 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
struct device *dev)
{
- struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
phys_addr_t pagetable = virt_to_phys(domain->pgtable);
unsigned long flags;
@@ -933,7 +933,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
if (!pent)
return ERR_PTR(-ENOMEM);
- update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
+ exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
kmemleak_ignore(pent);
*pgcounter = NUM_LV2ENTRIES;
handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
@@ -994,7 +994,7 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
*pgcnt = 0;
}
- update_pte(sent, mk_lv1ent_sect(paddr, prot));
+ exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));
spin_lock(&domain->lock);
if (lv1ent_page_zero(sent)) {
@@ -1018,7 +1018,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
if (WARN_ON(!lv2ent_fault(pent)))
return -EADDRINUSE;
- update_pte(pent, mk_lv2ent_spage(paddr, prot));
+ exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
*pgcnt -= 1;
} else { /* size == LPAGE_SIZE */
int i;
@@ -1150,7 +1150,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
}
/* workaround for h/w bug in System MMU v3.3 */
- update_pte(ent, ZERO_LV2LINK);
+ exynos_iommu_set_pte(ent, ZERO_LV2LINK);
size = SECT_SIZE;
goto done;
}
@@ -1171,7 +1171,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
}
if (lv2ent_small(ent)) {
- update_pte(ent, 0);
+ exynos_iommu_set_pte(ent, 0);
size = SPAGE_SIZE;
domain->lv2entcnt[lv1ent_offset(iova)] += 1;
goto done;
@@ -1237,7 +1237,7 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
- struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
if (!has_sysmmu(dev))
@@ -1263,7 +1263,7 @@ static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
static void exynos_iommu_release_device(struct device *dev)
{
- struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
if (!has_sysmmu(dev))
@@ -1287,8 +1287,8 @@ static void exynos_iommu_release_device(struct device *dev)
static int exynos_iommu_of_xlate(struct device *dev,
struct of_phandle_args *spec)
{
- struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct platform_device *sysmmu = of_find_device_by_node(spec->np);
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data, *entry;
if (!sysmmu)
@@ -1305,7 +1305,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
INIT_LIST_HEAD(&owner->controllers);
mutex_init(&owner->rpm_lock);
- dev->archdata.iommu = owner;
+ dev_iommu_priv_set(dev, owner);
}
list_for_each_entry(entry, &owner->controllers, owner_node)
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index cde281b97afa..099a11a35fb9 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -1174,10 +1174,7 @@ error:
if (irq != NO_IRQ)
free_irq(irq, data);
- if (data) {
- memset(data, 0, sizeof(struct pamu_isr_data));
- kfree(data);
- }
+ kzfree(data);
if (pamu_regs)
iounmap(pamu_regs);
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 928d37771ece..b2110767caf4 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -323,7 +323,7 @@ static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
pamu_disable_liodn(info->liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
spin_lock_irqsave(&device_domain_lock, flags);
- info->dev->archdata.iommu_domain = NULL;
+ dev_iommu_priv_set(info->dev, NULL);
kmem_cache_free(iommu_devinfo_cache, info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@@ -352,7 +352,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
* Check here if the device is already attached to domain or not.
* If the device is already attached to a domain detach it.
*/
- old_domain_info = dev->archdata.iommu_domain;
+ old_domain_info = dev_iommu_priv_get(dev);
if (old_domain_info && old_domain_info->domain != dma_domain) {
spin_unlock_irqrestore(&device_domain_lock, flags);
detach_device(dev, old_domain_info->domain);
@@ -371,8 +371,8 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
* the info for the first LIODN as all
* LIODNs share the same domain
*/
- if (!dev->archdata.iommu_domain)
- dev->archdata.iommu_domain = info;
+ if (!dev_iommu_priv_get(dev))
+ dev_iommu_priv_set(dev, info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
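
Several of the conversions in this series (exynos above, fsl_pamu here, and intel further down) replace the architecture-specific dev->archdata.iommu pointer with the generic dev_iommu_priv_get()/dev_iommu_priv_set() accessors. Conceptually these are just a typed per-device private pointer; a purely illustrative stand-alone sketch, not the kernel's actual definitions:

#include <stddef.h>

/* Illustrative only: a per-device private pointer with accessors, which is
 * conceptually what dev_iommu_priv_get()/dev_iommu_priv_set() provide. */
struct example_dev {
	void *iommu_priv;
};

static inline void *example_iommu_priv_get(struct example_dev *dev)
{
	return dev->iommu_priv;
}

static inline void example_iommu_priv_set(struct example_dev *dev, void *priv)
{
	dev->iommu_priv = priv;
}
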
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
new file mode 100644
index 000000000000..5337ee1584b0
--- /dev/null
+++ b/drivers/iommu/intel/Kconfig
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Intel IOMMU support
+config DMAR_TABLE
+ bool
+
+config INTEL_IOMMU
+ bool "Support for Intel IOMMU using DMA Remapping Devices"
+ depends on PCI_MSI && ACPI && (X86 || IA64)
+ select DMA_OPS
+ select IOMMU_API
+ select IOMMU_IOVA
+ select NEED_DMA_MAP_STATE
+ select DMAR_TABLE
+ select SWIOTLB
+ select IOASID
+ help
+ DMA remapping (DMAR) devices support enables independent address
+ translations for Direct Memory Access (DMA) from devices.
+ These DMA remapping devices are reported via ACPI tables
+ and include PCI device scope covered by these DMA
+ remapping devices.
+
+config INTEL_IOMMU_DEBUGFS
+ bool "Export Intel IOMMU internals in Debugfs"
+ depends on INTEL_IOMMU && IOMMU_DEBUGFS
+ help
+ !!!WARNING!!!
+
+ DO NOT ENABLE THIS OPTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!!
+
+ Expose Intel IOMMU internals in Debugfs.
+
+ This option is -NOT- intended for production environments, and should
+ only be enabled for debugging Intel IOMMU.
+
+config INTEL_IOMMU_SVM
+ bool "Support for Shared Virtual Memory with Intel IOMMU"
+ depends on INTEL_IOMMU && X86_64
+ select PCI_PASID
+ select PCI_PRI
+ select MMU_NOTIFIER
+ select IOASID
+ help
+ Shared Virtual Memory (SVM) provides a facility for devices
+ to access DMA resources through process address space by
+ means of a Process Address Space ID (PASID).
+
+config INTEL_IOMMU_DEFAULT_ON
+ def_bool y
+ prompt "Enable Intel DMA Remapping Devices by default"
+ depends on INTEL_IOMMU
+ help
+ Selecting this option will enable a DMAR device at boot time if
+ one is found. If this option is not selected, DMAR support can
+ be enabled by passing intel_iommu=on to the kernel.
+
+config INTEL_IOMMU_BROKEN_GFX_WA
+ bool "Workaround broken graphics drivers (going away soon)"
+ depends on INTEL_IOMMU && BROKEN && X86
+ help
+ Current Graphics drivers tend to use physical address
+ for DMA and avoid using DMA APIs. Setting this config
+ option permits the IOMMU driver to set a unity map for
+ all the OS-visible memory. Hence the driver can continue
+ to use physical addresses for DMA, at least until this
+ option is removed in the 2.6.32 kernel.
+
+config INTEL_IOMMU_FLOPPY_WA
+ def_bool y
+ depends on INTEL_IOMMU && X86
+ help
+ Floppy disk drivers are known to bypass DMA API calls
+ thereby failing to work when IOMMU is enabled. This
+ workaround will setup a 1:1 mapping for the first
+ 16MiB to make floppy (an ISA device) work.
+
+config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+ bool "Enable Intel IOMMU scalable mode by default"
+ depends on INTEL_IOMMU
+ help
+ Selecting this option will enable by default the scalable mode if
+ hardware presents the capability. The scalable mode is defined in
+ VT-d 3.0. The scalable mode capability could be checked by reading
+ /sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option
+ is not selected, scalable mode support could also be enabled by
+ passing intel_iommu=sm_on to the kernel. If not sure, please use
+ the default value.
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
new file mode 100644
index 000000000000..fb8e1e8c8029
--- /dev/null
+++ b/drivers/iommu/intel/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DMAR_TABLE) += dmar.o
+obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o
+obj-$(CONFIG_INTEL_IOMMU) += trace.o
+obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
+obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
+obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index cf1ebb98e418..efea7f02abd9 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -15,7 +15,7 @@
#include <asm/irq_remapping.h>
-#include "intel-pasid.h"
+#include "pasid.h"
struct tbl_walk {
u16 bus;
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 683b812c5c47..93e6345f3414 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1102,6 +1102,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
}
drhd->iommu = iommu;
+ iommu->drhd = drhd;
return 0;
@@ -1438,8 +1439,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
/* PASID-based device IOTLB Invalidate */
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
- u32 pasid, u16 qdep, u64 addr,
- unsigned int size_order, u64 granu)
+ u32 pasid, u16 qdep, u64 addr, unsigned int size_order)
{
unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
@@ -1447,7 +1447,6 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
QI_DEV_IOTLB_PFSID(pfsid);
- desc.qw1 = QI_DEV_EIOTLB_GLOB(granu);
/*
* If S bit is 0, we only flush a single page. If S bit is set,
@@ -1458,9 +1457,26 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
* Max Invs Pending (MIP) is set to 0 for now until we have DIT in
* ECAP.
*/
- desc.qw1 |= addr & ~mask;
- if (size_order)
+ if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
+ pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
+ addr, size_order);
+
+ /* Take page address */
+ desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);
+
+ if (size_order) {
+ /*
+ * Existing 0s in the address below size_order may be the least
+ * significant bits; we must set them to 1s to avoid having a
+ * smaller size than desired.
+ */
+ desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
+ VTD_PAGE_SHIFT);
+ /* Clear size_order bit to indicate size */
+ desc.qw1 &= ~mask;
+ /* Set the S bit to indicate flushing more than 1 page */
desc.qw1 |= QI_DEV_EIOTLB_SIZE;
+ }
qi_submit_sync(iommu, &desc, 1, 0);
}
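
The new address encoding in qi_flush_dev_iotlb_pasid() is easiest to see with a worked example: for 2^size_order pages, the bits below bit (VTD_PAGE_SHIFT + size_order - 1) are forced to 1, that bit itself is cleared, and the S bit marks a multi-page flush. A small user-space sketch of the arithmetic (bit 11 stands in for QI_DEV_EIOTLB_SIZE here, an assumption of this example):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Worked example of the device-IOTLB address/size encoding: bits below the
 * "size bit" are forced to 1 and the size bit itself is forced to 0. */
static uint64_t encode_addr(uint64_t addr, unsigned int size_order)
{
	uint64_t qw = addr & ~((1ULL << PAGE_SHIFT) - 1);	/* page address */

	if (size_order) {
		uint64_t ones = ((1ULL << size_order) - 1) << PAGE_SHIFT;
		uint64_t size_bit = 1ULL << (PAGE_SHIFT + size_order - 1);

		qw |= ones;		/* 1s below the size bit */
		qw &= ~size_bit;	/* the size bit itself is 0 */
		qw |= 1ULL << 11;	/* S bit: flushing more than one page */
	}
	return qw;
}

int main(void)
{
	/* 4 pages (size_order = 2) at 0x12340000: bit 12 set, bit 13 clear */
	printf("%#llx\n", (unsigned long long)encode_addr(0x12340000ULL, 2));
	return 0;
}
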
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 237a470e1e9c..e9864e52b0e9 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -48,7 +48,7 @@
#include <trace/events/intel_iommu.h>
#include "../irq_remapping.h"
-#include "intel-pasid.h"
+#include "pasid.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
@@ -356,6 +356,7 @@ static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
static int intel_no_bounce;
+static int iommu_skip_te_disable;
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
@@ -372,7 +373,7 @@ struct device_domain_info *get_domain_info(struct device *dev)
if (!dev)
return NULL;
- info = dev->archdata.iommu;
+ info = dev_iommu_priv_get(dev);
if (unlikely(info == DUMMY_DEVICE_DOMAIN_INFO ||
info == DEFER_DEVICE_DOMAIN_INFO))
return NULL;
@@ -743,12 +744,12 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
static int iommu_dummy(struct device *dev)
{
- return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+ return dev_iommu_priv_get(dev) == DUMMY_DEVICE_DOMAIN_INFO;
}
static bool attach_deferred(struct device *dev)
{
- return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
+ return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
}
/**
@@ -778,16 +779,16 @@ is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
return false;
}
-static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
+struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
+ struct pci_dev *pdev = NULL;
struct intel_iommu *iommu;
struct device *tmp;
- struct pci_dev *pdev = NULL;
u16 segment = 0;
int i;
- if (iommu_dummy(dev))
+ if (!dev || iommu_dummy(dev))
return NULL;
if (dev_is_pci(dev)) {
@@ -818,8 +819,10 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
if (pdev && pdev->is_virtfn)
goto got_pdev;
- *bus = drhd->devices[i].bus;
- *devfn = drhd->devices[i].devfn;
+ if (bus && devfn) {
+ *bus = drhd->devices[i].bus;
+ *devfn = drhd->devices[i].devfn;
+ }
goto out;
}
@@ -829,8 +832,10 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
if (pdev && drhd->include_all) {
got_pdev:
- *bus = pdev->bus->number;
- *devfn = pdev->devfn;
+ if (bus && devfn) {
+ *bus = pdev->bus->number;
+ *devfn = pdev->devfn;
+ }
goto out;
}
}
@@ -1629,6 +1634,10 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
u32 sts;
unsigned long flag;
+ if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
+ (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
+ return;
+
raw_spin_lock_irqsave(&iommu->register_lock, flag);
iommu->gcmd &= ~DMA_GCMD_TE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
@@ -2420,7 +2429,7 @@ static inline void unlink_domain_info(struct device_domain_info *info)
list_del(&info->link);
list_del(&info->global);
if (info->dev)
- info->dev->archdata.iommu = NULL;
+ dev_iommu_priv_set(info->dev, NULL);
}
static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -2453,7 +2462,7 @@ static void do_deferred_attach(struct device *dev)
{
struct iommu_domain *domain;
- dev->archdata.iommu = NULL;
+ dev_iommu_priv_set(dev, NULL);
domain = iommu_get_domain_for_dev(dev);
if (domain)
intel_iommu_attach_device(domain, dev);
@@ -2599,7 +2608,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
list_add(&info->link, &domain->devices);
list_add(&info->global, &device_domain_list);
if (dev)
- dev->archdata.iommu = info;
+ dev_iommu_priv_set(dev, info);
spin_unlock_irqrestore(&device_domain_lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
@@ -4004,7 +4013,7 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
- pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+ dev_iommu_priv_set(&pdev->dev, DUMMY_DEVICE_DOMAIN_INFO);
}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
@@ -4039,11 +4048,12 @@ static void __init init_no_remapping_devices(void)
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
+ drhd->gfx_dedicated = 1;
if (!dmar_map_gfx) {
drhd->ignored = 1;
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, dev)
- dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+ dev_iommu_priv_set(dev, DUMMY_DEVICE_DOMAIN_INFO);
}
}
}
@@ -5146,11 +5156,10 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
struct device *dev)
{
int ret;
- u8 bus, devfn;
unsigned long flags;
struct intel_iommu *iommu;
- iommu = device_to_iommu(dev, &bus, &devfn);
+ iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return -ENODEV;
@@ -5236,9 +5245,8 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct intel_iommu *iommu;
int addr_width;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
+ iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return -ENODEV;
@@ -5416,7 +5424,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
sid = PCI_DEVID(bus, devfn);
/* Size is only valid in address selective invalidation */
- if (inv_info->granularity != IOMMU_INV_GRANU_PASID)
+ if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
size = to_vtd_size(inv_info->addr_info.granule_size,
inv_info->addr_info.nb_granules);
@@ -5425,6 +5433,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
IOMMU_CACHE_INV_TYPE_NR) {
int granu = 0;
u64 pasid = 0;
+ u64 addr = 0;
granu = to_vtd_granularity(cache_type, inv_info->granularity);
if (granu == -EINVAL) {
@@ -5446,13 +5455,12 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
switch (BIT(cache_type)) {
case IOMMU_CACHE_INV_TYPE_IOTLB:
+ /* HW will ignore LSB bits based on address mask */
if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
size &&
(inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
- pr_err_ratelimited("Address out of range, 0x%llx, size order %llu\n",
+ pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
inv_info->addr_info.addr, size);
- ret = -ERANGE;
- goto out_unlock;
}
/*
@@ -5464,25 +5472,35 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
(granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
+ if (!info->ats_enabled)
+ break;
/*
* Always flush device IOTLB if ATS is enabled. vIOMMU
* in the guest may assume IOTLB flush is inclusive,
* which is more efficient.
*/
- if (info->ats_enabled)
- qi_flush_dev_iotlb_pasid(iommu, sid,
- info->pfsid, pasid,
- info->ats_qdep,
- inv_info->addr_info.addr,
- size, granu);
- break;
+ fallthrough;
case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
+ /*
+ * PASID based device TLB invalidation does not support
+ * IOMMU_INV_GRANU_PASID granularity but only supports
+ * IOMMU_INV_GRANU_ADDR.
+ * The equivalent of that is to set the size to cover the
+ * entire 64-bit range. The user only provides PASID info
+ * without address info, so we set addr to 0.
+ */
+ if (inv_info->granularity == IOMMU_INV_GRANU_PASID) {
+ size = 64 - VTD_PAGE_SHIFT;
+ addr = 0;
+ } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
+ addr = inv_info->addr_info.addr;
+ }
+
if (info->ats_enabled)
qi_flush_dev_iotlb_pasid(iommu, sid,
info->pfsid, pasid,
- info->ats_qdep,
- inv_info->addr_info.addr,
- size, granu);
+ info->ats_qdep, addr,
+ size);
else
pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
break;
@@ -5658,14 +5676,13 @@ static bool intel_iommu_capable(enum iommu_cap cap)
static struct iommu_device *intel_iommu_probe_device(struct device *dev)
{
struct intel_iommu *iommu;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
+ iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return ERR_PTR(-ENODEV);
if (translation_pre_enabled(iommu))
- dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
+ dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO);
return &iommu->iommu;
}
@@ -5673,9 +5690,8 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
static void intel_iommu_release_device(struct device *dev)
{
struct intel_iommu *iommu;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
+ iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return;
@@ -5825,37 +5841,14 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev)
return generic_device_group(dev);
}
-#ifdef CONFIG_INTEL_IOMMU_SVM
-struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
-{
- struct intel_iommu *iommu;
- u8 bus, devfn;
-
- if (iommu_dummy(dev)) {
- dev_warn(dev,
- "No IOMMU translation for device; cannot enable SVM\n");
- return NULL;
- }
-
- iommu = device_to_iommu(dev, &bus, &devfn);
- if ((!iommu)) {
- dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
- return NULL;
- }
-
- return iommu;
-}
-#endif /* CONFIG_INTEL_IOMMU_SVM */
-
static int intel_iommu_enable_auxd(struct device *dev)
{
struct device_domain_info *info;
struct intel_iommu *iommu;
unsigned long flags;
- u8 bus, devfn;
int ret;
- iommu = device_to_iommu(dev, &bus, &devfn);
+ iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu || dmar_disabled)
return -EINVAL;
@@ -6080,6 +6073,7 @@ const struct iommu_ops intel_iommu_ops = {
.sva_bind = intel_svm_bind,
.sva_unbind = intel_svm_unbind,
.sva_get_pasid = intel_svm_get_pasid,
+ .page_response = intel_svm_page_response,
#endif
};
@@ -6182,6 +6176,27 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_g
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
+static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
+{
+ unsigned short ver;
+
+ if (!IS_GFX_DEVICE(dev))
+ return;
+
+ ver = (dev->device >> 8) & 0xff;
+ if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
+ ver != 0x4e && ver != 0x8a && ver != 0x98 &&
+ ver != 0x9a)
+ return;
+
+ if (risky_device(dev))
+ return;
+
+ pci_info(dev, "Skip IOMMU disabling for graphics\n");
+ iommu_skip_te_disable = 1;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable);
+
/* On Tylersburg chipsets, some BIOSes have been known to enable the
ISOCH DMAR unit for the Azalia sound device, but not give it any
TLB entries, which causes it to deadlock. Check for that. We do
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index c81f0f17c6ba..e6faedf42fd4 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -19,7 +19,7 @@
#include <linux/pci-ats.h>
#include <linux/spinlock.h>
-#include "intel-pasid.h"
+#include "pasid.h"
/*
* Intel IOMMU system wide PASID name space:
@@ -486,7 +486,16 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
qdep = info->ats_qdep;
pfsid = info->pfsid;
- qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
+ /*
+ * When PASID 0 is used, it indicates RID2PASID (a DMA request w/o PASID),
+ * so a devTLB flush w/o PASID should be used. For a non-zero PASID under
+ * SVA usage, the device could do DMA with multiple PASIDs, so it is more
+ * efficient to flush the devTLB specific to the PASID.
+ */
+ if (pasid == PASID_RID2PASID)
+ qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
+ else
+ qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}
void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
diff --git a/drivers/iommu/intel/intel-pasid.h b/drivers/iommu/intel/pasid.h
index c5318d40e0fa..c9850766c3a9 100644
--- a/drivers/iommu/intel/intel-pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * intel-pasid.h - PASID idr, table and entry header
+ * pasid.h - PASID idr, table and entry header
*
* Copyright (C) 2018 Intel Corporation
*
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 5ae59a6ad681..95c3164a2302 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -20,7 +20,7 @@
#include <linux/ioasid.h>
#include <asm/page.h>
-#include "intel-pasid.h"
+#include "pasid.h"
static irqreturn_t prq_event_thread(int irq, void *d);
static void intel_svm_drain_prq(struct device *dev, int pasid);
@@ -228,13 +228,57 @@ static LIST_HEAD(global_svm_list);
list_for_each_entry((sdev), &(svm)->devs, list) \
if ((d) != (sdev)->dev) {} else
+static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
+ struct intel_svm **rsvm,
+ struct intel_svm_dev **rsdev)
+{
+ struct intel_svm_dev *d, *sdev = NULL;
+ struct intel_svm *svm;
+
+ /* The caller should hold the pasid_mutex lock */
+ if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
+ return -EINVAL;
+
+ if (pasid == INVALID_IOASID || pasid >= PASID_MAX)
+ return -EINVAL;
+
+ svm = ioasid_find(NULL, pasid, NULL);
+ if (IS_ERR(svm))
+ return PTR_ERR(svm);
+
+ if (!svm)
+ goto out;
+
+ /*
+ * If we found an svm for the PASID, there must be at least one device
+ * bound to it.
+ */
+ if (WARN_ON(list_empty(&svm->devs)))
+ return -EINVAL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(d, &svm->devs, list) {
+ if (d->dev == dev) {
+ sdev = d;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+out:
+ *rsvm = svm;
+ *rsdev = sdev;
+
+ return 0;
+}
+
int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct iommu_gpasid_bind_data *data)
{
- struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+ struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
+ struct intel_svm_dev *sdev = NULL;
struct dmar_domain *dmar_domain;
- struct intel_svm_dev *sdev;
- struct intel_svm *svm;
+ struct intel_svm *svm = NULL;
int ret = 0;
if (WARN_ON(!iommu) || !data)
@@ -261,39 +305,23 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
dmar_domain = to_dmar_domain(domain);
mutex_lock(&pasid_mutex);
- svm = ioasid_find(NULL, data->hpasid, NULL);
- if (IS_ERR(svm)) {
- ret = PTR_ERR(svm);
+ ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
+ if (ret)
goto out;
- }
- if (svm) {
+ if (sdev) {
/*
- * If we found svm for the PASID, there must be at
- * least one device bond, otherwise svm should be freed.
+ * Do not allow multiple bindings of the same device-PASID pair since
+ * there is only one SL page table per PASID. We may revisit this
+ * once sharing a PGD across domains is supported.
*/
- if (WARN_ON(list_empty(&svm->devs))) {
- ret = -EINVAL;
- goto out;
- }
+ dev_warn_ratelimited(dev, "Already bound with PASID %u\n",
+ svm->pasid);
+ ret = -EBUSY;
+ goto out;
+ }
- for_each_svm_dev(sdev, svm, dev) {
- /*
- * For devices with aux domains, we should allow
- * multiple bind calls with the same PASID and pdev.
- */
- if (iommu_dev_feature_enabled(dev,
- IOMMU_DEV_FEAT_AUX)) {
- sdev->users++;
- } else {
- dev_warn_ratelimited(dev,
- "Already bound with PASID %u\n",
- svm->pasid);
- ret = -EBUSY;
- }
- goto out;
- }
- } else {
+ if (!svm) {
/* We come here when the PASID has never been bound to a device. */
svm = kzalloc(sizeof(*svm), GFP_KERNEL);
if (!svm) {
@@ -373,28 +401,20 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
int intel_svm_unbind_gpasid(struct device *dev, int pasid)
{
- struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+ struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct intel_svm_dev *sdev;
struct intel_svm *svm;
- int ret = -EINVAL;
+ int ret;
if (WARN_ON(!iommu))
return -EINVAL;
mutex_lock(&pasid_mutex);
- svm = ioasid_find(NULL, pasid, NULL);
- if (!svm) {
- ret = -EINVAL;
- goto out;
- }
-
- if (IS_ERR(svm)) {
- ret = PTR_ERR(svm);
+ ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
+ if (ret)
goto out;
- }
- for_each_svm_dev(sdev, svm, dev) {
- ret = 0;
+ if (sdev) {
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
sdev->users--;
if (!sdev->users) {
@@ -418,7 +438,6 @@ int intel_svm_unbind_gpasid(struct device *dev, int pasid)
kfree(svm);
}
}
- break;
}
out:
mutex_unlock(&pasid_mutex);
@@ -430,7 +449,7 @@ static int
intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops,
struct mm_struct *mm, struct intel_svm_dev **sd)
{
- struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+ struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct device_domain_info *info;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
@@ -596,7 +615,7 @@ success:
if (sd)
*sd = sdev;
ret = 0;
- out:
+out:
return ret;
}
@@ -608,21 +627,15 @@ static int intel_svm_unbind_mm(struct device *dev, int pasid)
struct intel_svm *svm;
int ret = -EINVAL;
- iommu = intel_svm_device_to_iommu(dev);
+ iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
goto out;
- svm = ioasid_find(NULL, pasid, NULL);
- if (!svm)
- goto out;
-
- if (IS_ERR(svm)) {
- ret = PTR_ERR(svm);
+ ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
+ if (ret)
goto out;
- }
- for_each_svm_dev(sdev, svm, dev) {
- ret = 0;
+ if (sdev) {
sdev->users--;
if (!sdev->users) {
list_del_rcu(&sdev->list);
@@ -651,10 +664,8 @@ static int intel_svm_unbind_mm(struct device *dev, int pasid)
kfree(svm);
}
}
- break;
}
- out:
-
+out:
return ret;
}
@@ -800,8 +811,63 @@ qi_retry:
}
}
+static int prq_to_iommu_prot(struct page_req_dsc *req)
+{
+ int prot = 0;
+
+ if (req->rd_req)
+ prot |= IOMMU_FAULT_PERM_READ;
+ if (req->wr_req)
+ prot |= IOMMU_FAULT_PERM_WRITE;
+ if (req->exe_req)
+ prot |= IOMMU_FAULT_PERM_EXEC;
+ if (req->pm_req)
+ prot |= IOMMU_FAULT_PERM_PRIV;
+
+ return prot;
+}
+
+static int
+intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
+{
+ struct iommu_fault_event event;
+
+ if (!dev || !dev_is_pci(dev))
+ return -ENODEV;
+
+ /* Fill in event data for device specific processing */
+ memset(&event, 0, sizeof(struct iommu_fault_event));
+ event.fault.type = IOMMU_FAULT_PAGE_REQ;
+ event.fault.prm.addr = desc->addr;
+ event.fault.prm.pasid = desc->pasid;
+ event.fault.prm.grpid = desc->prg_index;
+ event.fault.prm.perm = prq_to_iommu_prot(desc);
+
+ if (desc->lpig)
+ event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
+ if (desc->pasid_present) {
+ event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+ event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
+ }
+ if (desc->priv_data_present) {
+ /*
+ * Set the last-page-in-group bit if private data is present: a page
+ * response is then required, just as it is for LPIG.
+ * iommu_report_device_fault() doesn't understand this vendor-specific
+ * requirement, so we set last_page as a workaround.
+ */
+ event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
+ event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
+ memcpy(event.fault.prm.private_data, desc->priv_data,
+ sizeof(desc->priv_data));
+ }
+
+ return iommu_report_device_fault(dev, &event);
+}
+
static irqreturn_t prq_event_thread(int irq, void *d)
{
+ struct intel_svm_dev *sdev = NULL;
struct intel_iommu *iommu = d;
struct intel_svm *svm = NULL;
int head, tail, handled = 0;
@@ -813,7 +879,6 @@ static irqreturn_t prq_event_thread(int irq, void *d)
tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
while (head != tail) {
- struct intel_svm_dev *sdev;
struct vm_area_struct *vma;
struct page_req_dsc *req;
struct qi_desc resp;
@@ -849,6 +914,20 @@ static irqreturn_t prq_event_thread(int irq, void *d)
}
}
+ if (!sdev || sdev->sid != req->rid) {
+ struct intel_svm_dev *t;
+
+ sdev = NULL;
+ rcu_read_lock();
+ list_for_each_entry_rcu(t, &svm->devs, list) {
+ if (t->sid == req->rid) {
+ sdev = t;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ }
+
result = QI_RESP_INVALID;
/* Since we're using init_mm.pgd directly, we should never take
* any faults on kernel addresses. */
@@ -859,6 +938,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (!is_canonical_address(address))
goto bad_req;
+ /*
+ * If the prq is to be handled outside the iommu driver, by the
+ * receiver of the fault notifications, skip the page response here.
+ */
+ if (svm->flags & SVM_FLAG_GUEST_MODE) {
+ if (sdev && !intel_svm_prq_report(sdev->dev, req))
+ goto prq_advance;
+ else
+ goto bad_req;
+ }
+
/* If the mm is already defunct, don't handle faults. */
if (!mmget_not_zero(svm->mm))
goto bad_req;
@@ -878,24 +968,11 @@ static irqreturn_t prq_event_thread(int irq, void *d)
goto invalid;
result = QI_RESP_SUCCESS;
- invalid:
+invalid:
mmap_read_unlock(svm->mm);
mmput(svm->mm);
- bad_req:
- /* Accounting for major/minor faults? */
- rcu_read_lock();
- list_for_each_entry_rcu(sdev, &svm->devs, list) {
- if (sdev->sid == req->rid)
- break;
- }
- /* Other devices can go away, but the drivers are not permitted
- * to unbind while any page faults might be in flight. So it's
- * OK to drop the 'lock' here now we have it. */
- rcu_read_unlock();
-
- if (WARN_ON(&sdev->list == &svm->devs))
- sdev = NULL;
-
+bad_req:
+ WARN_ON(!sdev);
if (sdev && sdev->ops && sdev->ops->fault_cb) {
int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
(req->exe_req << 1) | (req->pm_req);
@@ -906,7 +983,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
and these can be NULL. Do not use them below this point! */
sdev = NULL;
svm = NULL;
- no_pasid:
+no_pasid:
if (req->lpig || req->priv_data_present) {
/*
* Per VT-d spec. v3.0 ch7.7, system software must
@@ -931,6 +1008,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
resp.qw3 = 0;
qi_submit_sync(iommu, &resp, 1, 0);
}
+prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
@@ -1001,3 +1079,102 @@ int intel_svm_get_pasid(struct iommu_sva *sva)
return pasid;
}
+
+int intel_svm_page_response(struct device *dev,
+ struct iommu_fault_event *evt,
+ struct iommu_page_response *msg)
+{
+ struct iommu_fault_page_request *prm;
+ struct intel_svm_dev *sdev = NULL;
+ struct intel_svm *svm = NULL;
+ struct intel_iommu *iommu;
+ bool private_present;
+ bool pasid_present;
+ bool last_page;
+ u8 bus, devfn;
+ int ret = 0;
+ u16 sid;
+
+ if (!dev || !dev_is_pci(dev))
+ return -ENODEV;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu)
+ return -ENODEV;
+
+ if (!msg || !evt)
+ return -EINVAL;
+
+ mutex_lock(&pasid_mutex);
+
+ prm = &evt->fault.prm;
+ sid = PCI_DEVID(bus, devfn);
+ pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+ private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
+ last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
+
+ if (!pasid_present) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
+ if (ret || !sdev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * For responses from userspace, we need to make sure that the
+ * PASID has been bound to its mm.
+ */
+ if (svm->flags & SVM_FLAG_GUEST_MODE) {
+ struct mm_struct *mm;
+
+ mm = get_task_mm(current);
+ if (!mm) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (mm != svm->mm) {
+ ret = -ENODEV;
+ mmput(mm);
+ goto out;
+ }
+
+ mmput(mm);
+ }
+
+ /*
+ * Per VT-d spec. v3.0 ch7.7, system software must respond with a
+ * page group response if private data is present (PDP) or the
+ * last-page-in-group (LPIG) bit is set. This is an additional VT-d
+ * requirement beyond the PCI ATS spec.
+ */
+ if (last_page || private_present) {
+ struct qi_desc desc;
+
+ desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
+ QI_PGRP_PASID_P(pasid_present) |
+ QI_PGRP_PDP(private_present) |
+ QI_PGRP_RESP_CODE(msg->code) |
+ QI_PGRP_RESP_TYPE;
+ desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
+ desc.qw2 = 0;
+ desc.qw3 = 0;
+ if (private_present)
+ memcpy(&desc.qw2, prm->private_data,
+ sizeof(prm->private_data));
+
+ qi_submit_sync(iommu, &desc, 1, 0);
+ }
+out:
+ mutex_unlock(&pasid_mutex);
+ return ret;
+}
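With the pieces above in place, a guest-mode page request is forwarded through iommu_report_device_fault() and is expected to be answered with iommu_page_response(), which the new .page_response op routes into intel_svm_page_response(). A minimal sketch of that consumer side is shown below, assuming a fault handler registered with iommu_register_device_fault_handler(); the handler name and cookie are hypothetical:

#include <linux/iommu.h>

/* Hypothetical consumer: service a page request, then answer it. */
static int demo_iopf_handler(struct iommu_fault *fault, void *cookie)
{
	struct device *dev = cookie;
	struct iommu_page_response resp = {
		.version = IOMMU_PAGE_RESP_VERSION_1,
		.code	 = IOMMU_PAGE_RESP_SUCCESS,
	};

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		return -EOPNOTSUPP;

	resp.grpid = fault->prm.grpid;
	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
		resp.flags |= IOMMU_PAGE_RESP_PASID_VALID;
		resp.pasid = fault->prm.pasid;
	}

	/* ... resolve the fault (e.g. in the VMM/guest) before responding ... */

	return iommu_page_response(dev, &resp);
}

/* Registered from the device driver, e.g.:
 *	iommu_register_device_fault_handler(dev, demo_iopf_handler, dev);
 */

The PASID is echoed back only when the request carried one, which is what the PASID_VALID/NEEDS_PASID flags set in intel_svm_prq_report() above rely on.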
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 4272fe4e17f4..a688f22cbe3b 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -470,7 +470,7 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
phys_addr_t paddr, size_t size, int prot,
- int lvl, arm_v7s_iopte *ptep)
+ int lvl, arm_v7s_iopte *ptep, gfp_t gfp)
{
struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_v7s_iopte pte, *cptep;
@@ -491,7 +491,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
/* Grab a pointer to the next level */
pte = READ_ONCE(*ptep);
if (!pte) {
- cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data);
+ cptep = __arm_v7s_alloc_table(lvl + 1, gfp, data);
if (!cptep)
return -ENOMEM;
@@ -512,11 +512,11 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
}
/* Rinse, repeat */
- return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+ return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}
static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable *iop = &data->iop;
@@ -530,7 +530,7 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
paddr >= (1ULL << data->iop.cfg.oas)))
return -ERANGE;
- ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
+ ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp);
/*
* Synchronise all PTE updates for the new mapping before there's
* a chance for anything to kick off a table walk for the new iova.
@@ -922,12 +922,12 @@ static int __init arm_v7s_do_selftests(void)
if (ops->map(ops, iova, iova, size, IOMMU_READ |
IOMMU_WRITE |
IOMMU_NOEXEC |
- IOMMU_CACHE))
+ IOMMU_CACHE, GFP_KERNEL))
return __FAIL(ops);
/* Overlapping mappings */
if (!ops->map(ops, iova, iova + size, size,
- IOMMU_READ | IOMMU_NOEXEC))
+ IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
return __FAIL(ops);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
@@ -946,7 +946,7 @@ static int __init arm_v7s_do_selftests(void)
return __FAIL(ops);
/* Remap of partial unmap */
- if (ops->map(ops, iova_start + size, size, size, IOMMU_READ))
+ if (ops->map(ops, iova_start + size, size, size, IOMMU_READ, GFP_KERNEL))
return __FAIL(ops);
if (ops->iova_to_phys(ops, iova_start + size + 42)
@@ -967,7 +967,7 @@ static int __init arm_v7s_do_selftests(void)
return __FAIL(ops);
/* Remap full block */
- if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+ if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
return __FAIL(ops);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
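The map path now threads the caller's gfp_t down to the table allocations instead of hard-coding GFP_ATOMIC, so intermediate page tables are only allocated atomically when the caller really is in atomic context. A minimal sketch of the two call patterns, assuming an io_pgtable_ops pointer obtained from alloc_io_pgtable_ops() (the helper names are illustrative):

#include <linux/iommu.h>
#include <linux/io-pgtable.h>

/* Sleepable context, e.g. a normal iommu_map() call from process context. */
static int demo_map_sleepable(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t size)
{
	return ops->map(ops, iova, paddr, size, IOMMU_READ | IOMMU_WRITE,
			GFP_KERNEL);
}

/* Atomic context, e.g. under a spinlock as in msm_iommu_map() further down. */
static int demo_map_atomic(struct io_pgtable_ops *ops, unsigned long iova,
			   phys_addr_t paddr, size_t size)
{
	return ops->map(ops, iova, paddr, size, IOMMU_READ | IOMMU_WRITE,
			GFP_ATOMIC);
}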
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 04fbd4bf0ff9..dc7bcf858b6d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -355,7 +355,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
- int lvl, arm_lpae_iopte *ptep)
+ int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
{
arm_lpae_iopte *cptep, pte;
size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
@@ -376,7 +376,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
/* Grab a pointer to the next level */
pte = READ_ONCE(*ptep);
if (!pte) {
- cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
+ cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
if (!cptep)
return -ENOMEM;
@@ -396,7 +396,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
}
/* Rinse, repeat */
- return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+ return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
@@ -438,9 +438,6 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
else if (prot & IOMMU_CACHE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
- else if (prot & IOMMU_SYS_CACHE_ONLY)
- pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
- << ARM_LPAE_PTE_ATTRINDX_SHIFT);
}
if (prot & IOMMU_CACHE)
@@ -461,7 +458,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
}
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int iommu_prot)
+ phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
@@ -483,7 +480,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
return -ERANGE;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
- ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+ ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
/*
* Synchronise all PTE updates for the new mapping before there's
* a chance for anything to kick off a table walk for the new iova.
@@ -1178,12 +1175,12 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
if (ops->map(ops, iova, iova, size, IOMMU_READ |
IOMMU_WRITE |
IOMMU_NOEXEC |
- IOMMU_CACHE))
+ IOMMU_CACHE, GFP_KERNEL))
return __FAIL(ops, i);
/* Overlapping mappings */
if (!ops->map(ops, iova, iova + size, size,
- IOMMU_READ | IOMMU_NOEXEC))
+ IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
@@ -1198,7 +1195,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
return __FAIL(ops, i);
/* Remap of partial unmap */
- if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
+ if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
@@ -1216,7 +1213,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
return __FAIL(ops, i);
/* Remap full block */
- if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+ if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b6858adc4f17..609bd25bf154 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -383,8 +383,8 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
* Elements are sorted by start address and overlapping segments
* of the same type are merged.
*/
-int iommu_insert_resv_region(struct iommu_resv_region *new,
- struct list_head *regions)
+static int iommu_insert_resv_region(struct iommu_resv_region *new,
+ struct list_head *regions)
{
struct iommu_resv_region *iter, *tmp, *nr, *top;
LIST_HEAD(stack);
@@ -1185,11 +1185,12 @@ EXPORT_SYMBOL_GPL(iommu_report_device_fault);
int iommu_page_response(struct device *dev,
struct iommu_page_response *msg)
{
- bool pasid_valid;
+ bool needs_pasid;
int ret = -EINVAL;
struct iommu_fault_event *evt;
struct iommu_fault_page_request *prm;
struct dev_iommu *param = dev->iommu;
+ bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
if (!domain || !domain->ops->page_response)
@@ -1214,14 +1215,24 @@ int iommu_page_response(struct device *dev,
*/
list_for_each_entry(evt, &param->fault_param->faults, list) {
prm = &evt->fault.prm;
- pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+ if (prm->grpid != msg->grpid)
+ continue;
- if ((pasid_valid && prm->pasid != msg->pasid) ||
- prm->grpid != msg->grpid)
+ /*
+ * If the PASID is required, the corresponding request is
+ * matched using the group ID, the PASID valid bit and the PASID
+ * value. Otherwise, only the group ID is used to match the request
+ * and the response.
+ */
+ needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
+ if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
continue;
- /* Sanitize the reply */
- msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
+ if (!needs_pasid && has_pasid) {
+ /* No big deal, just clear it. */
+ msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
+ msg->pasid = 0;
+ }
ret = domain->ops->page_response(dev, evt, msg);
list_del(&evt->list);
@@ -2168,8 +2179,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
return pgsize;
}
-int __iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
const struct iommu_ops *ops = domain->ops;
unsigned long orig_iova = iova;
@@ -2319,9 +2330,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
-size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot,
- gfp_t gfp)
+static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot,
+ gfp_t gfp)
{
size_t len = 0, mapped = 0;
phys_addr_t start;
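The rework above changes how a response is matched to a pending request: the group ID must always match, the PASID only participates when the request was flagged IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID, and a superfluous PASID on the response is now cleared rather than rejected. A small sketch of the resulting rule (the helper name is hypothetical):

#include <linux/iommu.h>

/* Sketch of the matching rule introduced above. */
static bool demo_prq_matches_response(const struct iommu_fault_page_request *prm,
				      const struct iommu_page_response *msg)
{
	bool needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

	if (prm->grpid != msg->grpid)
		return false;

	/* The PASID only participates when the request demands it. */
	if (needs_pasid)
		return has_pasid && msg->pasid == prm->pasid;

	return true;
}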
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 49fc01f2a28d..45a251da5453 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -811,7 +811,9 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
for (i = 0 ; i < mag->size; ++i) {
struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
- BUG_ON(!iova);
+ if (WARN_ON(!iova))
+ continue;
+
private_free_iova(iovad, iova);
}
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 6de86e73dfc3..0f18abda0e20 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -3,7 +3,7 @@
* IOMMU API for Renesas VMSA-compatible IPMMU
* Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
- * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014-2020 Renesas Electronics Corporation
*/
#include <linux/bitmap.h>
@@ -686,7 +686,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
if (!domain)
return -ENODEV;
- return domain->iop->map(domain->iop, iova, paddr, size, prot);
+ return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
}
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
@@ -739,7 +739,9 @@ static const struct soc_device_attribute soc_rcar_gen3[] = {
{ .soc_id = "r8a774a1", },
{ .soc_id = "r8a774b1", },
{ .soc_id = "r8a774c0", },
+ { .soc_id = "r8a774e1", },
{ .soc_id = "r8a7795", },
+ { .soc_id = "r8a77961", },
{ .soc_id = "r8a7796", },
{ .soc_id = "r8a77965", },
{ .soc_id = "r8a77970", },
@@ -751,7 +753,9 @@ static const struct soc_device_attribute soc_rcar_gen3[] = {
static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
{ .soc_id = "r8a774b1", },
{ .soc_id = "r8a774c0", },
+ { .soc_id = "r8a774e1", },
{ .soc_id = "r8a7795", .revision = "ES3.*" },
+ { .soc_id = "r8a77961", },
{ .soc_id = "r8a77965", },
{ .soc_id = "r8a77990", },
{ .soc_id = "r8a77995", },
@@ -963,12 +967,18 @@ static const struct of_device_id ipmmu_of_ids[] = {
.compatible = "renesas,ipmmu-r8a774c0",
.data = &ipmmu_features_rcar_gen3,
}, {
+ .compatible = "renesas,ipmmu-r8a774e1",
+ .data = &ipmmu_features_rcar_gen3,
+ }, {
.compatible = "renesas,ipmmu-r8a7795",
.data = &ipmmu_features_rcar_gen3,
}, {
.compatible = "renesas,ipmmu-r8a7796",
.data = &ipmmu_features_rcar_gen3,
}, {
+ .compatible = "renesas,ipmmu-r8a77961",
+ .data = &ipmmu_features_rcar_gen3,
+ }, {
.compatible = "renesas,ipmmu-r8a77965",
.data = &ipmmu_features_rcar_gen3,
}, {
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 3d8a63555c25..3615cd6241c4 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -491,7 +491,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
int ret;
spin_lock_irqsave(&priv->pgtlock, flags);
- ret = priv->iop->map(priv->iop, iova, pa, len, prot);
+ ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
spin_unlock_irqrestore(&priv->pgtlock, flags);
return ret;
@@ -593,14 +593,14 @@ static void insert_iommu_master(struct device *dev,
struct msm_iommu_dev **iommu,
struct of_phandle_args *spec)
{
- struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
+ struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
int sid;
if (list_empty(&(*iommu)->ctx_list)) {
master = kzalloc(sizeof(*master), GFP_ATOMIC);
master->of_node = dev->of_node;
list_add(&master->list, &(*iommu)->ctx_list);
- dev->archdata.iommu = master;
+ dev_iommu_priv_set(dev, master);
}
for (sid = 0; sid < master->num_mids; sid++)
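This is one of several conversions in this series from dev->archdata.iommu to the generic per-device IOMMU private pointer. A minimal sketch of the pattern, with hypothetical driver names, assuming only dev_iommu_priv_set()/dev_iommu_priv_get() from <linux/iommu.h>:

#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/slab.h>

struct demo_iommu;			/* owning IOMMU instance (opaque here) */

struct demo_iommu_priv {
	struct demo_iommu *iommu;
	u32 sid;			/* stream ID of this master */
};

/* Stash per-device data when the master is discovered ... */
static int demo_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct demo_iommu_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->sid = args->args[0];
	dev_iommu_priv_set(dev, priv);
	return 0;
}

/* ... and fetch it back wherever the driver used dev->archdata.iommu. */
static struct demo_iommu *demo_dev_to_iommu(struct device *dev)
{
	struct demo_iommu_priv *priv = dev_iommu_priv_get(dev);

	return priv ? priv->iommu : NULL;
}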
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 2be96f1cdbd2..785b228d39a6 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -37,12 +37,18 @@
#define REG_MMU_INVLD_START_A 0x024
#define REG_MMU_INVLD_END_A 0x028
-#define REG_MMU_INV_SEL 0x038
+#define REG_MMU_INV_SEL_GEN2 0x02c
+#define REG_MMU_INV_SEL_GEN1 0x038
#define F_INVLD_EN0 BIT(0)
#define F_INVLD_EN1 BIT(1)
-#define REG_MMU_STANDARD_AXI_MODE 0x048
+#define REG_MMU_MISC_CTRL 0x048
+#define F_MMU_IN_ORDER_WR_EN_MASK (BIT(1) | BIT(17))
+#define F_MMU_STANDARD_AXI_MODE_MASK (BIT(3) | BIT(19))
+
#define REG_MMU_DCM_DIS 0x050
+#define REG_MMU_WR_LEN_CTRL 0x054
+#define F_MMU_WR_THROT_DIS_MASK (BIT(5) | BIT(21))
#define REG_MMU_CTRL_REG 0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR (2 << 4)
@@ -88,10 +94,12 @@
#define REG_MMU1_INVLD_PA 0x148
#define REG_MMU0_INT_ID 0x150
#define REG_MMU1_INT_ID 0x154
+#define F_MMU_INT_ID_COMM_ID(a) (((a) >> 9) & 0x7)
+#define F_MMU_INT_ID_SUB_COMM_ID(a) (((a) >> 7) & 0x3)
#define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)
-#define MTK_PROTECT_PA_ALIGN 128
+#define MTK_PROTECT_PA_ALIGN 256
/*
* Get the local arbiter ID and the portid within the larb arbiter
@@ -100,6 +108,18 @@
#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
+#define HAS_4GB_MODE BIT(0)
+/* HW will use the EMI clock if there is no "bclk". */
+#define HAS_BCLK BIT(1)
+#define HAS_VLD_PA_RNG BIT(2)
+#define RESET_AXI BIT(3)
+#define OUT_ORDER_WR_EN BIT(4)
+#define HAS_SUB_COMM BIT(5)
+#define WR_THROT_EN BIT(6)
+
+#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
+ ((((pdata)->flags) & (_x)) == (_x))
+
struct mtk_iommu_domain {
struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
@@ -165,7 +185,7 @@ static void mtk_iommu_tlb_flush_all(void *cookie)
for_each_m4u(data) {
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
- data->base + REG_MMU_INV_SEL);
+ data->base + data->plat_data->inv_sel_reg);
writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
wmb(); /* Make sure the tlb flush all done */
}
@@ -182,7 +202,7 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
for_each_m4u(data) {
spin_lock_irqsave(&data->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
- data->base + REG_MMU_INV_SEL);
+ data->base + data->plat_data->inv_sel_reg);
writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
writel_relaxed(iova + size - 1,
@@ -226,7 +246,7 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
struct mtk_iommu_data *data = dev_id;
struct mtk_iommu_domain *dom = data->m4u_dom;
u32 int_state, regval, fault_iova, fault_pa;
- unsigned int fault_larb, fault_port;
+ unsigned int fault_larb, fault_port, sub_comm = 0;
bool layer, write;
/* Read error info from registers */
@@ -242,10 +262,14 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
}
layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
- fault_larb = F_MMU_INT_ID_LARB_ID(regval);
fault_port = F_MMU_INT_ID_PORT_ID(regval);
-
- fault_larb = data->plat_data->larbid_remap[fault_larb];
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
+ fault_larb = F_MMU_INT_ID_COMM_ID(regval);
+ sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
+ } else {
+ fault_larb = F_MMU_INT_ID_LARB_ID(regval);
+ }
+ fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
@@ -397,7 +421,7 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
paddr |= BIT_ULL(32);
/* Synchronize with the tlb_lock */
- return dom->iop->map(dom->iop, iova, paddr, size, prot);
+ return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
}
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
@@ -532,11 +556,13 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
return ret;
}
- if (data->plat_data->m4u_plat == M4U_MT8173)
+ if (data->plat_data->m4u_plat == M4U_MT8173) {
regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
- else
- regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
+ } else {
+ regval = readl_relaxed(data->base + REG_MMU_CTRL_REG);
+ regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
+ }
writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
regval = F_L2_MULIT_HIT_EN |
@@ -563,7 +589,8 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
upper_32_bits(data->protect_base);
writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
- if (data->enable_4GB && data->plat_data->has_vld_pa_rng) {
+ if (data->enable_4GB &&
+ MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
/*
* If 4GB mode is enabled, the valid PA range is from
* 0x1_0000_0000 to 0x1_ffff_ffff. Here we record bit[32:30].
@@ -572,9 +599,23 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
}
writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
+ /* write command throttling mode */
+ regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL);
+ regval &= ~F_MMU_WR_THROT_DIS_MASK;
+ writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL);
+ }
- if (data->plat_data->reset_axi)
- writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
+ /* The register is called STANDARD_AXI_MODE in this case */
+ regval = 0;
+ } else {
+ regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
+ regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
+ regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
+ }
+ writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);
if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
dev_name(data->dev), (void *)data)) {
@@ -616,7 +657,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
/* Whether the current dram is over 4GB */
data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
- if (!data->plat_data->has_4gb_mode)
+ if (!MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
data->enable_4GB = false;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -629,7 +670,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (data->irq < 0)
return data->irq;
- if (data->plat_data->has_bclk) {
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
data->bclk = devm_clk_get(dev, "bclk");
if (IS_ERR(data->bclk))
return PTR_ERR(data->bclk);
@@ -718,8 +759,8 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
struct mtk_iommu_suspend_reg *reg = &data->reg;
void __iomem *base = data->base;
- reg->standard_axi_mode = readl_relaxed(base +
- REG_MMU_STANDARD_AXI_MODE);
+ reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL);
+ reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
@@ -743,8 +784,8 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
return ret;
}
- writel_relaxed(reg->standard_axi_mode,
- base + REG_MMU_STANDARD_AXI_MODE);
+ writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
+ writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
@@ -763,28 +804,35 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = {
static const struct mtk_iommu_plat_data mt2712_data = {
.m4u_plat = M4U_MT2712,
- .has_4gb_mode = true,
- .has_bclk = true,
- .has_vld_pa_rng = true,
- .larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+ .flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
+};
+
+static const struct mtk_iommu_plat_data mt6779_data = {
+ .m4u_plat = M4U_MT6779,
+ .flags = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
};
static const struct mtk_iommu_plat_data mt8173_data = {
.m4u_plat = M4U_MT8173,
- .has_4gb_mode = true,
- .has_bclk = true,
- .reset_axi = true,
- .larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */
+ .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
};
static const struct mtk_iommu_plat_data mt8183_data = {
.m4u_plat = M4U_MT8183,
- .reset_axi = true,
- .larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1},
+ .flags = RESET_AXI,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
};
static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
+ { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
{}
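The MediaTek platform data now carries a single flags bitmask tested through MTK_IOMMU_HAS_FLAG(), the invalidate-select register offset, and a two-dimensional larb remap table (common x sub-common). A sketch of what a further platform entry would look like under this scheme; the values below are illustrative only:

/* Hypothetical platform entry; flag selection and remap values are
 * illustrative only and reuse identifiers defined above.
 */
static const struct mtk_iommu_plat_data demo_soc_data = {
	.m4u_plat	= M4U_MT6779,
	.flags		= HAS_BCLK | HAS_SUB_COMM | WR_THROT_EN,
	.inv_sel_reg	= REG_MMU_INV_SEL_GEN2,
	/* larb ID per (common, sub-common) port, cf. the mt6779 table above */
	.larbid_remap	= {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
};

/* Feature tests then read as: */
static bool demo_has_bclk(const struct mtk_iommu_plat_data *plat_data)
{
	return MTK_IOMMU_HAS_FLAG(plat_data, HAS_BCLK);
}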
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index ea949a324e33..122925dbe547 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -15,34 +15,39 @@
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
#include <soc/mediatek/smi.h>
+#define MTK_LARB_COM_MAX 8
+#define MTK_LARB_SUBCOM_MAX 4
+
struct mtk_iommu_suspend_reg {
- u32 standard_axi_mode;
+ union {
+ u32 standard_axi_mode;/* v1 */
+ u32 misc_ctrl;/* v2 */
+ };
u32 dcm_dis;
u32 ctrl_reg;
u32 int_control0;
u32 int_main_control;
u32 ivrp_paddr;
u32 vld_pa_rng;
+ u32 wr_len_ctrl;
};
enum mtk_iommu_plat {
M4U_MT2701,
M4U_MT2712,
+ M4U_MT6779,
M4U_MT8173,
M4U_MT8183,
};
struct mtk_iommu_plat_data {
enum mtk_iommu_plat m4u_plat;
- bool has_4gb_mode;
-
- /* HW will use the EMI clock if there isn't the "bclk". */
- bool has_bclk;
- bool has_vld_pa_rng;
- bool reset_axi;
- unsigned char larbid_remap[MTK_LARB_NR_MAX];
+ u32 flags;
+ u32 inv_sel_reg;
+ unsigned char larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX];
};
struct mtk_iommu_domain;
@@ -62,6 +67,8 @@ struct mtk_iommu_data {
struct iommu_device iommu;
const struct mtk_iommu_plat_data *plat_data;
+ struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */
+
struct list_head list;
struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
};
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index c9d79cff4d17..82ddfe9170d4 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -269,7 +269,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
int ret;
/* Only allow the domain created internally. */
- mtk_mapping = data->dev->archdata.iommu;
+ mtk_mapping = data->mapping;
if (mtk_mapping->domain != domain)
return 0;
@@ -369,7 +369,6 @@ static int mtk_iommu_create_mapping(struct device *dev,
struct mtk_iommu_data *data;
struct platform_device *m4updev;
struct dma_iommu_mapping *mtk_mapping;
- struct device *m4udev;
int ret;
if (args->args_count != 1) {
@@ -401,8 +400,7 @@ static int mtk_iommu_create_mapping(struct device *dev,
return ret;
data = dev_iommu_priv_get(dev);
- m4udev = data->dev;
- mtk_mapping = m4udev->archdata.iommu;
+ mtk_mapping = data->mapping;
if (!mtk_mapping) {
/* The MTK iommu supports a 4GB iova address space. */
mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
@@ -410,7 +408,7 @@ static int mtk_iommu_create_mapping(struct device *dev,
if (IS_ERR(mtk_mapping))
return PTR_ERR(mtk_mapping);
- m4udev->archdata.iommu = mtk_mapping;
+ data->mapping = mtk_mapping;
}
return 0;
@@ -459,7 +457,7 @@ static void mtk_iommu_probe_finalize(struct device *dev)
int err;
data = dev_iommu_priv_get(dev);
- mtk_mapping = data->dev->archdata.iommu;
+ mtk_mapping = data->mapping;
err = arm_iommu_attach_device(dev, mtk_mapping);
if (err)
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 8e19bfa94121..a99afb5d9011 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -98,8 +98,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
mutex_lock(&iommu_debug_lock);
bytes = omap_iommu_dump_ctx(obj, p, count);
+ if (bytes < 0)
+ goto err;
bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
+err:
mutex_unlock(&iommu_debug_lock);
kfree(buf);
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index c8282cc212cb..71f29c0927fc 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -3,7 +3,7 @@
* omap iommu: tlb and pagetable primitives
*
* Copyright (C) 2008-2010 Nokia Corporation
- * Copyright (C) 2013-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
* Paul Mundt and Toshihiro Kobayashi
@@ -71,7 +71,7 @@ static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
**/
void omap_iommu_save_ctx(struct device *dev)
{
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct omap_iommu *obj;
u32 *p;
int i;
@@ -101,7 +101,7 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
**/
void omap_iommu_restore_ctx(struct device *dev)
{
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct omap_iommu *obj;
u32 *p;
int i;
@@ -1398,7 +1398,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
static int omap_iommu_count(struct device *dev)
{
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
int count = 0;
while (arch_data->iommu_dev) {
@@ -1459,8 +1459,8 @@ static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
+ struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu_device *iommu;
struct omap_iommu *oiommu;
int ret = 0;
@@ -1524,7 +1524,7 @@ out:
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
struct device *dev)
{
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct omap_iommu_device *iommu = omap_domain->iommus;
struct omap_iommu *oiommu;
int i;
@@ -1650,7 +1650,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
int num_iommus, i;
/*
- * Allocate the archdata iommu structure for DT-based devices.
+ * Allocate the per-device iommu structure for DT-based devices.
*
* TODO: Simplify this when removing non-DT support completely from the
* IOMMU users.
@@ -1698,7 +1698,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
of_node_put(np);
}
- dev->archdata.iommu = arch_data;
+ dev_iommu_priv_set(dev, arch_data);
/*
* use the first IOMMU alone for the sysfs device linking.
@@ -1712,19 +1712,19 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
static void omap_iommu_release_device(struct device *dev)
{
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
if (!dev->of_node || !arch_data)
return;
- dev->archdata.iommu = NULL;
+ dev_iommu_priv_set(dev, NULL);
kfree(arch_data);
}
static struct iommu_group *omap_iommu_device_group(struct device *dev)
{
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct iommu_group *group = ERR_PTR(-EINVAL);
if (!arch_data)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index d25c2486ca07..e5d86b7177de 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -836,7 +836,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
- struct rk_iommudata *data = dev->archdata.iommu;
+ struct rk_iommudata *data = dev_iommu_priv_get(dev);
return data ? data->iommu : NULL;
}
@@ -1059,7 +1059,7 @@ static struct iommu_device *rk_iommu_probe_device(struct device *dev)
struct rk_iommudata *data;
struct rk_iommu *iommu;
- data = dev->archdata.iommu;
+ data = dev_iommu_priv_get(dev);
if (!data)
return ERR_PTR(-ENODEV);
@@ -1073,7 +1073,7 @@ static struct iommu_device *rk_iommu_probe_device(struct device *dev)
static void rk_iommu_release_device(struct device *dev)
{
- struct rk_iommudata *data = dev->archdata.iommu;
+ struct rk_iommudata *data = dev_iommu_priv_get(dev);
device_link_del(data->link);
}
@@ -1100,7 +1100,7 @@ static int rk_iommu_of_xlate(struct device *dev,
iommu_dev = of_find_device_by_node(args->np);
data->iommu = platform_get_drvdata(iommu_dev);
- dev->archdata.iommu = data;
+ dev_iommu_priv_set(dev, data);
platform_device_put(iommu_dev);
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 5fbdff6ff41a..fac720273889 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -113,8 +113,8 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
if (gart->active_domain && gart->active_domain != domain) {
ret = -EBUSY;
- } else if (dev->archdata.iommu != domain) {
- dev->archdata.iommu = domain;
+ } else if (dev_iommu_priv_get(dev) != domain) {
+ dev_iommu_priv_set(dev, domain);
gart->active_domain = domain;
gart->active_devices++;
}
@@ -131,8 +131,8 @@ static void gart_iommu_detach_dev(struct iommu_domain *domain,
spin_lock(&gart->dom_lock);
- if (dev->archdata.iommu == domain) {
- dev->archdata.iommu = NULL;
+ if (dev_iommu_priv_get(dev) == domain) {
+ dev_iommu_priv_set(dev, NULL);
if (--gart->active_devices == 0)
gart->active_domain = NULL;
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 7426b7666e2b..124c8848ab7e 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -465,7 +465,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct tegra_smmu *smmu = dev->archdata.iommu;
+ struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
struct tegra_smmu_as *as = to_smmu_as(domain);
struct device_node *np = dev->of_node;
struct of_phandle_args args;
@@ -780,7 +780,7 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
* supported by the Linux kernel, so abort after the
* first match.
*/
- dev->archdata.iommu = smmu;
+ dev_iommu_priv_set(dev, smmu);
break;
}
@@ -797,7 +797,7 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
static void tegra_smmu_release_device(struct device *dev)
{
- dev->archdata.iommu = NULL;
+ dev_iommu_priv_set(dev, NULL);
}
static const struct tegra_smmu_group_soc *
@@ -856,7 +856,7 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct tegra_smmu *smmu = dev->archdata.iommu;
+ struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
struct iommu_group *group;
group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index f6f07489a9aa..b4da396cce60 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -1010,8 +1010,8 @@ static int viommu_probe(struct virtio_device *vdev)
if (ret)
return ret;
- virtio_cread(vdev, struct virtio_iommu_config, page_size_mask,
- &viommu->pgsize_bitmap);
+ virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
+ &viommu->pgsize_bitmap);
if (!viommu->pgsize_bitmap) {
ret = -EINVAL;
@@ -1022,25 +1022,25 @@ static int viommu_probe(struct virtio_device *vdev)
viommu->last_domain = ~0U;
/* Optional features */
- virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
- struct virtio_iommu_config, input_range.start,
- &input_start);
+ virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
+ struct virtio_iommu_config, input_range.start,
+ &input_start);
- virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
- struct virtio_iommu_config, input_range.end,
- &input_end);
+ virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
+ struct virtio_iommu_config, input_range.end,
+ &input_end);
- virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
- struct virtio_iommu_config, domain_range.start,
- &viommu->first_domain);
+ virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+ struct virtio_iommu_config, domain_range.start,
+ &viommu->first_domain);
- virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
- struct virtio_iommu_config, domain_range.end,
- &viommu->last_domain);
+ virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+ struct virtio_iommu_config, domain_range.end,
+ &viommu->last_domain);
- virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
- struct virtio_iommu_config, probe_size,
- &viommu->probe_size);
+ virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
+ struct virtio_iommu_config, probe_size,
+ &viommu->probe_size);
viommu->geometry = (struct iommu_domain_geometry) {
.aperture_start = input_start,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h b/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
index 152a713fff78..1a32266b7ddc 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
@@ -9,9 +9,11 @@
#if defined(CONFIG_EXYNOS_IOMMU)
+#include <linux/iommu.h>
+
static inline bool exynos_is_iommu_available(struct device *dev)
{
- return dev->archdata.iommu != NULL;
+ return dev_iommu_priv_get(dev) != NULL;
}
#else
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index e154bea3cf14..c21262502581 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -239,6 +239,13 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = {
.larb_direct_to_common_mask = BIT(8) | BIT(9), /* bdpsys */
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt6779 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .larb_direct_to_common_mask =
+ BIT(4) | BIT(6) | BIT(11) | BIT(12) | BIT(13),
+ /* DUMMY | IPU0 | IPU1 | CCU | MDLA */
+};
+
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
.has_gals = true,
.config_port = mtk_smi_larb_config_port_gen2_general,
@@ -260,6 +267,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
.data = &mtk_smi_larb_mt2712
},
{
+ .compatible = "mediatek,mt6779-smi-larb",
+ .data = &mtk_smi_larb_mt6779
+ },
+ {
.compatible = "mediatek,mt8183-smi-larb",
.data = &mtk_smi_larb_mt8183
},
@@ -388,6 +399,13 @@ static const struct mtk_smi_common_plat mtk_smi_common_gen2 = {
.gen = MTK_SMI_GEN2,
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt6779 = {
+ .gen = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) |
+ F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7),
+};
+
static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
.gen = MTK_SMI_GEN2,
.has_gals = true,
@@ -409,6 +427,10 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
.data = &mtk_smi_common_gen2,
},
{
+ .compatible = "mediatek,mt6779-smi-common",
+ .data = &mtk_smi_common_mt6779,
+ },
+ {
.compatible = "mediatek,mt8183-smi-common",
.data = &mtk_smi_common_mt8183,
},
diff --git a/drivers/mfd/sky81452.c b/drivers/mfd/sky81452.c
index 76eedfae8553..3ad35bf0c015 100644
--- a/drivers/mfd/sky81452.c
+++ b/drivers/mfd/sky81452.c
@@ -47,8 +47,6 @@ static int sky81452_probe(struct i2c_client *client,
memset(cells, 0, sizeof(cells));
cells[0].name = "sky81452-backlight";
cells[0].of_compatible = "skyworks,sky81452-backlight";
- cells[0].platform_data = pdata->bl_pdata;
- cells[0].pdata_size = sizeof(*pdata->bl_pdata);
cells[1].name = "sky81452-regulator";
cells[1].platform_data = pdata->regulator_init_data;
cells[1].pdata_size = sizeof(*pdata->regulator_init_data);
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index b10a85392952..55115cfb2972 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_wakeup.h>
+#include <linux/notifier.h>
#include "ipa_version.h"
#include "gsi.h"
@@ -73,6 +74,8 @@ struct ipa {
enum ipa_version version;
struct platform_device *pdev;
struct rproc *modem_rproc;
+ struct notifier_block nb;
+ void *notifier;
struct ipa_smp2p *smp2p;
struct ipa_clock *clock;
atomic_t suspend_ref;
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index ed10818dd99f..e34fe2d77324 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -9,7 +9,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_rmnet.h>
-#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
+#include <linux/remoteproc/qcom_rproc.h>
#include "ipa.h"
#include "ipa_data.h"
@@ -311,43 +311,40 @@ static void ipa_modem_crashed(struct ipa *ipa)
dev_err(dev, "error %d zeroing modem memory regions\n", ret);
}
-static void ipa_modem_notify(void *data, enum qcom_rproc_event event)
+static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
{
- struct ipa *ipa = data;
- struct device *dev;
+ struct ipa *ipa = container_of(nb, struct ipa, nb);
+ struct qcom_ssr_notify_data *notify_data = data;
+ struct device *dev = &ipa->pdev->dev;
- dev = &ipa->pdev->dev;
- switch (event) {
- case MODEM_STARTING:
+ switch (action) {
+ case QCOM_SSR_BEFORE_POWERUP:
dev_info(dev, "received modem starting event\n");
ipa_smp2p_notify_reset(ipa);
break;
- case MODEM_RUNNING:
+ case QCOM_SSR_AFTER_POWERUP:
dev_info(dev, "received modem running event\n");
break;
- case MODEM_STOPPING:
- case MODEM_CRASHED:
+ case QCOM_SSR_BEFORE_SHUTDOWN:
dev_info(dev, "received modem %s event\n",
- event == MODEM_STOPPING ? "stopping"
- : "crashed");
+ notify_data->crashed ? "crashed" : "stopping");
if (ipa->setup_complete)
ipa_modem_crashed(ipa);
break;
- case MODEM_OFFLINE:
+ case QCOM_SSR_AFTER_SHUTDOWN:
dev_info(dev, "received modem offline event\n");
break;
- case MODEM_REMOVING:
- dev_info(dev, "received modem stopping event\n");
- break;
-
default:
- dev_err(&ipa->pdev->dev, "unrecognized event %u\n", event);
+ dev_err(dev, "received unrecognized event %lu\n", action);
break;
}
+
+ return NOTIFY_OK;
}
int ipa_modem_init(struct ipa *ipa, bool modem_init)
@@ -362,13 +359,30 @@ void ipa_modem_exit(struct ipa *ipa)
int ipa_modem_config(struct ipa *ipa)
{
- return qcom_register_ipa_notify(ipa->modem_rproc, ipa_modem_notify,
- ipa);
+ void *notifier;
+
+ ipa->nb.notifier_call = ipa_modem_notify;
+
+ notifier = qcom_register_ssr_notifier("mpss", &ipa->nb);
+ if (IS_ERR(notifier))
+ return PTR_ERR(notifier);
+
+ ipa->notifier = notifier;
+
+ return 0;
}
void ipa_modem_deconfig(struct ipa *ipa)
{
- qcom_deregister_ipa_notify(ipa->modem_rproc);
+ struct device *dev = &ipa->pdev->dev;
+ int ret;
+
+ ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb);
+ if (ret)
+ dev_err(dev, "error %d unregistering notifier", ret);
+
+ ipa->notifier = NULL;
+ memset(&ipa->nb, 0, sizeof(ipa->nb));
}
int ipa_modem_setup(struct ipa *ipa)
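The IPA driver now subscribes to the generic Qualcomm SSR notifier chain for the "mpss" remoteproc instead of the IPA-specific modem notify hook. A minimal sketch of another client of the same API, assuming qcom_register_ssr_notifier()/qcom_unregister_ssr_notifier() and the QCOM_SSR_* actions from <linux/remoteproc/qcom_rproc.h>; the client names are hypothetical:

#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/remoteproc/qcom_rproc.h>

static int demo_ssr_notify(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	struct qcom_ssr_notify_data *notify_data = data;

	switch (action) {
	case QCOM_SSR_BEFORE_SHUTDOWN:
		pr_info("modem going down (%s)\n",
			notify_data->crashed ? "crashed" : "stopping");
		break;
	case QCOM_SSR_AFTER_POWERUP:
		pr_info("modem back up\n");
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block demo_nb = { .notifier_call = demo_ssr_notify };
static void *demo_ssr_cookie;

static int demo_register(void)
{
	demo_ssr_cookie = qcom_register_ssr_notifier("mpss", &demo_nb);
	return PTR_ERR_OR_ZERO(demo_ssr_cookie);
}

static void demo_unregister(void)
{
	qcom_unregister_ssr_notifier(demo_ssr_cookie, &demo_nb);
}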
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6fa8fe5ef160..0ada48edf749 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2264,12 +2264,13 @@ static void virtnet_update_settings(struct virtnet_info *vi)
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
return;
- speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config,
- speed));
+ virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
+
if (ethtool_validate_speed(speed))
vi->speed = speed;
- duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config,
- duplex));
+
+ virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
+
if (ethtool_validate_duplex(duplex))
vi->duplex = duplex;
}
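Virtio config-space fields are little-endian on modern (VIRTIO_F_VERSION_1) devices, and virtio_cread_le() both makes that explicit and derives the field width and offset from the structure member, replacing the open-coded offsetof() plus virtio_cread32()/virtio_cread8() calls. A minimal sketch of the same pattern wrapped in a helper (the helper name is hypothetical):

#include <linux/virtio_config.h>
#include <linux/virtio_net.h>

/* Read the little-endian link settings by member name; the macro derives
 * the field width and offset from the structure member itself.
 */
static void demo_read_link_settings(struct virtio_device *vdev,
				    u32 *speed, u8 *duplex)
{
	virtio_cread_le(vdev, struct virtio_net_config, speed, speed);
	virtio_cread_le(vdev, struct virtio_net_config, duplex, duplex);
}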
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 09087c38fabd..955265656b96 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -1037,9 +1037,25 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
dimm_name = "bus";
}
+ /* Validate command family support against the bus-declared support */
if (cmd == ND_CMD_CALL) {
+ unsigned long *mask;
+
if (copy_from_user(&pkg, p, sizeof(pkg)))
return -EFAULT;
+
+ if (nvdimm) {
+ if (pkg.nd_family > NVDIMM_FAMILY_MAX)
+ return -EINVAL;
+ mask = &nd_desc->dimm_family_mask;
+ } else {
+ if (pkg.nd_family > NVDIMM_BUS_FAMILY_MAX)
+ return -EINVAL;
+ mask = &nd_desc->bus_family_mask;
+ }
+
+ if (!test_bit(pkg.nd_family, mask))
+ return -EINVAL;
}
if (!desc ||
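ND_CMD_CALL packages are now validated against a per-bus family mask before being passed through, so a bus provider has to declare which command families it implements. A sketch of how a provider might populate those masks at registration time; the specific family constants (NVDIMM_FAMILY_INTEL, NVDIMM_BUS_FAMILY_NFIT) are assumptions based on the ACPI NFIT driver:

#include <linux/bitops.h>
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>

/* Hypothetical provider setup: advertise the command families it accepts. */
static void demo_init_families(struct nvdimm_bus_descriptor *nd_desc)
{
	/* Per-DIMM pass-through families, checked when an nvdimm is targeted */
	set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);

	/* Bus-level pass-through families, checked when "bus" is targeted */
	set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask);
}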
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index fe9bd6febdd2..c21ba0602029 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -4,6 +4,7 @@
*/
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
+#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
@@ -389,8 +390,156 @@ static const struct attribute_group nvdimm_bus_attribute_group = {
.attrs = nvdimm_bus_attributes,
};
+static ssize_t capability_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+ enum nvdimm_fwa_capability cap;
+
+ if (!nd_desc->fw_ops)
+ return -EOPNOTSUPP;
+
+ nvdimm_bus_lock(dev);
+ cap = nd_desc->fw_ops->capability(nd_desc);
+ nvdimm_bus_unlock(dev);
+
+ switch (cap) {
+ case NVDIMM_FWA_CAP_QUIESCE:
+ return sprintf(buf, "quiesce\n");
+ case NVDIMM_FWA_CAP_LIVE:
+ return sprintf(buf, "live\n");
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static DEVICE_ATTR_RO(capability);
+
+static ssize_t activate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+ enum nvdimm_fwa_capability cap;
+ enum nvdimm_fwa_state state;
+
+ if (!nd_desc->fw_ops)
+ return -EOPNOTSUPP;
+
+ nvdimm_bus_lock(dev);
+ cap = nd_desc->fw_ops->capability(nd_desc);
+ state = nd_desc->fw_ops->activate_state(nd_desc);
+ nvdimm_bus_unlock(dev);
+
+ if (cap < NVDIMM_FWA_CAP_QUIESCE)
+ return -EOPNOTSUPP;
+
+ switch (state) {
+ case NVDIMM_FWA_IDLE:
+ return sprintf(buf, "idle\n");
+ case NVDIMM_FWA_BUSY:
+ return sprintf(buf, "busy\n");
+ case NVDIMM_FWA_ARMED:
+ return sprintf(buf, "armed\n");
+ case NVDIMM_FWA_ARM_OVERFLOW:
+ return sprintf(buf, "overflow\n");
+ default:
+ return -ENXIO;
+ }
+}
+
+static int exec_firmware_activate(void *data)
+{
+ struct nvdimm_bus_descriptor *nd_desc = data;
+
+ return nd_desc->fw_ops->activate(nd_desc);
+}
+
+static ssize_t activate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+ enum nvdimm_fwa_state state;
+ bool quiesce;
+ ssize_t rc;
+
+ if (!nd_desc->fw_ops)
+ return -EOPNOTSUPP;
+
+ if (sysfs_streq(buf, "live"))
+ quiesce = false;
+ else if (sysfs_streq(buf, "quiesce"))
+ quiesce = true;
+ else
+ return -EINVAL;
+
+ nvdimm_bus_lock(dev);
+ state = nd_desc->fw_ops->activate_state(nd_desc);
+
+ switch (state) {
+ case NVDIMM_FWA_BUSY:
+ rc = -EBUSY;
+ break;
+ case NVDIMM_FWA_ARMED:
+ case NVDIMM_FWA_ARM_OVERFLOW:
+ if (quiesce)
+ rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc);
+ else
+ rc = nd_desc->fw_ops->activate(nd_desc);
+ break;
+ case NVDIMM_FWA_IDLE:
+ default:
+ rc = -ENXIO;
+ }
+ nvdimm_bus_unlock(dev);
+
+ if (rc == 0)
+ rc = len;
+ return rc;
+}
+
+static DEVICE_ATTR_ADMIN_RW(activate);
+
+static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, typeof(*dev), kobj);
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+ enum nvdimm_fwa_capability cap;
+
+ /*
+ * Both 'activate' and 'capability' disappear when no fw_ops are
+ * registered, or when a capability below 'quiesce' is indicated.
+ */
+ if (!nd_desc->fw_ops)
+ return 0;
+
+ nvdimm_bus_lock(dev);
+ cap = nd_desc->fw_ops->capability(nd_desc);
+ nvdimm_bus_unlock(dev);
+
+ if (cap < NVDIMM_FWA_CAP_QUIESCE)
+ return 0;
+
+ return a->mode;
+}
+static struct attribute *nvdimm_bus_firmware_attributes[] = {
+ &dev_attr_activate.attr,
+ &dev_attr_capability.attr,
+ NULL,
+};
+
+static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
+ .name = "firmware",
+ .attrs = nvdimm_bus_firmware_attributes,
+ .is_visible = nvdimm_bus_firmware_visible,
+};
+
const struct attribute_group *nvdimm_bus_attribute_groups[] = {
&nvdimm_bus_attribute_group,
+ &nvdimm_bus_firmware_attribute_group,
NULL,
};
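
For orientation only, a hedged userspace sketch of the bus-level ABI added above (reading the advertised capability, then requesting a quiesced activation). The /sys/bus/nd/devices/ndbus0 path and device name are assumptions for illustration, not part of this patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char cap[32] = "";
	int fd;

	/* "ndbus0" is an assumed bus instance name. */
	fd = open("/sys/bus/nd/devices/ndbus0/firmware/capability", O_RDONLY);
	if (fd < 0 || read(fd, cap, sizeof(cap) - 1) < 0) {
		perror("capability");
		return 1;
	}
	close(fd);
	printf("activation capability: %s", cap);	/* "live" or "quiesce" */

	/* Request a quiesced activation (the hibernate_quiet_exec path). */
	fd = open("/sys/bus/nd/devices/ndbus0/firmware/activate", O_WRONLY);
	if (fd < 0 || write(fd, "quiesce", strlen("quiesce")) < 0) {
		perror("activate");
		return 1;
	}
	close(fd);
	return 0;
}
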
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index b7b77e8d9027..61374def5155 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -363,14 +363,14 @@ __weak ssize_t security_show(struct device *dev,
{
struct nvdimm *nvdimm = to_nvdimm(dev);
+ if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
+ return sprintf(buf, "overwrite\n");
if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
return sprintf(buf, "disabled\n");
if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
return sprintf(buf, "unlocked\n");
if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
return sprintf(buf, "locked\n");
- if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
- return sprintf(buf, "overwrite\n");
return -ENOTTY;
}
@@ -446,9 +446,124 @@ static const struct attribute_group nvdimm_attribute_group = {
.is_visible = nvdimm_visible,
};
+static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ enum nvdimm_fwa_result result;
+
+ if (!nvdimm->fw_ops)
+ return -EOPNOTSUPP;
+
+ nvdimm_bus_lock(dev);
+ result = nvdimm->fw_ops->activate_result(nvdimm);
+ nvdimm_bus_unlock(dev);
+
+ switch (result) {
+ case NVDIMM_FWA_RESULT_NONE:
+ return sprintf(buf, "none\n");
+ case NVDIMM_FWA_RESULT_SUCCESS:
+ return sprintf(buf, "success\n");
+ case NVDIMM_FWA_RESULT_FAIL:
+ return sprintf(buf, "fail\n");
+ case NVDIMM_FWA_RESULT_NOTSTAGED:
+ return sprintf(buf, "not_staged\n");
+ case NVDIMM_FWA_RESULT_NEEDRESET:
+ return sprintf(buf, "need_reset\n");
+ default:
+ return -ENXIO;
+ }
+}
+static DEVICE_ATTR_ADMIN_RO(result);
+
+static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ enum nvdimm_fwa_state state;
+
+ if (!nvdimm->fw_ops)
+ return -EOPNOTSUPP;
+
+ nvdimm_bus_lock(dev);
+ state = nvdimm->fw_ops->activate_state(nvdimm);
+ nvdimm_bus_unlock(dev);
+
+ switch (state) {
+ case NVDIMM_FWA_IDLE:
+ return sprintf(buf, "idle\n");
+ case NVDIMM_FWA_BUSY:
+ return sprintf(buf, "busy\n");
+ case NVDIMM_FWA_ARMED:
+ return sprintf(buf, "armed\n");
+ default:
+ return -ENXIO;
+ }
+}
+
+static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ enum nvdimm_fwa_trigger arg;
+ int rc;
+
+ if (!nvdimm->fw_ops)
+ return -EOPNOTSUPP;
+
+ if (sysfs_streq(buf, "arm"))
+ arg = NVDIMM_FWA_ARM;
+ else if (sysfs_streq(buf, "disarm"))
+ arg = NVDIMM_FWA_DISARM;
+ else
+ return -EINVAL;
+
+ nvdimm_bus_lock(dev);
+ rc = nvdimm->fw_ops->arm(nvdimm, arg);
+ nvdimm_bus_unlock(dev);
+
+ if (rc < 0)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_ADMIN_RW(activate);
+
+static struct attribute *nvdimm_firmware_attributes[] = {
+	&dev_attr_activate.attr,
+	&dev_attr_result.attr,
+	NULL,
+};
+
+static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, typeof(*dev), kobj);
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ enum nvdimm_fwa_capability cap;
+
+ if (!nd_desc->fw_ops)
+ return 0;
+ if (!nvdimm->fw_ops)
+ return 0;
+
+ nvdimm_bus_lock(dev);
+ cap = nd_desc->fw_ops->capability(nd_desc);
+ nvdimm_bus_unlock(dev);
+
+ if (cap < NVDIMM_FWA_CAP_QUIESCE)
+ return 0;
+
+ return a->mode;
+}
+
+static const struct attribute_group nvdimm_firmware_attribute_group = {
+ .name = "firmware",
+ .attrs = nvdimm_firmware_attributes,
+ .is_visible = nvdimm_firmware_visible,
+};
+
static const struct attribute_group *nvdimm_attribute_groups[] = {
&nd_device_attribute_group,
&nvdimm_attribute_group,
+ &nvdimm_firmware_attribute_group,
NULL,
};
@@ -467,7 +582,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
void *provider_data, const struct attribute_group **groups,
unsigned long flags, unsigned long cmd_mask, int num_flush,
struct resource *flush_wpq, const char *dimm_id,
- const struct nvdimm_security_ops *sec_ops)
+ const struct nvdimm_security_ops *sec_ops,
+ const struct nvdimm_fw_ops *fw_ops)
{
struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
struct device *dev;
@@ -497,6 +613,7 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
dev->devt = MKDEV(nvdimm_major, nvdimm->id);
dev->groups = groups;
nvdimm->sec.ops = sec_ops;
+ nvdimm->fw_ops = fw_ops;
nvdimm->sec.overwrite_tmo = 0;
INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
/*
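
A companion sketch for the per-DIMM half of the interface: arm a DIMM for the next activation, then read back the last activation result. Again, the nmem0 name and paths are assumed for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char result[32] = "";
	int fd;

	/* "nmem0" is an assumed DIMM device name. */
	fd = open("/sys/bus/nd/devices/nmem0/firmware/activate", O_WRONLY);
	if (fd < 0 || write(fd, "arm", strlen("arm")) < 0) {
		perror("arm");
		return 1;
	}
	close(fd);

	fd = open("/sys/bus/nd/devices/nmem0/firmware/result", O_RDONLY);
	if (fd < 0 || read(fd, result, sizeof(result) - 1) < 0) {
		perror("result");
		return 1;
	}
	close(fd);
	printf("last activation result: %s", result);
	return 0;
}
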
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index ae155e860fdc..6da67f4d641a 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1309,7 +1309,7 @@ static ssize_t resource_show(struct device *dev,
return -ENXIO;
return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
-static DEVICE_ATTR(resource, 0400, resource_show, NULL);
+static DEVICE_ATTR_ADMIN_RO(resource);
static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
4096, 4104, 4160, 4224, 0 };
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index ddb9d97d9129..564faa36a3ca 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -45,6 +45,7 @@ struct nvdimm {
struct kernfs_node *overwrite_state;
} sec;
struct delayed_work dwork;
+ const struct nvdimm_fw_ops *fw_ops;
};
static inline unsigned long nvdimm_security_flags(
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 34db557dbad1..3e11ef8d3f5b 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -218,7 +218,7 @@ static ssize_t resource_show(struct device *dev,
return rc;
}
-static DEVICE_ATTR(resource, 0400, resource_show, NULL);
+static DEVICE_ATTR_ADMIN_RO(resource);
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index c3237c2b03a6..ef23119db574 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -605,7 +605,7 @@ static ssize_t resource_show(struct device *dev,
return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
-static DEVICE_ATTR(resource, 0400, resource_show, NULL);
+static DEVICE_ATTR_ADMIN_RO(resource);
static ssize_t persistence_domain_show(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 4cef69bd3c1b..4b80150e4afa 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -450,14 +450,19 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
else
dev_dbg(&nvdimm->dev, "overwrite completed\n");
- if (nvdimm->sec.overwrite_state)
- sysfs_notify_dirent(nvdimm->sec.overwrite_state);
+ /*
+ * Mark the overwrite work done and update dimm security flags,
+ * then send a sysfs event notification to wake up userspace
+	 * poll threads so they can pick up the changed state.
+ */
nvdimm->sec.overwrite_tmo = 0;
clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
- put_device(&nvdimm->dev);
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
- nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
+ nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
+ if (nvdimm->sec.overwrite_state)
+ sysfs_notify_dirent(nvdimm->sec.overwrite_state);
+ put_device(&nvdimm->dev);
}
void nvdimm_security_overwrite_query(struct work_struct *work)
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index 5e3d07b47e0c..726c7354d465 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -58,9 +58,9 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
goto out_err;
}
- virtio_cread(vpmem->vdev, struct virtio_pmem_config,
+ virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
start, &vpmem->start);
- virtio_cread(vpmem->vdev, struct virtio_pmem_config,
+ virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
size, &vpmem->size);
res.start = vpmem->start;
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index cf072153bdc5..a056031dee81 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -218,6 +218,7 @@ config CROS_EC_TYPEC
tristate "ChromeOS EC Type-C Connector Control"
depends on MFD_CROS_EC_DEV && TYPEC
depends on CROS_USBPD_NOTIFY
+ depends on USB_ROLE_SWITCH
default MFD_CROS_EC_DEV
help
If you say Y here, you get support for accessing Type C connector
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index ecfada00e6c5..272c89837d74 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -242,6 +242,25 @@ static ssize_t cros_ec_pdinfo_read(struct file *file,
read_buf, p - read_buf);
}
+static bool cros_ec_uptime_is_supported(struct cros_ec_device *ec_dev)
+{
+ struct {
+ struct cros_ec_command cmd;
+ struct ec_response_uptime_info resp;
+ } __packed msg = {};
+ int ret;
+
+ msg.cmd.command = EC_CMD_GET_UPTIME_INFO;
+ msg.cmd.insize = sizeof(msg.resp);
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg.cmd);
+ if (ret == -EPROTO && msg.cmd.result == EC_RES_INVALID_COMMAND)
+ return false;
+
+	/* Other errors may be transient, so do not rule out support. */
+ return true;
+}
+
static ssize_t cros_ec_uptime_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -444,8 +463,9 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
debugfs_create_file("pdinfo", 0444, debug_info->dir, debug_info,
&cros_ec_pdinfo_fops);
- debugfs_create_file("uptime", 0444, debug_info->dir, debug_info,
- &cros_ec_uptime_fops);
+ if (cros_ec_uptime_is_supported(ec->ec_dev))
+ debugfs_create_file("uptime", 0444, debug_info->dir, debug_info,
+ &cros_ec_uptime_fops);
debugfs_create_x32("last_resume_result", 0444, debug_info->dir,
&ec->ec_dev->last_resume_result);
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index ed794a7ddba9..81364029af36 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -681,8 +681,10 @@ static int cros_ec_ishtp_probe(struct ishtp_cl_device *cl_device)
/* Register croc_ec_dev mfd */
rv = cros_ec_dev_init(client_data);
- if (rv)
+ if (rv) {
+ down_write(&init_lock);
goto end_cros_ec_dev_init_error;
+ }
return 0;
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 3e745e0fe092..8d52b3b4bd4e 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -208,6 +208,12 @@ static int cros_ec_get_host_event_wake_mask(struct cros_ec_device *ec_dev,
msg->insize = sizeof(*r);
ret = send_command(ec_dev, msg);
+ if (ret >= 0) {
+ if (msg->result == EC_RES_INVALID_COMMAND)
+ return -EOPNOTSUPP;
+ if (msg->result != EC_RES_SUCCESS)
+ return -EPROTO;
+ }
if (ret > 0) {
r = (struct ec_response_host_event_mask *)msg->data;
*mask = r->mask;
@@ -469,14 +475,33 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
&ver_mask);
ec_dev->host_sleep_v1 = (ret >= 0 && (ver_mask & EC_VER_MASK(1)));
- /*
- * Get host event wake mask, assume all events are wake events
- * if unavailable.
- */
+ /* Get host event wake mask. */
ret = cros_ec_get_host_event_wake_mask(ec_dev, proto_msg,
&ec_dev->host_event_wake_mask);
- if (ret < 0)
- ec_dev->host_event_wake_mask = U32_MAX;
+ if (ret < 0) {
+ /*
+ * If the EC doesn't support EC_CMD_HOST_EVENT_GET_WAKE_MASK,
+ * use a reasonable default. Note that we ignore various
+ * battery, AC status, and power-state events, because (a)
+ * those can be quite common (e.g., when sitting at full
+ * charge, on AC) and (b) these are not actionable wake events;
+ * if anything, we'd like to continue suspending (to save
+ * power), not wake up.
+ */
+ ec_dev->host_event_wake_mask = U32_MAX &
+ ~(BIT(EC_HOST_EVENT_AC_DISCONNECTED) |
+ BIT(EC_HOST_EVENT_BATTERY_LOW) |
+ BIT(EC_HOST_EVENT_BATTERY_CRITICAL) |
+ BIT(EC_HOST_EVENT_PD_MCU) |
+ BIT(EC_HOST_EVENT_BATTERY_STATUS));
+ /*
+ * Old ECs may not support this command. Complain about all
+ * other errors.
+ */
+ if (ret != -EOPNOTSUPP)
+ dev_err(ec_dev->dev,
+ "failed to retrieve wake mask: %d\n", ret);
+ }
ret = 0;
@@ -496,8 +521,8 @@ EXPORT_SYMBOL(cros_ec_query_all);
*
* Return: 0 on success or negative error code.
*/
-int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg)
+static int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
{
int ret;
@@ -541,7 +566,6 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
return ret;
}
-EXPORT_SYMBOL(cros_ec_cmd_xfer);
/**
* cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
index 7e8629e3db74..30d0ba3b8889 100644
--- a/drivers/platform/chrome/cros_ec_rpmsg.c
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -38,6 +38,9 @@ struct cros_ec_rpmsg_response {
* @rpdev: rpmsg device we are connected to
* @xfer_ack: completion for host command transfer.
* @host_event_work: Work struct for pending host event.
+ * @ept: The rpmsg endpoint of this channel.
+ * @has_pending_host_event: Boolean used to check if there is a pending event.
+ * @probe_done: Flag to indicate that probe is done.
*/
struct cros_ec_rpmsg {
struct rpmsg_device *rpdev;
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
index 24e48d96ed76..8921f24e83ba 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
@@ -419,9 +419,7 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
* Disable filtering since we might add more jitter
* if b is in a random point in time.
*/
- new_timestamp = fifo_timestamp -
- fifo_info->timestamp * 1000 +
- in->timestamp * 1000;
+ new_timestamp = c - b * 1000 + a * 1000;
/*
* The timestamp can be stale if we had to use the fifo
* info timestamp.
@@ -675,29 +673,22 @@ done_with_this_batch:
* cros_ec_sensor_ring_spread_add_legacy: Calculate proper timestamps then
* add to ringbuffer (legacy).
*
- * Note: This assumes we're running old firmware, where every sample's timestamp
- * is after the sample. Run if tight_timestamps == false.
- *
- * If there is a sample with a proper timestamp
+ * Note: This assumes we're running old firmware, where timestamp
+ * is inserted after its sample(s). There can be several samples between
+ * timestamps, so several samples can have the same timestamp.
*
* timestamp | count
* -----------------
- * older_unprocess_out --> TS1 | 1
- * TS1 | 2
- * out --> TS1 | 3
- * next_out --> TS2 |
- *
- * We spread time for the samples [older_unprocess_out .. out]
- * between TS1 and TS2: [TS1+1/4, TS1+2/4, TS1+3/4, TS2].
+ * 1st sample --> TS1 | 1
+ * TS2 | 2
+ * TS2 | 3
+ * TS3 | 4
+ * last_out -->
*
- * If we reach the end of the samples, we compare with the
- * current timestamp:
*
- * older_unprocess_out --> TS1 | 1
- * TS1 | 2
- * out --> TS1 | 3
+ * We spread time for the samples using period p = (current - TS1)/count,
+ * e.g. for the 4 samples above: [TS1+p, TS1+2p, TS1+3p, current_timestamp].
*
- * We know have [TS1+1/3, TS1+2/3, current timestamp]
*/
static void
cros_ec_sensor_ring_spread_add_legacy(struct cros_ec_sensorhub *sensorhub,
@@ -710,58 +701,37 @@ cros_ec_sensor_ring_spread_add_legacy(struct cros_ec_sensorhub *sensorhub,
int i;
for_each_set_bit(i, &sensor_mask, sensorhub->sensor_num) {
- s64 older_timestamp;
s64 timestamp;
- struct cros_ec_sensors_ring_sample *older_unprocess_out =
- sensorhub->ring;
- struct cros_ec_sensors_ring_sample *next_out;
- int count = 1;
-
- for (out = sensorhub->ring; out < last_out; out = next_out) {
- s64 time_period;
+ int count = 0;
+ s64 time_period;
- next_out = out + 1;
+ for (out = sensorhub->ring; out < last_out; out++) {
if (out->sensor_id != i)
continue;
/* Timestamp to start with */
- older_timestamp = out->timestamp;
-
- /* Find next sample. */
- while (next_out < last_out && next_out->sensor_id != i)
- next_out++;
+ timestamp = out->timestamp;
+ out++;
+ count = 1;
+ break;
+ }
+ for (; out < last_out; out++) {
+ /* Find last sample. */
+ if (out->sensor_id != i)
+ continue;
+ count++;
+ }
+ if (count == 0)
+ continue;
- if (next_out >= last_out) {
- timestamp = current_timestamp;
- } else {
- timestamp = next_out->timestamp;
- if (timestamp == older_timestamp) {
- count++;
- continue;
- }
- }
+ /* Spread uniformly between the first and last samples. */
+ time_period = div_s64(current_timestamp - timestamp, count);
- /*
- * The next sample has a new timestamp, spread the
- * unprocessed samples.
- */
- if (next_out < last_out)
- count++;
- time_period = div_s64(timestamp - older_timestamp,
- count);
-
- for (; older_unprocess_out <= out;
- older_unprocess_out++) {
- if (older_unprocess_out->sensor_id != i)
- continue;
- older_timestamp += time_period;
- older_unprocess_out->timestamp =
- older_timestamp;
- }
- count = 1;
- /* The next_out sample has a valid timestamp, skip. */
- next_out++;
- older_unprocess_out = next_out;
+ for (out = sensorhub->ring; out < last_out; out++) {
+ if (out->sensor_id != i)
+ continue;
+ timestamp += time_period;
+ out->timestamp = timestamp;
}
}
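
A standalone sketch of the arithmetic the rewritten loop performs, with made-up numbers: 4 samples for one sensor, the first EC timestamp at 1000 us and current_timestamp at 1400 us give a period of 100 us, so the samples come out stamped 1100, 1200, 1300 and 1400:

#include <stdio.h>

int main(void)
{
	long long ts1 = 1000, current_ts = 1400, ts = ts1;
	int count = 4, i;
	long long period = (current_ts - ts1) / count;	/* 100 us */

	for (i = 0; i < count; i++) {
		ts += period;
		printf("sample %d -> %lld us\n", i, ts);
	}
	return 0;
}
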
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index d09260382550..dfa1f816a45f 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -148,6 +148,10 @@ static int terminate_request(struct cros_ec_device *ec_dev)
* receive_n_bytes - receive n bytes from the EC.
*
* Assumes buf is a pointer into the ec_dev->din buffer
+ *
+ * @ec_dev: ChromeOS EC device.
+ * @buf: Pointer to the buffer receiving the data.
+ * @n: Number of bytes to receive.
*/
static int receive_n_bytes(struct cros_ec_device *ec_dev, u8 *buf, int n)
{
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 66b8d21092af..3fcd27ec9ad8 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -14,9 +14,21 @@
#include <linux/platform_data/cros_usbpd_notify.h>
#include <linux/platform_device.h>
#include <linux/usb/typec.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_tbt.h>
+#include <linux/usb/role.h>
#define DRV_NAME "cros-ec-typec"
+/* Supported alt modes. */
+enum {
+ CROS_EC_ALTMODE_DP = 0,
+ CROS_EC_ALTMODE_TBT,
+ CROS_EC_ALTMODE_MAX,
+};
+
/* Per port data. */
struct cros_typec_port {
struct typec_port *port;
@@ -25,6 +37,16 @@ struct cros_typec_port {
struct typec_partner *partner;
/* Port partner PD identity info. */
struct usb_pd_identity p_identity;
+ struct typec_switch *ori_sw;
+ struct typec_mux *mux;
+ struct usb_role_switch *role_sw;
+
+ /* Variables keeping track of switch state. */
+ struct typec_mux_state state;
+ uint8_t mux_flags;
+
+ /* Port alt modes. */
+ struct typec_altmode p_altmode[CROS_EC_ALTMODE_MAX];
};
/* Platform-specific data for the Chrome OS EC Type C controller. */
@@ -32,10 +54,11 @@ struct cros_typec_data {
struct device *dev;
struct cros_ec_device *ec;
int num_ports;
- unsigned int cmd_ver;
+ unsigned int pd_ctrl_ver;
/* Array of ports, indexed by port number. */
struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS];
struct notifier_block nb;
+ struct work_struct port_work;
};
static int cros_typec_parse_port_props(struct typec_capability *cap,
@@ -84,6 +107,81 @@ static int cros_typec_parse_port_props(struct typec_capability *cap,
return 0;
}
+static int cros_typec_get_switch_handles(struct cros_typec_port *port,
+ struct fwnode_handle *fwnode,
+ struct device *dev)
+{
+ port->mux = fwnode_typec_mux_get(fwnode, NULL);
+ if (IS_ERR(port->mux)) {
+ dev_dbg(dev, "Mux handle not found.\n");
+ goto mux_err;
+ }
+
+ port->ori_sw = fwnode_typec_switch_get(fwnode);
+ if (IS_ERR(port->ori_sw)) {
+ dev_dbg(dev, "Orientation switch handle not found.\n");
+ goto ori_sw_err;
+ }
+
+ port->role_sw = fwnode_usb_role_switch_get(fwnode);
+ if (IS_ERR(port->role_sw)) {
+ dev_dbg(dev, "USB role switch handle not found.\n");
+ goto role_sw_err;
+ }
+
+ return 0;
+
+role_sw_err:
+ usb_role_switch_put(port->role_sw);
+ori_sw_err:
+ typec_switch_put(port->ori_sw);
+mux_err:
+ typec_mux_put(port->mux);
+
+ return -ENODEV;
+}
+
+static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num,
+ bool pd_en)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_partner_desc p_desc = {
+ .usb_pd = pd_en,
+ };
+ int ret = 0;
+
+ /*
+ * Fill an initial PD identity, which will then be updated with info
+ * from the EC.
+ */
+ p_desc.identity = &port->p_identity;
+
+ port->partner = typec_register_partner(port->port, &p_desc);
+ if (IS_ERR(port->partner)) {
+ ret = PTR_ERR(port->partner);
+ port->partner = NULL;
+ }
+
+ return ret;
+}
+
+static void cros_typec_remove_partner(struct cros_typec_data *typec,
+ int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+
+ port->state.alt = NULL;
+ port->state.mode = TYPEC_STATE_USB;
+ port->state.data = NULL;
+
+ usb_role_switch_set_role(port->role_sw, USB_ROLE_NONE);
+ typec_switch_set(port->ori_sw, TYPEC_ORIENTATION_NONE);
+ typec_mux_set(port->mux, &port->state);
+
+ typec_unregister_partner(port->partner);
+ port->partner = NULL;
+}
+
static void cros_unregister_ports(struct cros_typec_data *typec)
{
int i;
@@ -91,10 +189,40 @@ static void cros_unregister_ports(struct cros_typec_data *typec)
for (i = 0; i < typec->num_ports; i++) {
if (!typec->ports[i])
continue;
+ cros_typec_remove_partner(typec, i);
+ usb_role_switch_put(typec->ports[i]->role_sw);
+ typec_switch_put(typec->ports[i]->ori_sw);
+ typec_mux_put(typec->ports[i]->mux);
typec_unregister_port(typec->ports[i]->port);
}
}
+/*
+ * Fake the alt mode structs until we actually start registering Type C port
+ * and partner alt modes.
+ */
+static void cros_typec_register_port_altmodes(struct cros_typec_data *typec,
+ int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+
+ /* All PD capable CrOS devices are assumed to support DP altmode. */
+ port->p_altmode[CROS_EC_ALTMODE_DP].svid = USB_TYPEC_DP_SID;
+ port->p_altmode[CROS_EC_ALTMODE_DP].mode = USB_TYPEC_DP_MODE;
+
+ /*
+ * Register TBT compatibility alt mode. The EC will not enter the mode
+ * if it doesn't support it, so it's safe to register it unconditionally
+ * here for now.
+ */
+ port->p_altmode[CROS_EC_ALTMODE_TBT].svid = USB_TYPEC_TBT_SID;
+ port->p_altmode[CROS_EC_ALTMODE_TBT].mode = TYPEC_ANY_MODE;
+
+ port->state.alt = NULL;
+ port->state.mode = TYPEC_STATE_USB;
+ port->state.data = NULL;
+}
+
static int cros_typec_init_ports(struct cros_typec_data *typec)
{
struct device *dev = typec->dev;
@@ -153,6 +281,13 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
ret = PTR_ERR(cros_port->port);
goto unregister_ports;
}
+
+ ret = cros_typec_get_switch_handles(cros_port, fwnode, dev);
+ if (ret)
+ dev_dbg(dev, "No switch control for port %d\n",
+ port_num);
+
+ cros_typec_register_port_altmodes(typec, port_num);
}
return 0;
@@ -193,30 +328,6 @@ static int cros_typec_ec_command(struct cros_typec_data *typec,
return ret;
}
-static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num,
- bool pd_en)
-{
- struct cros_typec_port *port = typec->ports[port_num];
- struct typec_partner_desc p_desc = {
- .usb_pd = pd_en,
- };
- int ret = 0;
-
- /*
- * Fill an initial PD identity, which will then be updated with info
- * from the EC.
- */
- p_desc.identity = &port->p_identity;
-
- port->partner = typec_register_partner(port->port, &p_desc);
- if (IS_ERR(port->partner)) {
- ret = PTR_ERR(port->partner);
- port->partner = NULL;
- }
-
- return ret;
-}
-
static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
int port_num, struct ec_response_usb_pd_control *resp)
{
@@ -270,16 +381,166 @@ static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
} else {
if (!typec->ports[port_num]->partner)
return;
+ cros_typec_remove_partner(typec, port_num);
+ }
+}
- typec_unregister_partner(typec->ports[port_num]->partner);
- typec->ports[port_num]->partner = NULL;
+static int cros_typec_get_mux_info(struct cros_typec_data *typec, int port_num,
+ struct ec_response_usb_pd_mux_info *resp)
+{
+ struct ec_params_usb_pd_mux_info req = {
+ .port = port_num,
+ };
+
+ return cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_MUX_INFO, &req,
+ sizeof(req), resp, sizeof(*resp));
+}
+
+static int cros_typec_usb_safe_state(struct cros_typec_port *port)
+{
+ port->state.mode = TYPEC_STATE_SAFE;
+
+ return typec_mux_set(port->mux, &port->state);
+}
+
+/*
+ * Spoof the VDOs that were likely communicated by the partner for TBT alt
+ * mode.
+ */
+static int cros_typec_enable_tbt(struct cros_typec_data *typec,
+ int port_num,
+ struct ec_response_usb_pd_control_v2 *pd_ctrl)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_thunderbolt_data data;
+ int ret;
+
+ if (typec->pd_ctrl_ver < 2) {
+ dev_err(typec->dev,
+ "PD_CTRL version too old: %d\n", typec->pd_ctrl_ver);
+ return -ENOTSUPP;
+ }
+
+ /* Device Discover Mode VDO */
+ data.device_mode = TBT_MODE;
+
+ if (pd_ctrl->control_flags & USB_PD_CTRL_TBT_LEGACY_ADAPTER)
+ data.device_mode = TBT_SET_ADAPTER(TBT_ADAPTER_TBT3);
+
+ /* Cable Discover Mode VDO */
+ data.cable_mode = TBT_MODE;
+ data.cable_mode |= TBT_SET_CABLE_SPEED(pd_ctrl->cable_speed);
+
+ if (pd_ctrl->control_flags & USB_PD_CTRL_OPTICAL_CABLE)
+ data.cable_mode |= TBT_CABLE_OPTICAL;
+
+ if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_LINK_UNIDIR)
+ data.cable_mode |= TBT_CABLE_LINK_TRAINING;
+
+ if (pd_ctrl->cable_gen)
+ data.cable_mode |= TBT_CABLE_ROUNDED;
+
+ /* Enter Mode VDO */
+ data.enter_vdo = TBT_SET_CABLE_SPEED(pd_ctrl->cable_speed);
+
+ if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_CABLE)
+ data.enter_vdo |= TBT_ENTER_MODE_ACTIVE_CABLE;
+
+ if (!port->state.alt) {
+ port->state.alt = &port->p_altmode[CROS_EC_ALTMODE_TBT];
+ ret = cros_typec_usb_safe_state(port);
+ if (ret)
+ return ret;
+ }
+
+ port->state.data = &data;
+ port->state.mode = TYPEC_TBT_MODE;
+
+ return typec_mux_set(port->mux, &port->state);
+}
+
+/* Spoof the VDOs that were likely communicated by the partner. */
+static int cros_typec_enable_dp(struct cros_typec_data *typec,
+ int port_num,
+ struct ec_response_usb_pd_control_v2 *pd_ctrl)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_displayport_data dp_data;
+ int ret;
+
+ if (typec->pd_ctrl_ver < 2) {
+ dev_err(typec->dev,
+ "PD_CTRL version too old: %d\n", typec->pd_ctrl_ver);
+ return -ENOTSUPP;
+ }
+
+ /* Status VDO. */
+ dp_data.status = DP_STATUS_ENABLED;
+ if (port->mux_flags & USB_PD_MUX_HPD_IRQ)
+ dp_data.status |= DP_STATUS_IRQ_HPD;
+ if (port->mux_flags & USB_PD_MUX_HPD_LVL)
+ dp_data.status |= DP_STATUS_HPD_STATE;
+
+ /* Configuration VDO. */
+ dp_data.conf = DP_CONF_SET_PIN_ASSIGN(pd_ctrl->dp_mode);
+ if (!port->state.alt) {
+ port->state.alt = &port->p_altmode[CROS_EC_ALTMODE_DP];
+ ret = cros_typec_usb_safe_state(port);
+ if (ret)
+ return ret;
}
+
+ port->state.data = &dp_data;
+ port->state.mode = TYPEC_MODAL_STATE(ffs(pd_ctrl->dp_mode));
+
+ return typec_mux_set(port->mux, &port->state);
+}
+
+static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
+ uint8_t mux_flags,
+ struct ec_response_usb_pd_control_v2 *pd_ctrl)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ enum typec_orientation orientation;
+ int ret;
+
+ if (!port->partner)
+ return 0;
+
+ if (mux_flags & USB_PD_MUX_POLARITY_INVERTED)
+ orientation = TYPEC_ORIENTATION_REVERSE;
+ else
+ orientation = TYPEC_ORIENTATION_NORMAL;
+
+ ret = typec_switch_set(port->ori_sw, orientation);
+ if (ret)
+ return ret;
+
+ if (mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) {
+ ret = cros_typec_enable_tbt(typec, port_num, pd_ctrl);
+ } else if (mux_flags & USB_PD_MUX_DP_ENABLED) {
+ ret = cros_typec_enable_dp(typec, port_num, pd_ctrl);
+ } else if (mux_flags & USB_PD_MUX_SAFE_MODE) {
+ ret = cros_typec_usb_safe_state(port);
+ } else if (mux_flags & USB_PD_MUX_USB_ENABLED) {
+ port->state.alt = NULL;
+ port->state.mode = TYPEC_STATE_USB;
+ ret = typec_mux_set(port->mux, &port->state);
+ } else {
+ dev_info(typec->dev,
+ "Unsupported mode requested, mux flags: %x\n",
+ mux_flags);
+ ret = -ENOTSUPP;
+ }
+
+ return ret;
}
static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
{
struct ec_params_usb_pd_control req;
- struct ec_response_usb_pd_control_v1 resp;
+ struct ec_response_usb_pd_control_v2 resp;
+ struct ec_response_usb_pd_mux_info mux_resp;
int ret;
if (port_num < 0 || port_num >= typec->num_ports) {
@@ -293,7 +554,7 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
req.mux = USB_PD_CTRL_MUX_NO_CHANGE;
req.swap = USB_PD_CTRL_SWAP_NONE;
- ret = cros_typec_ec_command(typec, typec->cmd_ver,
+ ret = cros_typec_ec_command(typec, typec->pd_ctrl_ver,
EC_CMD_USB_PD_CONTROL, &req, sizeof(req),
&resp, sizeof(resp));
if (ret < 0)
@@ -304,13 +565,33 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
dev_dbg(typec->dev, "Polarity %d: 0x%hhx\n", port_num, resp.polarity);
dev_dbg(typec->dev, "State %d: %s\n", port_num, resp.state);
- if (typec->cmd_ver == 1)
- cros_typec_set_port_params_v1(typec, port_num, &resp);
+ if (typec->pd_ctrl_ver != 0)
+ cros_typec_set_port_params_v1(typec, port_num,
+ (struct ec_response_usb_pd_control_v1 *)&resp);
else
cros_typec_set_port_params_v0(typec, port_num,
(struct ec_response_usb_pd_control *) &resp);
- return 0;
+ /* Update the switches if they exist, according to requested state */
+ ret = cros_typec_get_mux_info(typec, port_num, &mux_resp);
+ if (ret < 0) {
+ dev_warn(typec->dev,
+ "Failed to get mux info for port: %d, err = %d\n",
+ port_num, ret);
+ return 0;
+ }
+
+ /* No change needs to be made, let's exit early. */
+ if (typec->ports[port_num]->mux_flags == mux_resp.flags)
+ return 0;
+
+ typec->ports[port_num]->mux_flags = mux_resp.flags;
+ ret = cros_typec_configure_mux(typec, port_num, mux_resp.flags, &resp);
+ if (ret)
+ dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret);
+
+ return usb_role_switch_set_role(typec->ports[port_num]->role_sw,
+ !!(resp.role & PD_CTRL_RESP_ROLE_DATA));
}
static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
@@ -327,22 +608,22 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
if (ret < 0)
return ret;
- if (resp.version_mask & EC_VER_MASK(1))
- typec->cmd_ver = 1;
+ if (resp.version_mask & EC_VER_MASK(2))
+ typec->pd_ctrl_ver = 2;
+ else if (resp.version_mask & EC_VER_MASK(1))
+ typec->pd_ctrl_ver = 1;
else
- typec->cmd_ver = 0;
+ typec->pd_ctrl_ver = 0;
dev_dbg(typec->dev, "PD Control has version mask 0x%hhx\n",
- typec->cmd_ver);
+ typec->pd_ctrl_ver);
return 0;
}
-static int cros_ec_typec_event(struct notifier_block *nb,
- unsigned long host_event, void *_notify)
+static void cros_typec_port_work(struct work_struct *work)
{
- struct cros_typec_data *typec = container_of(nb, struct cros_typec_data,
- nb);
+ struct cros_typec_data *typec = container_of(work, struct cros_typec_data, port_work);
int ret, i;
for (i = 0; i < typec->num_ports; i++) {
@@ -350,6 +631,14 @@ static int cros_ec_typec_event(struct notifier_block *nb,
if (ret < 0)
dev_warn(typec->dev, "Update failed for port: %d\n", i);
}
+}
+
+static int cros_ec_typec_event(struct notifier_block *nb,
+ unsigned long host_event, void *_notify)
+{
+ struct cros_typec_data *typec = container_of(nb, struct cros_typec_data, nb);
+
+ schedule_work(&typec->port_work);
return NOTIFY_OK;
}
@@ -408,6 +697,12 @@ static int cros_typec_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ INIT_WORK(&typec->port_work, cros_typec_port_work);
+
+ /*
+ * Safe to call port update here, since we haven't registered the
+ * PD notifier yet.
+ */
for (i = 0; i < typec->num_ports; i++) {
ret = cros_typec_port_update(typec, i);
if (ret < 0)
@@ -426,11 +721,35 @@ unregister_ports:
return ret;
}
+static int __maybe_unused cros_typec_suspend(struct device *dev)
+{
+ struct cros_typec_data *typec = dev_get_drvdata(dev);
+
+ cancel_work_sync(&typec->port_work);
+
+ return 0;
+}
+
+static int __maybe_unused cros_typec_resume(struct device *dev)
+{
+ struct cros_typec_data *typec = dev_get_drvdata(dev);
+
+ /* Refresh port state. */
+ schedule_work(&typec->port_work);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cros_typec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cros_typec_suspend, cros_typec_resume)
+};
+
static struct platform_driver cros_typec_driver = {
.driver = {
.name = DRV_NAME,
.acpi_match_table = ACPI_PTR(cros_typec_acpi_id),
.of_match_table = of_match_ptr(cros_typec_of_match),
+ .pm = &cros_typec_pm_ops,
},
.probe = cros_typec_probe,
};
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 5739a9669b29..bbc4e71a16ff 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -625,7 +625,10 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
vdev_id = VIRTIO_ID_NET;
hdr_len = sizeof(struct virtio_net_hdr);
config = &fifo->vdev[vdev_id]->config.net;
- if (ntohs(hdr.len) > config->mtu +
+ /* A legacy-only interface for now. */
+ if (ntohs(hdr.len) >
+ __virtio16_to_cpu(virtio_legacy_is_little_endian(),
+ config->mtu) +
MLXBF_TMFIFO_NET_L2_OVERHEAD)
return;
} else {
@@ -1231,8 +1234,12 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
/* Create the network vdev. */
memset(&net_config, 0, sizeof(net_config));
- net_config.mtu = ETH_DATA_LEN;
- net_config.status = VIRTIO_NET_S_LINK_UP;
+
+ /* A legacy-only interface for now. */
+ net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
+ ETH_DATA_LEN);
+ net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
+ VIRTIO_NET_S_LINK_UP);
mlxbf_tmfifo_get_cfg_mac(net_config.mac);
rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
MLXBF_TMFIFO_NET_FEATURES, &net_config,
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index c4d1731295eb..c6659dfea7c7 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -14,6 +14,15 @@ config REMOTEPROC
if REMOTEPROC
+config REMOTEPROC_CDEV
+ bool "Remoteproc character device interface"
+ help
+ Say y here to have a character device interface for the remoteproc
+ framework. Userspace can boot/shutdown remote processors through
+ this interface.
+
+ It's safe to say N if you don't want to use this interface.
+
config IMX_REMOTEPROC
tristate "IMX6/7 remoteproc support"
depends on ARCH_MXC
@@ -116,6 +125,9 @@ config KEYSTONE_REMOTEPROC
It's safe to say N here if you're not interested in the Keystone
DSPs or just want to use a bare minimum kernel.
+config QCOM_PIL_INFO
+ tristate
+
config QCOM_RPROC_COMMON
tristate
@@ -132,6 +144,7 @@ config QCOM_Q6V5_ADSP
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
select MFD_SYSCON
+ select QCOM_PIL_INFO
select QCOM_MDT_LOADER
select QCOM_Q6V5_COMMON
select QCOM_RPROC_COMMON
@@ -148,8 +161,8 @@ config QCOM_Q6V5_MSS
depends on QCOM_SYSMON || QCOM_SYSMON=n
select MFD_SYSCON
select QCOM_MDT_LOADER
+ select QCOM_PIL_INFO
select QCOM_Q6V5_COMMON
- select QCOM_Q6V5_IPA_NOTIFY
select QCOM_RPROC_COMMON
select QCOM_SCM
help
@@ -164,6 +177,7 @@ config QCOM_Q6V5_PAS
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
select MFD_SYSCON
+ select QCOM_PIL_INFO
select QCOM_MDT_LOADER
select QCOM_Q6V5_COMMON
select QCOM_RPROC_COMMON
@@ -182,6 +196,7 @@ config QCOM_Q6V5_WCSS
depends on QCOM_SYSMON || QCOM_SYSMON=n
select MFD_SYSCON
select QCOM_MDT_LOADER
+ select QCOM_PIL_INFO
select QCOM_Q6V5_COMMON
select QCOM_RPROC_COMMON
select QCOM_SCM
@@ -189,9 +204,6 @@ config QCOM_Q6V5_WCSS
Say y here to support the Qualcomm Peripheral Image Loader for the
Hexagon V5 based WCSS remote processors.
-config QCOM_Q6V5_IPA_NOTIFY
- tristate
-
config QCOM_SYSMON
tristate "Qualcomm sysmon driver"
depends on RPMSG
@@ -215,6 +227,7 @@ config QCOM_WCNSS_PIL
depends on QCOM_SMEM
depends on QCOM_SYSMON || QCOM_SYSMON=n
select QCOM_MDT_LOADER
+ select QCOM_PIL_INFO
select QCOM_RPROC_COMMON
select QCOM_SCM
help
@@ -249,6 +262,19 @@ config STM32_RPROC
This can be either built-in or a loadable module.
+config TI_K3_DSP_REMOTEPROC
+ tristate "TI K3 DSP remoteproc support"
+ depends on ARCH_K3
+ select MAILBOX
+ select OMAP2PLUS_MBOX
+ help
+ Say m here to support TI's C66x and C71x DSP remote processor
+ subsystems on various TI K3 family of SoCs through the remote
+ processor framework.
+
+ It's safe to say N here if you're not interested in utilizing
+ the DSP slave processors.
+
endif # REMOTEPROC
endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index e8b886e511f0..3dfa28e6c701 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -5,10 +5,12 @@
obj-$(CONFIG_REMOTEPROC) += remoteproc.o
remoteproc-y := remoteproc_core.o
+remoteproc-y += remoteproc_coredump.o
remoteproc-y += remoteproc_debugfs.o
remoteproc-y += remoteproc_sysfs.o
remoteproc-y += remoteproc_virtio.o
remoteproc-y += remoteproc_elf_loader.o
+obj-$(CONFIG_REMOTEPROC_CDEV) += remoteproc_cdev.o
obj-$(CONFIG_IMX_REMOTEPROC) += imx_rproc.o
obj-$(CONFIG_INGENIC_VPU_RPROC) += ingenic_rproc.o
obj-$(CONFIG_MTK_SCP) += mtk_scp.o mtk_scp_ipi.o
@@ -16,13 +18,13 @@ obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
obj-$(CONFIG_KEYSTONE_REMOTEPROC) += keystone_remoteproc.o
+obj-$(CONFIG_QCOM_PIL_INFO) += qcom_pil_info.o
obj-$(CONFIG_QCOM_RPROC_COMMON) += qcom_common.o
obj-$(CONFIG_QCOM_Q6V5_COMMON) += qcom_q6v5.o
obj-$(CONFIG_QCOM_Q6V5_ADSP) += qcom_q6v5_adsp.o
obj-$(CONFIG_QCOM_Q6V5_MSS) += qcom_q6v5_mss.o
obj-$(CONFIG_QCOM_Q6V5_PAS) += qcom_q6v5_pas.o
obj-$(CONFIG_QCOM_Q6V5_WCSS) += qcom_q6v5_wcss.o
-obj-$(CONFIG_QCOM_Q6V5_IPA_NOTIFY) += qcom_q6v5_ipa_notify.o
obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o
obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
qcom_wcnss_pil-y += qcom_wcnss.o
@@ -30,3 +32,4 @@ qcom_wcnss_pil-y += qcom_wcnss_iris.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
+obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o
diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c
index 189020d77b25..1c2b21a5d178 100644
--- a/drivers/remoteproc/ingenic_rproc.c
+++ b/drivers/remoteproc/ingenic_rproc.c
@@ -11,7 +11,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
#include "remoteproc_internal.h"
@@ -62,6 +61,28 @@ struct vpu {
struct device *dev;
};
+static int ingenic_rproc_prepare(struct rproc *rproc)
+{
+ struct vpu *vpu = rproc->priv;
+ int ret;
+
+ /* The clocks must be enabled for the firmware to be loaded in TCSM */
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(vpu->clks), vpu->clks);
+ if (ret)
+ dev_err(vpu->dev, "Unable to start clocks: %d\n", ret);
+
+ return ret;
+}
+
+static int ingenic_rproc_unprepare(struct rproc *rproc)
+{
+ struct vpu *vpu = rproc->priv;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(vpu->clks), vpu->clks);
+
+ return 0;
+}
+
static int ingenic_rproc_start(struct rproc *rproc)
{
struct vpu *vpu = rproc->priv;
@@ -115,6 +136,8 @@ static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
}
static struct rproc_ops ingenic_rproc_ops = {
+ .prepare = ingenic_rproc_prepare,
+ .unprepare = ingenic_rproc_unprepare,
.start = ingenic_rproc_start,
.stop = ingenic_rproc_stop,
.kick = ingenic_rproc_kick,
@@ -135,16 +158,6 @@ static irqreturn_t vpu_interrupt(int irq, void *data)
return rproc_vq_interrupt(rproc, vring);
}
-static void ingenic_rproc_disable_clks(void *data)
-{
- struct vpu *vpu = data;
-
- pm_runtime_resume(vpu->dev);
- pm_runtime_disable(vpu->dev);
-
- clk_bulk_disable_unprepare(ARRAY_SIZE(vpu->clks), vpu->clks);
-}
-
static int ingenic_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -206,35 +219,13 @@ static int ingenic_rproc_probe(struct platform_device *pdev)
disable_irq(vpu->irq);
- /* The clocks must be enabled for the firmware to be loaded in TCSM */
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(vpu->clks), vpu->clks);
- if (ret) {
- dev_err(dev, "Unable to start clocks\n");
- return ret;
- }
-
- pm_runtime_irq_safe(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
- pm_runtime_use_autosuspend(dev);
-
- ret = devm_add_action_or_reset(dev, ingenic_rproc_disable_clks, vpu);
- if (ret) {
- dev_err(dev, "Unable to register action\n");
- goto out_pm_put;
- }
-
ret = devm_rproc_add(dev, rproc);
if (ret) {
dev_err(dev, "Failed to register remote processor\n");
- goto out_pm_put;
+ return ret;
}
-out_pm_put:
- pm_runtime_put_autosuspend(dev);
-
- return ret;
+ return 0;
}
static const struct of_device_id ingenic_rproc_of_matches[] = {
@@ -243,33 +234,10 @@ static const struct of_device_id ingenic_rproc_of_matches[] = {
};
MODULE_DEVICE_TABLE(of, ingenic_rproc_of_matches);
-static int __maybe_unused ingenic_rproc_suspend(struct device *dev)
-{
- struct vpu *vpu = dev_get_drvdata(dev);
-
- clk_bulk_disable(ARRAY_SIZE(vpu->clks), vpu->clks);
-
- return 0;
-}
-
-static int __maybe_unused ingenic_rproc_resume(struct device *dev)
-{
- struct vpu *vpu = dev_get_drvdata(dev);
-
- return clk_bulk_enable(ARRAY_SIZE(vpu->clks), vpu->clks);
-}
-
-static const struct dev_pm_ops __maybe_unused ingenic_rproc_pm = {
- SET_RUNTIME_PM_OPS(ingenic_rproc_suspend, ingenic_rproc_resume, NULL)
-};
-
static struct platform_driver ingenic_rproc_driver = {
.probe = ingenic_rproc_probe,
.driver = {
.name = "ingenic-vpu",
-#ifdef CONFIG_PM
- .pm = &ingenic_rproc_pm,
-#endif
.of_match_table = ingenic_rproc_of_matches,
},
};
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
index 9028cea2d81e..085fd73fa23a 100644
--- a/drivers/remoteproc/qcom_common.c
+++ b/drivers/remoteproc/qcom_common.c
@@ -12,8 +12,10 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/remoteproc.h>
+#include <linux/remoteproc/qcom_rproc.h>
#include <linux/rpmsg/qcom_glink.h>
#include <linux/rpmsg/qcom_smd.h>
+#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "remoteproc_internal.h"
@@ -23,7 +25,14 @@
#define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev)
#define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev)
-static BLOCKING_NOTIFIER_HEAD(ssr_notifiers);
+struct qcom_ssr_subsystem {
+ const char *name;
+ struct srcu_notifier_head notifier_list;
+ struct list_head list;
+};
+
+static LIST_HEAD(qcom_ssr_subsystem_list);
+static DEFINE_MUTEX(qcom_ssr_subsys_lock);
static int glink_subdev_start(struct rproc_subdev *subdev)
{
@@ -189,37 +198,122 @@ void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd)
}
EXPORT_SYMBOL_GPL(qcom_remove_smd_subdev);
+static struct qcom_ssr_subsystem *qcom_ssr_get_subsys(const char *name)
+{
+ struct qcom_ssr_subsystem *info;
+
+ mutex_lock(&qcom_ssr_subsys_lock);
+	/* Look for an existing qcom_ssr_subsystem_list entry matching name */
+ list_for_each_entry(info, &qcom_ssr_subsystem_list, list)
+ if (!strcmp(info->name, name))
+ goto out;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ info = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ info->name = kstrdup_const(name, GFP_KERNEL);
+ srcu_init_notifier_head(&info->notifier_list);
+
+ /* Add to global notification list */
+ list_add_tail(&info->list, &qcom_ssr_subsystem_list);
+
+out:
+ mutex_unlock(&qcom_ssr_subsys_lock);
+ return info;
+}
+
/**
* qcom_register_ssr_notifier() - register SSR notification handler
- * @nb: notifier_block to notify for restart notifications
+ * @name: Subsystem's SSR name
+ * @nb: notifier_block to be invoked upon subsystem's state change
*
- * Returns 0 on success, negative errno on failure.
+ * This registers the @nb notifier block as part of the notifier chain for a
+ * remoteproc associated with @name. The notifier block's callback
+ * will be invoked when the remote processor's SSR events occur
+ * (pre/post startup and pre/post shutdown).
*
- * This register the @notify function as handler for restart notifications. As
- * remote processors are stopped this function will be called, with the SSR
- * name passed as a parameter.
+ * Return: a subsystem cookie on success, ERR_PTR on failure.
*/
-int qcom_register_ssr_notifier(struct notifier_block *nb)
+void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb)
{
- return blocking_notifier_chain_register(&ssr_notifiers, nb);
+ struct qcom_ssr_subsystem *info;
+
+ info = qcom_ssr_get_subsys(name);
+ if (IS_ERR(info))
+ return info;
+
+ srcu_notifier_chain_register(&info->notifier_list, nb);
+
+ return &info->notifier_list;
}
EXPORT_SYMBOL_GPL(qcom_register_ssr_notifier);
/**
* qcom_unregister_ssr_notifier() - unregister SSR notification handler
+ * @notify: subsystem cookie returned from qcom_register_ssr_notifier
* @nb: notifier_block to unregister
+ *
+ * This function will unregister the notifier from the particular notifier
+ * chain.
+ *
+ * Return: 0 on success, -ENOENT otherwise.
*/
-void qcom_unregister_ssr_notifier(struct notifier_block *nb)
+int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb)
{
- blocking_notifier_chain_unregister(&ssr_notifiers, nb);
+ return srcu_notifier_chain_unregister(notify, nb);
}
EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier);
+static int ssr_notify_prepare(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = false,
+ };
+
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_BEFORE_POWERUP, &data);
+ return 0;
+}
+
+static int ssr_notify_start(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = false,
+ };
+
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_AFTER_POWERUP, &data);
+ return 0;
+}
+
+static void ssr_notify_stop(struct rproc_subdev *subdev, bool crashed)
+{
+ struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = crashed,
+ };
+
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_BEFORE_SHUTDOWN, &data);
+}
+
static void ssr_notify_unprepare(struct rproc_subdev *subdev)
{
struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = false,
+ };
- blocking_notifier_call_chain(&ssr_notifiers, 0, (void *)ssr->name);
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_AFTER_SHUTDOWN, &data);
}
/**
@@ -229,12 +323,24 @@ static void ssr_notify_unprepare(struct rproc_subdev *subdev)
* @ssr_name: identifier to use for notifications originating from @rproc
*
* As the @ssr is registered with the @rproc SSR events will be sent to all
- * registered listeners in the system as the remoteproc is shut down.
+ * registered listeners for the remoteproc when its SSR events occur
+ * (pre/post startup and pre/post shutdown).
*/
void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr,
const char *ssr_name)
{
- ssr->name = ssr_name;
+ struct qcom_ssr_subsystem *info;
+
+ info = qcom_ssr_get_subsys(ssr_name);
+ if (IS_ERR(info)) {
+ dev_err(&rproc->dev, "Failed to add ssr subdevice\n");
+ return;
+ }
+
+ ssr->info = info;
+ ssr->subdev.prepare = ssr_notify_prepare;
+ ssr->subdev.start = ssr_notify_start;
+ ssr->subdev.stop = ssr_notify_stop;
ssr->subdev.unprepare = ssr_notify_unprepare;
rproc_add_subdev(rproc, &ssr->subdev);
@@ -249,6 +355,7 @@ EXPORT_SYMBOL_GPL(qcom_add_ssr_subdev);
void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr)
{
rproc_remove_subdev(rproc, &ssr->subdev);
+ ssr->info = NULL;
}
EXPORT_SYMBOL_GPL(qcom_remove_ssr_subdev);
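
A minimal client sketch against the reworked notifier API above. The "mpss" subsystem name and the example hooks are assumptions; a real client uses whatever ssr_name its remoteproc driver registered:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/remoteproc/qcom_rproc.h>

static void *ssr_cookie;

static int example_ssr_cb(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	struct qcom_ssr_notify_data *ssr = data;

	/* Quiesce users before a crashed subsystem is torn down. */
	if (action == QCOM_SSR_BEFORE_SHUTDOWN && ssr->crashed)
		pr_info("%s crashed, quiescing users\n", ssr->name);

	return NOTIFY_OK;
}

static struct notifier_block example_ssr_nb = {
	.notifier_call = example_ssr_cb,
};

static int example_register(void)
{
	ssr_cookie = qcom_register_ssr_notifier("mpss", &example_ssr_nb);
	if (IS_ERR(ssr_cookie))
		return PTR_ERR(ssr_cookie);
	return 0;
}

static void example_unregister(void)
{
	qcom_unregister_ssr_notifier(ssr_cookie, &example_ssr_nb);
}
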
diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h
index 34e5188187dc..dfc641c3a98b 100644
--- a/drivers/remoteproc/qcom_common.h
+++ b/drivers/remoteproc/qcom_common.h
@@ -26,10 +26,11 @@ struct qcom_rproc_subdev {
struct qcom_smd_edge *edge;
};
+struct qcom_ssr_subsystem;
+
struct qcom_rproc_ssr {
struct rproc_subdev subdev;
-
- const char *name;
+ struct qcom_ssr_subsystem *info;
};
void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink,
diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c
new file mode 100644
index 000000000000..5521c4437ffa
--- /dev/null
+++ b/drivers/remoteproc/qcom_pil_info.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include "qcom_pil_info.h"
+
+/*
+ * The PIL relocation information region is used to communicate memory regions
+ * occupied by co-processor firmware for post mortem crash analysis.
+ *
+ * It consists of an array of entries with an 8 byte textual identifier of the
+ * region followed by a 64 bit base address and 32 bit size, both little
+ * endian.
+ */
+#define PIL_RELOC_NAME_LEN 8
+#define PIL_RELOC_ENTRY_SIZE (PIL_RELOC_NAME_LEN + sizeof(__le64) + sizeof(__le32))
+
+struct pil_reloc {
+ void __iomem *base;
+ size_t num_entries;
+};
+
+static struct pil_reloc _reloc __read_mostly;
+static DEFINE_MUTEX(pil_reloc_lock);
+
+static int qcom_pil_info_init(void)
+{
+ struct device_node *np;
+ struct resource imem;
+ void __iomem *base;
+ int ret;
+
+ /* Already initialized? */
+ if (_reloc.base)
+ return 0;
+
+ np = of_find_compatible_node(NULL, NULL, "qcom,pil-reloc-info");
+ if (!np)
+ return -ENOENT;
+
+ ret = of_address_to_resource(np, 0, &imem);
+ of_node_put(np);
+ if (ret < 0)
+ return ret;
+
+ base = ioremap(imem.start, resource_size(&imem));
+ if (!base) {
+ pr_err("failed to map PIL relocation info region\n");
+ return -ENOMEM;
+ }
+
+ memset_io(base, 0, resource_size(&imem));
+
+ _reloc.base = base;
+ _reloc.num_entries = resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
+
+ return 0;
+}
+
+/**
+ * qcom_pil_info_store() - store PIL information of image in IMEM
+ * @image: name of the image
+ * @base: base address of the loaded image
+ * @size: size of the loaded image
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size)
+{
+ char buf[PIL_RELOC_NAME_LEN];
+ void __iomem *entry;
+ int ret;
+ int i;
+
+ mutex_lock(&pil_reloc_lock);
+ ret = qcom_pil_info_init();
+ if (ret < 0) {
+ mutex_unlock(&pil_reloc_lock);
+ return ret;
+ }
+
+ for (i = 0; i < _reloc.num_entries; i++) {
+ entry = _reloc.base + i * PIL_RELOC_ENTRY_SIZE;
+
+ memcpy_fromio(buf, entry, PIL_RELOC_NAME_LEN);
+
+ /*
+ * An empty record means we didn't find it, given that the
+ * records are packed.
+ */
+ if (!buf[0])
+ goto found_unused;
+
+ if (!strncmp(buf, image, PIL_RELOC_NAME_LEN))
+ goto found_existing;
+ }
+
+ pr_warn("insufficient PIL info slots\n");
+ mutex_unlock(&pil_reloc_lock);
+ return -ENOMEM;
+
+found_unused:
+ memcpy_toio(entry, image, PIL_RELOC_NAME_LEN);
+found_existing:
+ /* Use two writel() as base is only aligned to 4 bytes on odd entries */
+ writel(base, entry + PIL_RELOC_NAME_LEN);
+ writel((u64)base >> 32, entry + PIL_RELOC_NAME_LEN + 4);
+ writel(size, entry + PIL_RELOC_NAME_LEN + sizeof(__le64));
+ mutex_unlock(&pil_reloc_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_pil_info_store);
+
+static void __exit pil_reloc_exit(void)
+{
+ mutex_lock(&pil_reloc_lock);
+ iounmap(_reloc.base);
+ _reloc.base = NULL;
+ mutex_unlock(&pil_reloc_lock);
+}
+module_exit(pil_reloc_exit);
+
+MODULE_DESCRIPTION("Qualcomm PIL relocation info");
+MODULE_LICENSE("GPL v2");
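
For illustration, the IMEM entry layout described in the header comment expressed as a packed struct. The driver itself sticks to raw offsets and split 32-bit writes because the 64-bit base field is only 4-byte aligned on odd-numbered entries:

#include <linux/types.h>

/* 20-byte IMEM entry (PIL_RELOC_ENTRY_SIZE), illustration only. */
struct pil_reloc_entry_layout {
	char name[8];	/* PIL_RELOC_NAME_LEN, not necessarily NUL-terminated */
	__le64 base;	/* physical load address, little endian */
	__le32 size;	/* loaded image size in bytes, little endian */
} __attribute__((packed));
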
diff --git a/drivers/remoteproc/qcom_pil_info.h b/drivers/remoteproc/qcom_pil_info.h
new file mode 100644
index 000000000000..0dce6142935e
--- /dev/null
+++ b/drivers/remoteproc/qcom_pil_info.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_PIL_INFO_H__
+#define __QCOM_PIL_INFO_H__
+
+#include <linux/types.h>
+
+int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size);
+
+#endif
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index 111a442c993c..fd6fd36268d9 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -153,6 +153,8 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
{
int ret;
+ q6v5->running = false;
+
qcom_smem_state_update_bits(q6v5->state,
BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index d2a2574dcf35..efb2c1aa80a3 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -26,6 +26,7 @@
#include <linux/soc/qcom/smem_state.h>
#include "qcom_common.h"
+#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#include "remoteproc_internal.h"
@@ -82,6 +83,7 @@ struct qcom_adsp {
unsigned int halt_lpass;
int crash_reason_smem;
+ const char *info_name;
struct completion start_done;
struct completion stop_done;
@@ -164,10 +166,17 @@ reset:
static int adsp_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int ret;
+
+ ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0,
+ adsp->mem_region, adsp->mem_phys,
+ adsp->mem_size, &adsp->mem_reloc);
+ if (ret)
+ return ret;
+
+ qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size);
- return qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0,
- adsp->mem_region, adsp->mem_phys, adsp->mem_size,
- &adsp->mem_reloc);
+ return 0;
}
static int adsp_start(struct rproc *rproc)
@@ -436,6 +445,7 @@ static int adsp_probe(struct platform_device *pdev)
adsp = (struct qcom_adsp *)rproc->priv;
adsp->dev = &pdev->dev;
adsp->rproc = rproc;
+ adsp->info_name = desc->sysmon_name;
platform_set_drvdata(pdev, adsp);
ret = adsp_alloc_memory_region(adsp);
diff --git a/drivers/remoteproc/qcom_q6v5_ipa_notify.c b/drivers/remoteproc/qcom_q6v5_ipa_notify.c
deleted file mode 100644
index e1c10a128bfd..000000000000
--- a/drivers/remoteproc/qcom_q6v5_ipa_notify.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/*
- * Qualcomm IPA notification subdev support
- *
- * Copyright (C) 2019 Linaro Ltd.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/remoteproc.h>
-#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
-
-static void
-ipa_notify_common(struct rproc_subdev *subdev, enum qcom_rproc_event event)
-{
- struct qcom_rproc_ipa_notify *ipa_notify;
- qcom_ipa_notify_t notify;
-
- ipa_notify = container_of(subdev, struct qcom_rproc_ipa_notify, subdev);
- notify = ipa_notify->notify;
- if (notify)
- notify(ipa_notify->data, event);
-}
-
-static int ipa_notify_prepare(struct rproc_subdev *subdev)
-{
- ipa_notify_common(subdev, MODEM_STARTING);
-
- return 0;
-}
-
-static int ipa_notify_start(struct rproc_subdev *subdev)
-{
- ipa_notify_common(subdev, MODEM_RUNNING);
-
- return 0;
-}
-
-static void ipa_notify_stop(struct rproc_subdev *subdev, bool crashed)
-
-{
- ipa_notify_common(subdev, crashed ? MODEM_CRASHED : MODEM_STOPPING);
-}
-
-static void ipa_notify_unprepare(struct rproc_subdev *subdev)
-{
- ipa_notify_common(subdev, MODEM_OFFLINE);
-}
-
-static void ipa_notify_removing(struct rproc_subdev *subdev)
-{
- ipa_notify_common(subdev, MODEM_REMOVING);
-}
-
-/* Register the IPA notification subdevice with the Q6V5 MSS remoteproc */
-void qcom_add_ipa_notify_subdev(struct rproc *rproc,
- struct qcom_rproc_ipa_notify *ipa_notify)
-{
- ipa_notify->notify = NULL;
- ipa_notify->data = NULL;
- ipa_notify->subdev.prepare = ipa_notify_prepare;
- ipa_notify->subdev.start = ipa_notify_start;
- ipa_notify->subdev.stop = ipa_notify_stop;
- ipa_notify->subdev.unprepare = ipa_notify_unprepare;
-
- rproc_add_subdev(rproc, &ipa_notify->subdev);
-}
-EXPORT_SYMBOL_GPL(qcom_add_ipa_notify_subdev);
-
-/* Remove the IPA notification subdevice */
-void qcom_remove_ipa_notify_subdev(struct rproc *rproc,
- struct qcom_rproc_ipa_notify *ipa_notify)
-{
- struct rproc_subdev *subdev = &ipa_notify->subdev;
-
- ipa_notify_removing(subdev);
-
- rproc_remove_subdev(rproc, subdev);
- ipa_notify->notify = NULL; /* Make it obvious */
-}
-EXPORT_SYMBOL_GPL(qcom_remove_ipa_notify_subdev);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Qualcomm IPA notification remoteproc subdev");
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 903b2bb97e12..c401bcc263fa 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/devcoredump.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -22,7 +23,6 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
-#include "linux/remoteproc/qcom_q6v5_ipa_notify.h"
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/iopoll.h>
@@ -30,12 +30,15 @@
#include "remoteproc_internal.h"
#include "qcom_common.h"
+#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#include <linux/qcom_scm.h>
#define MPSS_CRASH_REASON_SMEM 421
+#define MBA_LOG_SIZE SZ_4K
+
/* RMB Status Register Values */
#define RMB_PBL_SUCCESS 0x1
@@ -112,8 +115,6 @@
#define QDSP6SS_SLEEP 0x3C
#define QDSP6SS_BOOT_CORE_START 0x400
#define QDSP6SS_BOOT_CMD 0x404
-#define QDSP6SS_BOOT_STATUS 0x408
-#define BOOT_STATUS_TIMEOUT_US 200
#define BOOT_FSM_TIMEOUT 10000
struct reg_info {
@@ -140,6 +141,7 @@ struct rproc_hexagon_res {
int version;
bool need_mem_protection;
bool has_alt_reset;
+ bool has_mba_logs;
bool has_spare_reg;
};
@@ -179,15 +181,14 @@ struct q6v5 {
int active_reg_count;
int proxy_reg_count;
- bool running;
-
bool dump_mba_loaded;
- unsigned long dump_segment_mask;
- unsigned long dump_complete_mask;
+ size_t current_dump_size;
+ size_t total_dump_size;
phys_addr_t mba_phys;
void *mba_region;
size_t mba_size;
+ size_t dp_size;
phys_addr_t mpss_phys;
phys_addr_t mpss_reloc;
@@ -196,10 +197,10 @@ struct q6v5 {
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_subdev smd_subdev;
struct qcom_rproc_ssr ssr_subdev;
- struct qcom_rproc_ipa_notify ipa_notify_subdev;
struct qcom_sysmon *sysmon;
bool need_mem_protection;
bool has_alt_reset;
+ bool has_mba_logs;
bool has_spare_reg;
int mpss_perm;
int mba_perm;
@@ -404,11 +405,33 @@ static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
current_perm, next, perms);
}
+static void q6v5_debug_policy_load(struct q6v5 *qproc)
+{
+ const struct firmware *dp_fw;
+
+ if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
+ return;
+
+ if (SZ_1M + dp_fw->size <= qproc->mba_size) {
+ memcpy(qproc->mba_region + SZ_1M, dp_fw->data, dp_fw->size);
+ qproc->dp_size = dp_fw->size;
+ }
+
+ release_firmware(dp_fw);
+}
+
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
struct q6v5 *qproc = rproc->priv;
+ /* MBA is restricted to a maximum size of 1M */
+ if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
+ dev_err(qproc->dev, "MBA firmware load failed\n");
+ return -EINVAL;
+ }
+
memcpy(qproc->mba_region, fw->data, fw->size);
+ q6v5_debug_policy_load(qproc);
return 0;
}
@@ -511,6 +534,26 @@ static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
return val;
}
+static void q6v5_dump_mba_logs(struct q6v5 *qproc)
+{
+ struct rproc *rproc = qproc->rproc;
+ void *data;
+
+ if (!qproc->has_mba_logs)
+ return;
+
+ if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
+ qproc->mba_size))
+ return;
+
+ data = vmalloc(MBA_LOG_SIZE);
+ if (!data)
+ return;
+
+ memcpy(data, qproc->mba_region, MBA_LOG_SIZE);
+ dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
+}
+
static int q6v5proc_reset(struct q6v5 *qproc)
{
u32 val;
@@ -579,13 +622,15 @@ static int q6v5proc_reset(struct q6v5 *qproc)
/* De-assert the Q6 stop core signal */
writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
+ /* Wait for 10 us for any staggering logic to settle */
+ usleep_range(10, 20);
+
/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
- /* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
- ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
- val, (val & BIT(0)) != 0, 1,
- BOOT_STATUS_TIMEOUT_US);
+ /* Poll the MSS_STATUS for FSM completion */
+ ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
+ val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
if (ret) {
dev_err(qproc->dev, "Boot FSM failed to complete.\n");
/* Reset the modem so that boot FSM is in reset state */
@@ -829,6 +874,7 @@ static int q6v5_mba_load(struct q6v5 *qproc)
{
int ret;
int xfermemop_ret;
+ bool mba_load_err = false;
qcom_q6v5_prepare(&qproc->q6v5);
@@ -895,6 +941,10 @@ static int q6v5_mba_load(struct q6v5 *qproc)
}
writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
+ if (qproc->dp_size) {
+ writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+ writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+ }
ret = q6v5proc_reset(qproc);
if (ret)
@@ -918,7 +968,7 @@ halt_axi_ports:
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
-
+ mba_load_err = true;
reclaim_mba:
xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
false, qproc->mba_phys,
@@ -926,6 +976,8 @@ reclaim_mba:
if (xfermemop_ret) {
dev_err(qproc->dev,
"Failed to reclaim mba buffer, system may become unstable\n");
+ } else if (mba_load_err) {
+ q6v5_dump_mba_logs(qproc);
}
disable_active_clks:
@@ -961,6 +1013,7 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
u32 val;
qproc->dump_mba_loaded = false;
+ qproc->dp_size = 0;
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
@@ -1139,15 +1192,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
} else if (phdr->p_filesz) {
/* Replace "xxx.xxx" with "xxx.bxx" */
sprintf(fw_name + fw_name_len - 3, "b%02d", i);
- ret = request_firmware(&seg_fw, fw_name, qproc->dev);
+ ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
+ ptr, phdr->p_filesz);
if (ret) {
dev_err(qproc->dev, "failed to load %s\n", fw_name);
iounmap(ptr);
goto release_firmware;
}
- memcpy(ptr, seg_fw->data, seg_fw->size);
-
release_firmware(seg_fw);
}
@@ -1190,6 +1242,8 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
else if (ret < 0)
dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
+ qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
+
release_firmware:
release_firmware(fw);
out:
@@ -1200,11 +1254,10 @@ out:
static void qcom_q6v5_dump_segment(struct rproc *rproc,
struct rproc_dump_segment *segment,
- void *dest)
+ void *dest, size_t cp_offset, size_t size)
{
int ret = 0;
struct q6v5 *qproc = rproc->priv;
- unsigned long mask = BIT((unsigned long)segment->priv);
int offset = segment->da - qproc->mpss_reloc;
void *ptr = NULL;
@@ -1221,19 +1274,19 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc,
}
if (!ret)
- ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size);
+ ptr = ioremap_wc(qproc->mpss_phys + offset + cp_offset, size);
if (ptr) {
- memcpy(dest, ptr, segment->size);
+ memcpy(dest, ptr, size);
iounmap(ptr);
} else {
- memset(dest, 0xff, segment->size);
+ memset(dest, 0xff, size);
}
- qproc->dump_segment_mask |= mask;
+ qproc->current_dump_size += size;
/* Reclaim mba after copying segments */
- if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
+ if (qproc->current_dump_size == qproc->total_dump_size) {
if (qproc->dump_mba_loaded) {
/* Try to reset ownership back to Q6 */
q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
@@ -1255,7 +1308,8 @@ static int q6v5_start(struct rproc *rproc)
if (ret)
return ret;
- dev_info(qproc->dev, "MBA booted, loading mpss\n");
+ dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
+ qproc->dp_size ? "" : "out");
ret = q6v5_mpss_load(qproc);
if (ret)
@@ -1275,13 +1329,13 @@ static int q6v5_start(struct rproc *rproc)
"Failed to reclaim mba buffer system may become unstable\n");
/* Reset Dump Segment Mask */
- qproc->dump_segment_mask = 0;
- qproc->running = true;
+ qproc->current_dump_size = 0;
return 0;
reclaim_mpss:
q6v5_mba_reclaim(qproc);
+ q6v5_dump_mba_logs(qproc);
return ret;
}
@@ -1291,8 +1345,6 @@ static int q6v5_stop(struct rproc *rproc)
struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
int ret;
- qproc->running = false;
-
ret = qcom_q6v5_request_stop(&qproc->q6v5);
if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "timed out on wait\n");
@@ -1324,7 +1376,7 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
ehdr = (struct elf32_hdr *)fw->data;
phdrs = (struct elf32_phdr *)(ehdr + 1);
- qproc->dump_complete_mask = 0;
+ qproc->total_dump_size = 0;
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
@@ -1335,11 +1387,11 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
phdr->p_memsz,
qcom_q6v5_dump_segment,
- (void *)i);
+ NULL);
if (ret)
break;
- qproc->dump_complete_mask |= BIT(i);
+ qproc->total_dump_size += phdr->p_memsz;
}
release_firmware(fw);
@@ -1554,39 +1606,6 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
return 0;
}
-#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
-
-/* Register IPA notification function */
-int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
- void *data)
-{
- struct qcom_rproc_ipa_notify *ipa_notify;
- struct q6v5 *qproc = rproc->priv;
-
- if (!notify)
- return -EINVAL;
-
- ipa_notify = &qproc->ipa_notify_subdev;
- if (ipa_notify->notify)
- return -EBUSY;
-
- ipa_notify->notify = notify;
- ipa_notify->data = data;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(qcom_register_ipa_notify);
-
-/* Deregister IPA notification function */
-void qcom_deregister_ipa_notify(struct rproc *rproc)
-{
- struct q6v5 *qproc = rproc->priv;
-
- qproc->ipa_notify_subdev.notify = NULL;
-}
-EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify);
-#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
-
static int q6v5_probe(struct platform_device *pdev)
{
const struct rproc_hexagon_res *desc;
@@ -1701,6 +1720,7 @@ static int q6v5_probe(struct platform_device *pdev)
qproc->version = desc->version;
qproc->need_mem_protection = desc->need_mem_protection;
+ qproc->has_mba_logs = desc->has_mba_logs;
ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
qcom_msa_handover);
@@ -1712,7 +1732,6 @@ static int q6v5_probe(struct platform_device *pdev)
qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
- qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
if (IS_ERR(qproc->sysmon)) {
ret = PTR_ERR(qproc->sysmon);
@@ -1728,7 +1747,6 @@ static int q6v5_probe(struct platform_device *pdev)
remove_sysmon_subdev:
qcom_remove_sysmon_subdev(qproc->sysmon);
remove_subdevs:
- qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
@@ -1750,7 +1768,6 @@ static int q6v5_remove(struct platform_device *pdev)
rproc_del(rproc);
qcom_remove_sysmon_subdev(qproc->sysmon);
- qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
@@ -1792,6 +1809,7 @@ static const struct rproc_hexagon_res sc7180_mss = {
},
.need_mem_protection = true,
.has_alt_reset = false,
+ .has_mba_logs = true,
.has_spare_reg = true,
.version = MSS_SC7180,
};
@@ -1827,6 +1845,7 @@ static const struct rproc_hexagon_res sdm845_mss = {
},
.need_mem_protection = true,
.has_alt_reset = true,
+ .has_mba_logs = false,
.has_spare_reg = false,
.version = MSS_SDM845,
};
@@ -1854,6 +1873,7 @@ static const struct rproc_hexagon_res msm8998_mss = {
},
.need_mem_protection = true,
.has_alt_reset = false,
+ .has_mba_logs = false,
.has_spare_reg = false,
.version = MSS_MSM8998,
};
@@ -1884,6 +1904,7 @@ static const struct rproc_hexagon_res msm8996_mss = {
},
.need_mem_protection = true,
.has_alt_reset = false,
+ .has_mba_logs = false,
.has_spare_reg = false,
.version = MSS_MSM8996,
};
@@ -1917,6 +1938,7 @@ static const struct rproc_hexagon_res msm8916_mss = {
},
.need_mem_protection = false,
.has_alt_reset = false,
+ .has_mba_logs = false,
.has_spare_reg = false,
.version = MSS_MSM8916,
};
@@ -1958,6 +1980,7 @@ static const struct rproc_hexagon_res msm8974_mss = {
},
.need_mem_protection = false,
.has_alt_reset = false,
+ .has_mba_logs = false,
.has_spare_reg = false,
.version = MSS_MSM8974,
};
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 61791a03f648..3837f23995e0 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -25,6 +25,7 @@
#include <linux/soc/qcom/smem_state.h>
#include "qcom_common.h"
+#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#include "remoteproc_internal.h"
@@ -64,6 +65,7 @@ struct qcom_adsp {
int pas_id;
int crash_reason_smem;
bool has_aggre2_clk;
+ const char *info_name;
struct completion start_done;
struct completion stop_done;
@@ -117,11 +119,17 @@ static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
static int adsp_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int ret;
- return qcom_mdt_load(adsp->dev, fw, rproc->firmware, adsp->pas_id,
- adsp->mem_region, adsp->mem_phys, adsp->mem_size,
- &adsp->mem_reloc);
+ ret = qcom_mdt_load(adsp->dev, fw, rproc->firmware, adsp->pas_id,
+ adsp->mem_region, adsp->mem_phys, adsp->mem_size,
+ &adsp->mem_reloc);
+ if (ret)
+ return ret;
+ qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size);
+
+ return 0;
}
static int adsp_start(struct rproc *rproc)
@@ -405,6 +413,7 @@ static int adsp_probe(struct platform_device *pdev)
adsp->rproc = rproc;
adsp->pas_id = desc->pas_id;
adsp->has_aggre2_clk = desc->has_aggre2_clk;
+ adsp->info_name = desc->sysmon_name;
platform_set_drvdata(pdev, adsp);
device_wakeup_enable(adsp->dev);
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index 88c76b9417fa..8846ef0b0f1a 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -14,6 +14,7 @@
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "qcom_common.h"
+#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#define WCSS_CRASH_REASON 421
@@ -424,10 +425,17 @@ static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len)
static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw)
{
struct q6v5_wcss *wcss = rproc->priv;
+ int ret;
+
+ ret = qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware,
+ 0, wcss->mem_region, wcss->mem_phys,
+ wcss->mem_size, &wcss->mem_reloc);
+ if (ret)
+ return ret;
+
+ qcom_pil_info_store("wcnss", wcss->mem_phys, wcss->mem_size);
- return qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware,
- 0, wcss->mem_region, wcss->mem_phys,
- wcss->mem_size, &wcss->mem_reloc);
+ return ret;
}
static const struct rproc_ops q6v5_wcss_ops = {
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index 8d8996d714f0..9eb2f6bccea6 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -71,7 +71,7 @@ static LIST_HEAD(sysmon_list);
/**
* sysmon_send_event() - send notification of other remote's SSR event
* @sysmon: sysmon context
- * @name: other remote's name
+ * @event: sysmon event context
*/
static void sysmon_send_event(struct qcom_sysmon *sysmon,
const struct sysmon_event *event)
@@ -343,7 +343,7 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
/**
* ssctl_send_event() - send notification of other remote's SSR event
* @sysmon: sysmon context
- * @name: other remote's name
+ * @event: sysmon event context
*/
static void ssctl_send_event(struct qcom_sysmon *sysmon,
const struct sysmon_event *event)
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 5d65e1a9329a..e2573f79a137 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -27,6 +27,7 @@
#include "qcom_common.h"
#include "remoteproc_internal.h"
+#include "qcom_pil_info.h"
#include "qcom_wcnss.h"
#define WCNSS_CRASH_REASON_SMEM 422
@@ -145,10 +146,17 @@ void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss,
static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+ int ret;
+
+ ret = qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
+ wcnss->mem_region, wcnss->mem_phys,
+ wcnss->mem_size, &wcnss->mem_reloc);
+ if (ret)
+ return ret;
+
+ qcom_pil_info_store("wcnss", wcnss->mem_phys, wcnss->mem_size);
- return qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
- wcnss->mem_region, wcnss->mem_phys,
- wcnss->mem_size, &wcnss->mem_reloc);
+ return 0;
}
static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss)
diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c
new file mode 100644
index 000000000000..b19ea3057bde
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_cdev.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Character device interface driver for Remoteproc framework.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/cdev.h>
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/remoteproc.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/remoteproc_cdev.h>
+
+#include "remoteproc_internal.h"
+
+#define NUM_RPROC_DEVICES 64
+static dev_t rproc_major;
+
+static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos)
+{
+ struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev);
+ int ret = 0;
+ char cmd[10];
+
+ if (!len || len > sizeof(cmd))
+ return -EINVAL;
+
+ ret = copy_from_user(cmd, buf, len);
+ if (ret)
+ return -EFAULT;
+
+ if (!strncmp(cmd, "start", len)) {
+ if (rproc->state == RPROC_RUNNING)
+ return -EBUSY;
+
+ ret = rproc_boot(rproc);
+ } else if (!strncmp(cmd, "stop", len)) {
+ if (rproc->state != RPROC_RUNNING)
+ return -EINVAL;
+
+ rproc_shutdown(rproc);
+ } else {
+ dev_err(&rproc->dev, "Unrecognized option\n");
+ ret = -EINVAL;
+ }
+
+ return ret ? ret : len;
+}
+
+static long rproc_device_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev);
+ void __user *argp = (void __user *)arg;
+ s32 param;
+
+ switch (ioctl) {
+ case RPROC_SET_SHUTDOWN_ON_RELEASE:
+ if (copy_from_user(&param, argp, sizeof(s32)))
+ return -EFAULT;
+
+ rproc->cdev_put_on_release = !!param;
+ break;
+ case RPROC_GET_SHUTDOWN_ON_RELEASE:
+ param = (s32)rproc->cdev_put_on_release;
+ if (copy_to_user(argp, &param, sizeof(s32)))
+ return -EFAULT;
+
+ break;
+ default:
+ dev_err(&rproc->dev, "Unsupported ioctl\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rproc_cdev_release(struct inode *inode, struct file *filp)
+{
+ struct rproc *rproc = container_of(inode->i_cdev, struct rproc, cdev);
+
+ if (rproc->cdev_put_on_release && rproc->state == RPROC_RUNNING)
+ rproc_shutdown(rproc);
+
+ return 0;
+}
+
+static const struct file_operations rproc_fops = {
+ .write = rproc_cdev_write,
+ .unlocked_ioctl = rproc_device_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .release = rproc_cdev_release,
+};
+
+int rproc_char_device_add(struct rproc *rproc)
+{
+ int ret;
+
+ cdev_init(&rproc->cdev, &rproc_fops);
+ rproc->cdev.owner = THIS_MODULE;
+
+ rproc->dev.devt = MKDEV(MAJOR(rproc_major), rproc->index);
+ cdev_set_parent(&rproc->cdev, &rproc->dev.kobj);
+ ret = cdev_add(&rproc->cdev, rproc->dev.devt, 1);
+ if (ret < 0)
+ dev_err(&rproc->dev, "Failed to add char dev for %s\n", rproc->name);
+
+ return ret;
+}
+
+void rproc_char_device_remove(struct rproc *rproc)
+{
+ __unregister_chrdev(MAJOR(rproc->dev.devt), rproc->index, 1, "remoteproc");
+}
+
+void __init rproc_init_cdev(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&rproc_major, 0, NUM_RPROC_DEVICES, "remoteproc");
+ if (ret < 0)
+ pr_err("Failed to alloc rproc_cdev region, err %d\n", ret);
+}
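For illustration, a user-space client could exercise the new character device roughly as follows. This sketch is not part of the patch; the /dev/remoteproc0 node name is an assumption about how the cdev ends up exposed on a given system.

/* Illustrative user-space sketch (not part of this patch). Assumes the cdev
 * registered above shows up as /dev/remoteproc0.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/remoteproc_cdev.h>

int main(void)
{
	int one = 1;
	int fd = open("/dev/remoteproc0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask the kernel to shut the remote processor down on release,
	 * matching RPROC_SET_SHUTDOWN_ON_RELEASE in rproc_device_ioctl().
	 */
	if (ioctl(fd, RPROC_SET_SHUTDOWN_ON_RELEASE, &one) < 0)
		perror("ioctl");

	/* "start" and "stop" are the two commands rproc_cdev_write() accepts */
	if (write(fd, "start", strlen("start")) < 0)
		perror("write");

	close(fd);
	return 0;
}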
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 9f04c30c4aaf..7f90eeea67e2 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -26,10 +26,8 @@
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/debugfs.h>
-#include <linux/devcoredump.h>
#include <linux/rculist.h>
#include <linux/remoteproc.h>
-#include <linux/pm_runtime.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/elf.h>
@@ -41,7 +39,6 @@
#include <linux/platform_device.h>
#include "remoteproc_internal.h"
-#include "remoteproc_elf_helpers.h"
#define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL
@@ -244,6 +241,7 @@ EXPORT_SYMBOL(rproc_da_to_va);
*
* Return: a valid pointer on carveout entry on success or NULL on failure.
*/
+__printf(2, 3)
struct rproc_mem_entry *
rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
{
@@ -411,10 +409,22 @@ void rproc_free_vring(struct rproc_vring *rvring)
idr_remove(&rproc->notifyids, rvring->notifyid);
- /* reset resource entry info */
- rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
- rsc->vring[idx].da = 0;
- rsc->vring[idx].notifyid = -1;
+ /*
+ * At this point rproc_stop() has been called and the installed resource
+ * table in the remote processor memory may no longer be accessible. As
+ * such and as per rproc_stop(), rproc->table_ptr points to the cached
+ * resource table (rproc->cached_table). The cached resource table is
+ * only available when a remote processor has been booted by the
+ * remoteproc core, otherwise it is NULL.
+ *
+ * Based on the above, reset the virtio device section in the cached
+ * resource table only if there is one to work with.
+ */
+ if (rproc->table_ptr) {
+ rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
+ rsc->vring[idx].da = 0;
+ rsc->vring[idx].notifyid = -1;
+ }
}
static int rproc_vdev_do_start(struct rproc_subdev *subdev)
@@ -967,6 +977,7 @@ EXPORT_SYMBOL(rproc_add_carveout);
* This function allocates a rproc_mem_entry struct and fill it with parameters
* provided by client.
*/
+__printf(8, 9)
struct rproc_mem_entry *
rproc_mem_entry_init(struct device *dev,
void *va, dma_addr_t dma, size_t len, u32 da,
@@ -1010,6 +1021,7 @@ EXPORT_SYMBOL(rproc_mem_entry_init);
* This function allocates a rproc_mem_entry struct and fill it with parameters
* provided by client.
*/
+__printf(5, 6)
struct rproc_mem_entry *
rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
u32 da, const char *name, ...)
@@ -1034,6 +1046,29 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
}
EXPORT_SYMBOL(rproc_of_resm_mem_entry_init);
+/**
+ * rproc_of_parse_firmware() - parse and return the firmware-name
+ * @dev: pointer on device struct representing a rproc
+ * @index: index to use for the firmware-name retrieval
+ * @fw_name: pointer to a character string, in which the firmware
+ * name is returned on success and unmodified otherwise.
+ *
+ * This is an OF helper function that parses a device's DT node for
+ * the "firmware-name" property and returns the firmware name pointer
+ * in @fw_name on success.
+ *
+ * Return: 0 on success, or an appropriate error code on failure.
+ */
+int rproc_of_parse_firmware(struct device *dev, int index, const char **fw_name)
+{
+ int ret;
+
+ ret = of_property_read_string_index(dev->of_node, "firmware-name",
+ index, fw_name);
+ return ret ? ret : 0;
+}
+EXPORT_SYMBOL(rproc_of_parse_firmware);
+
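As a hypothetical usage sketch (driver and firmware names invented; the helper is only declared in remoteproc_internal.h by this patch), an in-tree rproc driver could pick its firmware name from DT like this:

/* Hypothetical snippet, not part of the patch: pick a firmware name from the
 * "firmware-name" DT property, keeping a built-in default otherwise.
 */
#include <linux/device.h>
#include "remoteproc_internal.h"

static const char *demo_pick_fw_name(struct device *dev)
{
	/* Left untouched by the helper when the property is absent */
	const char *fw_name = "demo_rproc.elf";

	if (rproc_of_parse_firmware(dev, 0, &fw_name))
		dev_info(dev, "no firmware-name in DT, using %s\n", fw_name);

	return fw_name;
}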
/*
* A lookup table for resource handlers. The indices are defined in
* enum fw_resource_type.
@@ -1239,19 +1274,6 @@ static int rproc_alloc_registered_carveouts(struct rproc *rproc)
return 0;
}
-/**
- * rproc_coredump_cleanup() - clean up dump_segments list
- * @rproc: the remote processor handle
- */
-static void rproc_coredump_cleanup(struct rproc *rproc)
-{
- struct rproc_dump_segment *entry, *tmp;
-
- list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
- list_del(&entry->node);
- kfree(entry);
- }
-}
/**
* rproc_resource_cleanup() - clean up and free all acquired resources
@@ -1260,7 +1282,7 @@ static void rproc_coredump_cleanup(struct rproc *rproc)
* This function will free all resources acquired for @rproc, and it
* is called whenever @rproc either shuts down or fails to boot.
*/
-static void rproc_resource_cleanup(struct rproc *rproc)
+void rproc_resource_cleanup(struct rproc *rproc)
{
struct rproc_mem_entry *entry, *tmp;
struct rproc_debug_trace *trace, *ttmp;
@@ -1304,6 +1326,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
rproc_coredump_cleanup(rproc);
}
+EXPORT_SYMBOL(rproc_resource_cleanup);
static int rproc_start(struct rproc *rproc, const struct firmware *fw)
{
@@ -1370,6 +1393,48 @@ reset_table_ptr:
return ret;
}
+static int rproc_attach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ ret = rproc_prepare_subdevices(rproc);
+ if (ret) {
+ dev_err(dev, "failed to prepare subdevices for %s: %d\n",
+ rproc->name, ret);
+ goto out;
+ }
+
+ /* Attach to the remote processor */
+ ret = rproc_attach_device(rproc);
+ if (ret) {
+ dev_err(dev, "can't attach to rproc %s: %d\n",
+ rproc->name, ret);
+ goto unprepare_subdevices;
+ }
+
+ /* Start any subdevices for the remote processor */
+ ret = rproc_start_subdevices(rproc);
+ if (ret) {
+ dev_err(dev, "failed to probe subdevices for %s: %d\n",
+ rproc->name, ret);
+ goto stop_rproc;
+ }
+
+ rproc->state = RPROC_RUNNING;
+
+ dev_info(dev, "remote processor %s is now attached\n", rproc->name);
+
+ return 0;
+
+stop_rproc:
+ rproc->ops->stop(rproc);
+unprepare_subdevices:
+ rproc_unprepare_subdevices(rproc);
+out:
+ return ret;
+}
+
/*
* take a firmware and boot a remote processor with it.
*/
@@ -1383,12 +1448,6 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
if (ret)
return ret;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed: %d\n", ret);
- return ret;
- }
-
dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
/*
@@ -1398,7 +1457,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
ret = rproc_enable_iommu(rproc);
if (ret) {
dev_err(dev, "can't enable iommu: %d\n", ret);
- goto put_pm_runtime;
+ return ret;
}
/* Prepare rproc for firmware loading if needed */
@@ -1452,8 +1511,63 @@ unprepare_rproc:
rproc_unprepare_device(rproc);
disable_iommu:
rproc_disable_iommu(rproc);
-put_pm_runtime:
- pm_runtime_put(dev);
+ return ret;
+}
+
+/*
+ * Attach to remote processor - similar to rproc_fw_boot() but without
+ * the steps that deal with the firmware image.
+ */
+static int rproc_actuate(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ /*
+ * if enabling an IOMMU isn't relevant for this rproc, this is
+ * just a nop
+ */
+ ret = rproc_enable_iommu(rproc);
+ if (ret) {
+ dev_err(dev, "can't enable iommu: %d\n", ret);
+ return ret;
+ }
+
+ /* reset max_notifyid */
+ rproc->max_notifyid = -1;
+
+ /* reset handled vdev */
+ rproc->nb_vdev = 0;
+
+ /*
+ * Handle firmware resources required to attach to a remote processor.
+ * Because we are attaching rather than booting the remote processor,
+ * we expect the platform driver to properly set rproc->table_ptr.
+ */
+ ret = rproc_handle_resources(rproc, rproc_loading_handlers);
+ if (ret) {
+ dev_err(dev, "Failed to process resources: %d\n", ret);
+ goto disable_iommu;
+ }
+
+ /* Allocate carveout resources associated to rproc */
+ ret = rproc_alloc_registered_carveouts(rproc);
+ if (ret) {
+ dev_err(dev, "Failed to allocate associated carveouts: %d\n",
+ ret);
+ goto clean_up_resources;
+ }
+
+ ret = rproc_attach(rproc);
+ if (ret)
+ goto clean_up_resources;
+
+ return 0;
+
+clean_up_resources:
+ rproc_resource_cleanup(rproc);
+disable_iommu:
+ rproc_disable_iommu(rproc);
return ret;
}
@@ -1479,6 +1593,15 @@ static int rproc_trigger_auto_boot(struct rproc *rproc)
int ret;
/*
+ * Since the remote processor is in a detached state, it has already
+ * been booted by another entity. As such there is no point in waiting
+ * for a firmware image to be loaded, we can simply initiate the process
+ * of attaching to it immediately.
+ */
+ if (rproc->state == RPROC_DETACHED)
+ return rproc_boot(rproc);
+
+ /*
* We're initiating an asynchronous firmware loading, so we can
* be built-in kernel code, without hanging the boot process.
*/
@@ -1513,187 +1636,19 @@ static int rproc_stop(struct rproc *rproc, bool crashed)
rproc->state = RPROC_OFFLINE;
- dev_info(dev, "stopped remote processor %s\n", rproc->name);
-
- return 0;
-}
-
-/**
- * rproc_coredump_add_segment() - add segment of device memory to coredump
- * @rproc: handle of a remote processor
- * @da: device address
- * @size: size of segment
- *
- * Add device memory to the list of segments to be included in a coredump for
- * the remoteproc.
- *
- * Return: 0 on success, negative errno on error.
- */
-int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
-{
- struct rproc_dump_segment *segment;
-
- segment = kzalloc(sizeof(*segment), GFP_KERNEL);
- if (!segment)
- return -ENOMEM;
-
- segment->da = da;
- segment->size = size;
-
- list_add_tail(&segment->node, &rproc->dump_segments);
-
- return 0;
-}
-EXPORT_SYMBOL(rproc_coredump_add_segment);
-
-/**
- * rproc_coredump_add_custom_segment() - add custom coredump segment
- * @rproc: handle of a remote processor
- * @da: device address
- * @size: size of segment
- * @dumpfn: custom dump function called for each segment during coredump
- * @priv: private data
- *
- * Add device memory to the list of segments to be included in the coredump
- * and associate the segment with the given custom dump function and private
- * data.
- *
- * Return: 0 on success, negative errno on error.
- */
-int rproc_coredump_add_custom_segment(struct rproc *rproc,
- dma_addr_t da, size_t size,
- void (*dumpfn)(struct rproc *rproc,
- struct rproc_dump_segment *segment,
- void *dest),
- void *priv)
-{
- struct rproc_dump_segment *segment;
-
- segment = kzalloc(sizeof(*segment), GFP_KERNEL);
- if (!segment)
- return -ENOMEM;
-
- segment->da = da;
- segment->size = size;
- segment->priv = priv;
- segment->dump = dumpfn;
+ /*
+ * The remote processor has been stopped and is now offline, which means
+ * that the next time it is brought back online the remoteproc core will
+ * be responsible for loading its firmware. As such it is no longer
+ * autonomous.
+ */
+ rproc->autonomous = false;
- list_add_tail(&segment->node, &rproc->dump_segments);
+ dev_info(dev, "stopped remote processor %s\n", rproc->name);
return 0;
}
-EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
-/**
- * rproc_coredump_set_elf_info() - set coredump elf information
- * @rproc: handle of a remote processor
- * @class: elf class for coredump elf file
- * @machine: elf machine for coredump elf file
- *
- * Set elf information which will be used for coredump elf file.
- *
- * Return: 0 on success, negative errno on error.
- */
-int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine)
-{
- if (class != ELFCLASS64 && class != ELFCLASS32)
- return -EINVAL;
-
- rproc->elf_class = class;
- rproc->elf_machine = machine;
-
- return 0;
-}
-EXPORT_SYMBOL(rproc_coredump_set_elf_info);
-
-/**
- * rproc_coredump() - perform coredump
- * @rproc: rproc handle
- *
- * This function will generate an ELF header for the registered segments
- * and create a devcoredump device associated with rproc.
- */
-static void rproc_coredump(struct rproc *rproc)
-{
- struct rproc_dump_segment *segment;
- void *phdr;
- void *ehdr;
- size_t data_size;
- size_t offset;
- void *data;
- void *ptr;
- u8 class = rproc->elf_class;
- int phnum = 0;
-
- if (list_empty(&rproc->dump_segments))
- return;
-
- if (class == ELFCLASSNONE) {
- dev_err(&rproc->dev, "Elf class is not set\n");
- return;
- }
-
- data_size = elf_size_of_hdr(class);
- list_for_each_entry(segment, &rproc->dump_segments, node) {
- data_size += elf_size_of_phdr(class) + segment->size;
-
- phnum++;
- }
-
- data = vmalloc(data_size);
- if (!data)
- return;
-
- ehdr = data;
-
- memset(ehdr, 0, elf_size_of_hdr(class));
- /* e_ident field is common for both elf32 and elf64 */
- elf_hdr_init_ident(ehdr, class);
-
- elf_hdr_set_e_type(class, ehdr, ET_CORE);
- elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
- elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
- elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
- elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
- elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
- elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
- elf_hdr_set_e_phnum(class, ehdr, phnum);
-
- phdr = data + elf_hdr_get_e_phoff(class, ehdr);
- offset = elf_hdr_get_e_phoff(class, ehdr);
- offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);
-
- list_for_each_entry(segment, &rproc->dump_segments, node) {
- memset(phdr, 0, elf_size_of_phdr(class));
- elf_phdr_set_p_type(class, phdr, PT_LOAD);
- elf_phdr_set_p_offset(class, phdr, offset);
- elf_phdr_set_p_vaddr(class, phdr, segment->da);
- elf_phdr_set_p_paddr(class, phdr, segment->da);
- elf_phdr_set_p_filesz(class, phdr, segment->size);
- elf_phdr_set_p_memsz(class, phdr, segment->size);
- elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
- elf_phdr_set_p_align(class, phdr, 0);
-
- if (segment->dump) {
- segment->dump(rproc, segment, data + offset);
- } else {
- ptr = rproc_da_to_va(rproc, segment->da, segment->size);
- if (!ptr) {
- dev_err(&rproc->dev,
- "invalid coredump segment (%pad, %zu)\n",
- &segment->da, segment->size);
- memset(data + offset, 0xff, segment->size);
- } else {
- memcpy(data + offset, ptr, segment->size);
- }
- }
-
- offset += elf_phdr_get_p_filesz(class, phdr);
- phdr += elf_size_of_phdr(class);
- }
-
- dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
-}
/**
* rproc_trigger_recovery() - recover a remoteproc
@@ -1815,24 +1770,30 @@ int rproc_boot(struct rproc *rproc)
goto unlock_mutex;
}
- /* skip the boot process if rproc is already powered up */
+ /* skip the boot or attach process if rproc is already powered up */
if (atomic_inc_return(&rproc->power) > 1) {
ret = 0;
goto unlock_mutex;
}
- dev_info(dev, "powering up %s\n", rproc->name);
+ if (rproc->state == RPROC_DETACHED) {
+ dev_info(dev, "attaching to %s\n", rproc->name);
- /* load firmware */
- ret = request_firmware(&firmware_p, rproc->firmware, dev);
- if (ret < 0) {
- dev_err(dev, "request_firmware failed: %d\n", ret);
- goto downref_rproc;
- }
+ ret = rproc_actuate(rproc);
+ } else {
+ dev_info(dev, "powering up %s\n", rproc->name);
- ret = rproc_fw_boot(rproc, firmware_p);
+ /* load firmware */
+ ret = request_firmware(&firmware_p, rproc->firmware, dev);
+ if (ret < 0) {
+ dev_err(dev, "request_firmware failed: %d\n", ret);
+ goto downref_rproc;
+ }
- release_firmware(firmware_p);
+ ret = rproc_fw_boot(rproc, firmware_p);
+
+ release_firmware(firmware_p);
+ }
downref_rproc:
if (ret)
@@ -1891,8 +1852,6 @@ void rproc_shutdown(struct rproc *rproc)
rproc_disable_iommu(rproc);
- pm_runtime_put(dev);
-
/* Free the copy of the resource table */
kfree(rproc->cached_table);
rproc->cached_table = NULL;
@@ -1952,6 +1911,43 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
#endif
EXPORT_SYMBOL(rproc_get_by_phandle);
+static int rproc_validate(struct rproc *rproc)
+{
+ switch (rproc->state) {
+ case RPROC_OFFLINE:
+ /*
+ * An offline processor without a start()
+ * function makes no sense.
+ */
+ if (!rproc->ops->start)
+ return -EINVAL;
+ break;
+ case RPROC_DETACHED:
+ /*
+ * A remote processor in a detached state without an
+ * attach() function makes not sense.
+ */
+ if (!rproc->ops->attach)
+ return -EINVAL;
+ /*
+ * When attaching to a remote processor the device memory
+ * is already available and as such there is no need to have a
+ * cached table.
+ */
+ if (rproc->cached_table)
+ return -EINVAL;
+ break;
+ default:
+ /*
+ * When adding a remote processor, the state of the device
+ * can be offline or detached, nothing else.
+ */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* rproc_add() - register a remote processor
* @rproc: the remote processor handle to register
@@ -1981,11 +1977,30 @@ int rproc_add(struct rproc *rproc)
if (ret < 0)
return ret;
+ ret = rproc_validate(rproc);
+ if (ret < 0)
+ return ret;
+
dev_info(dev, "%s is available\n", rproc->name);
/* create debugfs entries */
rproc_create_debug_dir(rproc);
+ /* add char device for this remoteproc */
+ ret = rproc_char_device_add(rproc);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Remind ourselves the remote processor has been attached to rather
+ * than booted by the remoteproc core. This is important because the
+ * RPROC_DETACHED state will be lost as soon as the remote processor
+ * has been attached to. Used in firmware_show() and reset in
+ * rproc_stop().
+ */
+ if (rproc->state == RPROC_DETACHED)
+ rproc->autonomous = true;
+
/* if rproc is marked always-on, request it to boot */
if (rproc->auto_boot) {
ret = rproc_trigger_auto_boot(rproc);
@@ -2183,9 +2198,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
rproc->state = RPROC_OFFLINE;
- pm_runtime_no_callbacks(&rproc->dev);
- pm_runtime_enable(&rproc->dev);
-
return rproc;
put_device:
@@ -2205,7 +2217,6 @@ EXPORT_SYMBOL(rproc_alloc);
*/
void rproc_free(struct rproc *rproc)
{
- pm_runtime_disable(&rproc->dev);
put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_free);
@@ -2256,6 +2267,7 @@ int rproc_del(struct rproc *rproc)
mutex_unlock(&rproc->lock);
rproc_delete_debug_dir(rproc);
+ rproc_char_device_remove(rproc);
/* the rproc is downref'ed as soon as it's removed from the klist */
mutex_lock(&rproc_list_mutex);
@@ -2424,6 +2436,7 @@ static int __init remoteproc_init(void)
{
rproc_init_sysfs();
rproc_init_debugfs();
+ rproc_init_cdev();
rproc_init_panic();
return 0;
diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c
new file mode 100644
index 000000000000..bb15a29038e8
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_coredump.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Coredump functionality for Remoteproc framework.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/devcoredump.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/remoteproc.h>
+#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
+
+struct rproc_coredump_state {
+ struct rproc *rproc;
+ void *header;
+ struct completion dump_done;
+};
+
+/**
+ * rproc_coredump_cleanup() - clean up dump_segments list
+ * @rproc: the remote processor handle
+ */
+void rproc_coredump_cleanup(struct rproc *rproc)
+{
+ struct rproc_dump_segment *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
+/**
+ * rproc_coredump_add_segment() - add segment of device memory to coredump
+ * @rproc: handle of a remote processor
+ * @da: device address
+ * @size: size of segment
+ *
+ * Add device memory to the list of segments to be included in a coredump for
+ * the remoteproc.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
+{
+ struct rproc_dump_segment *segment;
+
+ segment = kzalloc(sizeof(*segment), GFP_KERNEL);
+ if (!segment)
+ return -ENOMEM;
+
+ segment->da = da;
+ segment->size = size;
+
+ list_add_tail(&segment->node, &rproc->dump_segments);
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_add_segment);
+
+/**
+ * rproc_coredump_add_custom_segment() - add custom coredump segment
+ * @rproc: handle of a remote processor
+ * @da: device address
+ * @size: size of segment
+ * @dumpfn: custom dump function called for each segment during coredump
+ * @priv: private data
+ *
+ * Add device memory to the list of segments to be included in the coredump
+ * and associate the segment with the given custom dump function and private
+ * data.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_add_custom_segment(struct rproc *rproc,
+ dma_addr_t da, size_t size,
+ void (*dumpfn)(struct rproc *rproc,
+ struct rproc_dump_segment *segment,
+ void *dest, size_t offset,
+ size_t size),
+ void *priv)
+{
+ struct rproc_dump_segment *segment;
+
+ segment = kzalloc(sizeof(*segment), GFP_KERNEL);
+ if (!segment)
+ return -ENOMEM;
+
+ segment->da = da;
+ segment->size = size;
+ segment->priv = priv;
+ segment->dump = dumpfn;
+
+ list_add_tail(&segment->node, &rproc->dump_segments);
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
+
+/**
+ * rproc_coredump_set_elf_info() - set coredump elf information
+ * @rproc: handle of a remote processor
+ * @class: elf class for coredump elf file
+ * @machine: elf machine for coredump elf file
+ *
+ * Set elf information which will be used for coredump elf file.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine)
+{
+ if (class != ELFCLASS64 && class != ELFCLASS32)
+ return -EINVAL;
+
+ rproc->elf_class = class;
+ rproc->elf_machine = machine;
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_set_elf_info);
+
+static void rproc_coredump_free(void *data)
+{
+ struct rproc_coredump_state *dump_state = data;
+
+ vfree(dump_state->header);
+ complete(&dump_state->dump_done);
+}
+
+static void *rproc_coredump_find_segment(loff_t user_offset,
+ struct list_head *segments,
+ size_t *data_left)
+{
+ struct rproc_dump_segment *segment;
+
+ list_for_each_entry(segment, segments, node) {
+ if (user_offset < segment->size) {
+ *data_left = segment->size - user_offset;
+ return segment;
+ }
+ user_offset -= segment->size;
+ }
+
+ *data_left = 0;
+ return NULL;
+}
+
+static void rproc_copy_segment(struct rproc *rproc, void *dest,
+ struct rproc_dump_segment *segment,
+ size_t offset, size_t size)
+{
+ void *ptr;
+
+ if (segment->dump) {
+ segment->dump(rproc, segment, dest, offset, size);
+ } else {
+ ptr = rproc_da_to_va(rproc, segment->da + offset, size);
+ if (!ptr) {
+ dev_err(&rproc->dev,
+ "invalid copy request for segment %pad with offset %zu and size %zu)\n",
+ &segment->da, offset, size);
+ memset(dest, 0xff, size);
+ } else {
+ memcpy(dest, ptr, size);
+ }
+ }
+}
+
+static ssize_t rproc_coredump_read(char *buffer, loff_t offset, size_t count,
+ void *data, size_t header_sz)
+{
+ size_t seg_data, bytes_left = count;
+ ssize_t copy_sz;
+ struct rproc_dump_segment *seg;
+ struct rproc_coredump_state *dump_state = data;
+ struct rproc *rproc = dump_state->rproc;
+ void *elfcore = dump_state->header;
+
+ /* Copy the vmalloc'ed header first. */
+ if (offset < header_sz) {
+ copy_sz = memory_read_from_buffer(buffer, count, &offset,
+ elfcore, header_sz);
+
+ return copy_sz;
+ }
+
+ /*
+ * Find out the segment memory chunk to be copied based on offset.
+ * Keep copying data until count bytes are read.
+ */
+ while (bytes_left) {
+ seg = rproc_coredump_find_segment(offset - header_sz,
+ &rproc->dump_segments,
+ &seg_data);
+ /* EOF check */
+ if (!seg) {
+ dev_info(&rproc->dev, "Ramdump done, %lld bytes read",
+ offset);
+ break;
+ }
+
+ copy_sz = min_t(size_t, bytes_left, seg_data);
+
+ rproc_copy_segment(rproc, buffer, seg, seg->size - seg_data,
+ copy_sz);
+
+ offset += copy_sz;
+ buffer += copy_sz;
+ bytes_left -= copy_sz;
+ }
+
+ return count - bytes_left;
+}
+
+/**
+ * rproc_coredump() - perform coredump
+ * @rproc: rproc handle
+ *
+ * This function will generate an ELF header for the registered segments
+ * and create a devcoredump device associated with rproc. Based on the
+ * coredump configuration this function will directly copy the segments
+ * from device memory to userspace or copy segments from device memory to
+ * a separate buffer, which can then be read by userspace.
+ * The first approach avoids using extra vmalloc memory, but it will stall the
+ * recovery flow until the dump is read by userspace.
+ */
+void rproc_coredump(struct rproc *rproc)
+{
+ struct rproc_dump_segment *segment;
+ void *phdr;
+ void *ehdr;
+ size_t data_size;
+ size_t offset;
+ void *data;
+ u8 class = rproc->elf_class;
+ int phnum = 0;
+ struct rproc_coredump_state dump_state;
+ enum rproc_dump_mechanism dump_conf = rproc->dump_conf;
+
+ if (list_empty(&rproc->dump_segments) ||
+ dump_conf == RPROC_COREDUMP_DISABLED)
+ return;
+
+ if (class == ELFCLASSNONE) {
+ dev_err(&rproc->dev, "Elf class is not set\n");
+ return;
+ }
+
+ data_size = elf_size_of_hdr(class);
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ /*
+ * For default configuration buffer includes headers & segments.
+ * For inline dump buffer just includes headers as segments are
+ * directly read from device memory.
+ */
+ data_size += elf_size_of_phdr(class);
+ if (dump_conf == RPROC_COREDUMP_DEFAULT)
+ data_size += segment->size;
+
+ phnum++;
+ }
+
+ data = vmalloc(data_size);
+ if (!data)
+ return;
+
+ ehdr = data;
+
+ memset(ehdr, 0, elf_size_of_hdr(class));
+ /* e_ident field is common for both elf32 and elf64 */
+ elf_hdr_init_ident(ehdr, class);
+
+ elf_hdr_set_e_type(class, ehdr, ET_CORE);
+ elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
+ elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
+ elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
+ elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
+ elf_hdr_set_e_phnum(class, ehdr, phnum);
+
+ phdr = data + elf_hdr_get_e_phoff(class, ehdr);
+ offset = elf_hdr_get_e_phoff(class, ehdr);
+ offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);
+
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ memset(phdr, 0, elf_size_of_phdr(class));
+ elf_phdr_set_p_type(class, phdr, PT_LOAD);
+ elf_phdr_set_p_offset(class, phdr, offset);
+ elf_phdr_set_p_vaddr(class, phdr, segment->da);
+ elf_phdr_set_p_paddr(class, phdr, segment->da);
+ elf_phdr_set_p_filesz(class, phdr, segment->size);
+ elf_phdr_set_p_memsz(class, phdr, segment->size);
+ elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
+ elf_phdr_set_p_align(class, phdr, 0);
+
+ if (dump_conf == RPROC_COREDUMP_DEFAULT)
+ rproc_copy_segment(rproc, data + offset, segment, 0,
+ segment->size);
+
+ offset += elf_phdr_get_p_filesz(class, phdr);
+ phdr += elf_size_of_phdr(class);
+ }
+ if (dump_conf == RPROC_COREDUMP_DEFAULT) {
+ dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
+ return;
+ }
+
+ /* Initialize the dump state struct to be used by rproc_coredump_read */
+ dump_state.rproc = rproc;
+ dump_state.header = data;
+ init_completion(&dump_state.dump_done);
+
+ dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL,
+ rproc_coredump_read, rproc_coredump_free);
+
+ /*
+ * Wait until the dump is read and free is called. Data is freed
+ * by devcoredump framework automatically after 5 minutes.
+ */
+ wait_for_completion(&dump_state.dump_done);
+}
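To make the inline read path easier to follow, here is a stand-alone sketch (plain user-space C, not kernel code; the segment names and sizes are made up) of how a read offset beyond the ELF header is mapped to a dump segment and an intra-segment offset, mirroring rproc_coredump_find_segment() above:

/* Stand-alone illustration of the offset-to-segment mapping used by the
 * inline coredump read path. Segment sizes are invented for the example.
 */
#include <stddef.h>
#include <stdio.h>

struct seg { const char *name; size_t size; };

int main(void)
{
	struct seg segs[] = { { "mba", 0x1000 }, { "mpss", 0x4000 } };
	size_t user_offset = 0x2800;	/* read offset past the ELF header */
	size_t i;

	for (i = 0; i < sizeof(segs) / sizeof(segs[0]); i++) {
		if (user_offset < segs[i].size) {
			printf("segment %s, offset 0x%zx, %zu bytes left\n",
			       segs[i].name, user_offset,
			       segs[i].size - user_offset);
			return 0;
		}
		user_offset -= segs[i].size;
	}

	printf("EOF\n");	/* offset lies past the last segment */
	return 0;
}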
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 732770e92b99..2e3b3e22e1d0 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -28,6 +28,94 @@
static struct dentry *rproc_dbg;
/*
+ * A coredump-configuration-to-string lookup table, for exposing a
+ * human readable configuration via debugfs. Always keep in sync with
+ * enum rproc_dump_mechanism
+ */
+static const char * const rproc_coredump_str[] = {
+ [RPROC_COREDUMP_DEFAULT] = "default",
+ [RPROC_COREDUMP_INLINE] = "inline",
+ [RPROC_COREDUMP_DISABLED] = "disabled",
+};
+
+/* Expose the current coredump configuration via debugfs */
+static ssize_t rproc_coredump_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ char buf[20];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%s\n",
+ rproc_coredump_str[rproc->dump_conf]);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+/*
+ * By writing to the 'coredump' debugfs entry, we control the behavior of the
+ * coredump mechanism dynamically. The default value of this entry is "default".
+ *
+ * The 'coredump' debugfs entry supports these commands:
+ *
+ * default: This is the default coredump mechanism. When the remoteproc
+ * crashes the entire coredump will be copied to a separate buffer
+ * and exposed to userspace.
+ *
+ * inline: The coredump will not be copied to a separate buffer and the
+ * recovery process will have to wait until data is read by
+ * userspace. But this avoids the use of extra memory.
+ *
+ * disabled: This will disable coredump. Recovery will proceed without
+ * collecting any dump.
+ */
+static ssize_t rproc_coredump_write(struct file *filp,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ int ret, err = 0;
+ char buf[20];
+
+ if (count > sizeof(buf))
+ return -EINVAL;
+
+ ret = copy_from_user(buf, user_buf, count);
+ if (ret)
+ return -EFAULT;
+
+ /* remove end of line */
+ if (buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+
+ if (rproc->state == RPROC_CRASHED) {
+ dev_err(&rproc->dev, "can't change coredump configuration\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (!strncmp(buf, "disable", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_DISABLED;
+ } else if (!strncmp(buf, "inline", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_INLINE;
+ } else if (!strncmp(buf, "default", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_DEFAULT;
+ } else {
+ dev_err(&rproc->dev, "Invalid coredump configuration\n");
+ err = -EINVAL;
+ }
+out:
+ return err ? err : count;
+}
+
+static const struct file_operations rproc_coredump_fops = {
+ .read = rproc_coredump_read,
+ .write = rproc_coredump_write,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
+
+/*
* Some remote processors may support dumping trace logs into a shared
* memory buffer. We expose this trace buffer using debugfs, so users
* can easily tell what's going on remotely.
@@ -337,6 +425,8 @@ void rproc_create_debug_dir(struct rproc *rproc)
rproc, &rproc_rsc_table_fops);
debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir,
rproc, &rproc_carveouts_fops);
+ debugfs_create_file("coredump", 0600, rproc->dbg_dir,
+ rproc, &rproc_coredump_fops);
}
void __init rproc_init_debugfs(void)
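The new debugfs knob can be driven from user space with a plain write. The sketch below is illustrative only; it assumes the per-rproc debugfs directory is named remoteproc0, which depends on the platform and on debugfs being mounted at the usual location.

/* Illustrative user-space sketch: switch a remote processor to the "inline"
 * coredump mechanism handled by rproc_coredump_write() above. The path is an
 * assumption for the example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/remoteproc/remoteproc0/coredump";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Accepted values are "default", "inline" and "disable" */
	if (write(fd, "inline", strlen("inline")) < 0)
		perror("write");

	close(fd);
	return 0;
}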
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 4ba7cb59d3e8..c34002888d2c 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -28,6 +28,8 @@ struct rproc_debug_trace {
void rproc_release(struct kref *kref);
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
void rproc_vdev_release(struct kref *ref);
+int rproc_of_parse_firmware(struct device *dev, int index,
+ const char **fw_name);
/* from remoteproc_virtio.c */
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id);
@@ -47,6 +49,38 @@ extern struct class rproc_class;
int rproc_init_sysfs(void);
void rproc_exit_sysfs(void);
+/* from remoteproc_coredump.c */
+void rproc_coredump_cleanup(struct rproc *rproc);
+void rproc_coredump(struct rproc *rproc);
+
+#ifdef CONFIG_REMOTEPROC_CDEV
+void rproc_init_cdev(void);
+void rproc_exit_cdev(void);
+int rproc_char_device_add(struct rproc *rproc);
+void rproc_char_device_remove(struct rproc *rproc);
+#else
+static inline void rproc_init_cdev(void)
+{
+}
+
+static inline void rproc_exit_cdev(void)
+{
+}
+
+/*
+ * The character device interface is an optional feature; if it is not enabled,
+ * the function should not return an error.
+ */
+static inline int rproc_char_device_add(struct rproc *rproc)
+{
+ return 0;
+}
+
+static inline void rproc_char_device_remove(struct rproc *rproc)
+{
+}
+#endif
+
void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
@@ -79,6 +113,14 @@ static inline int rproc_unprepare_device(struct rproc *rproc)
return 0;
}
+static inline int rproc_attach_device(struct rproc *rproc)
+{
+ if (rproc->ops->attach)
+ return rproc->ops->attach(rproc);
+
+ return 0;
+}
+
static inline
int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
{
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index 52b871327b55..eea514cec50e 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -15,8 +15,20 @@ static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rproc *rproc = to_rproc(dev);
-
- return sprintf(buf, "%s\n", rproc->firmware);
+ const char *firmware = rproc->firmware;
+
+ /*
+ * If the remote processor has been started by an external
+ * entity we have no idea what image it is running. As such
+ * simply display a generic string rather than rproc->firmware.
+ *
+ * Here we rely on the autonomous flag because a remote processor
+ * may have been attached to and currently in a running state.
+ */
+ if (rproc->autonomous)
+ firmware = "unknown";
+
+ return sprintf(buf, "%s\n", firmware);
}
/* Change firmware name via sysfs */
@@ -72,6 +84,7 @@ static const char * const rproc_state_string[] = {
[RPROC_RUNNING] = "running",
[RPROC_CRASHED] = "crashed",
[RPROC_DELETED] = "deleted",
+ [RPROC_DETACHED] = "detached",
[RPROC_LAST] = "invalid",
};
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 062797a447c6..f4da42fc0eeb 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -39,6 +39,15 @@
#define STM32_MBX_VQ1_ID 1
#define STM32_MBX_SHUTDOWN "shutdown"
+#define RSC_TBL_SIZE 1024
+
+#define M4_STATE_OFF 0
+#define M4_STATE_INI 1
+#define M4_STATE_CRUN 2
+#define M4_STATE_CSTOP 3
+#define M4_STATE_STANDBY 4
+#define M4_STATE_CRASH 5
+
struct stm32_syscon {
struct regmap *map;
u32 reg;
@@ -71,12 +80,15 @@ struct stm32_rproc {
struct reset_control *rst;
struct stm32_syscon hold_boot;
struct stm32_syscon pdds;
+ struct stm32_syscon m4_state;
+ struct stm32_syscon rsctbl;
int wdg_irq;
u32 nb_rmems;
struct stm32_rproc_mem *rmems;
struct stm32_mbox mb[MBOX_NB_MBX];
struct workqueue_struct *workqueue;
bool secured_soc;
+ void __iomem *rsc_va;
};
static int stm32_rproc_pa_to_da(struct rproc *rproc, phys_addr_t pa, u64 *da)
@@ -128,10 +140,10 @@ static int stm32_rproc_mem_release(struct rproc *rproc,
return 0;
}
-static int stm32_rproc_of_memory_translations(struct rproc *rproc)
+static int stm32_rproc_of_memory_translations(struct platform_device *pdev,
+ struct stm32_rproc *ddata)
{
- struct device *parent, *dev = rproc->dev.parent;
- struct stm32_rproc *ddata = rproc->priv;
+ struct device *parent, *dev = &pdev->dev;
struct device_node *np;
struct stm32_rproc_mem *p_mems;
struct stm32_rproc_mem_ranges *mem_range;
@@ -204,7 +216,7 @@ static int stm32_rproc_elf_load_rsc_table(struct rproc *rproc,
return 0;
}
-static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+static int stm32_rproc_parse_memory_regions(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
@@ -257,12 +269,23 @@ static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
index++;
}
+ return 0;
+}
+
+static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ int ret = stm32_rproc_parse_memory_regions(rproc);
+
+ if (ret)
+ return ret;
+
return stm32_rproc_elf_load_rsc_table(rproc, fw);
}
static irqreturn_t stm32_rproc_wdg(int irq, void *data)
{
- struct rproc *rproc = data;
+ struct platform_device *pdev = data;
+ struct rproc *rproc = platform_get_drvdata(pdev);
rproc_report_crash(rproc, RPROC_WATCHDOG);
@@ -437,6 +460,13 @@ static int stm32_rproc_start(struct rproc *rproc)
return stm32_rproc_set_hold_boot(rproc, true);
}
+static int stm32_rproc_attach(struct rproc *rproc)
+{
+ stm32_rproc_add_coredump_trace(rproc);
+
+ return stm32_rproc_set_hold_boot(rproc, true);
+}
+
static int stm32_rproc_stop(struct rproc *rproc)
{
struct stm32_rproc *ddata = rproc->priv;
@@ -474,6 +504,18 @@ static int stm32_rproc_stop(struct rproc *rproc)
}
}
+ /* update coprocessor state to OFF if available */
+ if (ddata->m4_state.map) {
+ err = regmap_update_bits(ddata->m4_state.map,
+ ddata->m4_state.reg,
+ ddata->m4_state.mask,
+ M4_STATE_OFF);
+ if (err) {
+ dev_err(&rproc->dev, "failed to set copro state\n");
+ return err;
+ }
+ }
+
return 0;
}
@@ -502,6 +544,7 @@ static void stm32_rproc_kick(struct rproc *rproc, int vqid)
static struct rproc_ops st_rproc_ops = {
.start = stm32_rproc_start,
.stop = stm32_rproc_stop,
+ .attach = stm32_rproc_attach,
.kick = stm32_rproc_kick,
.load = rproc_elf_load_segments,
.parse_fw = stm32_rproc_parse_fw,
@@ -538,12 +581,11 @@ out:
return err;
}
-static int stm32_rproc_parse_dt(struct platform_device *pdev)
+static int stm32_rproc_parse_dt(struct platform_device *pdev,
+ struct stm32_rproc *ddata, bool *auto_boot)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct rproc *rproc = platform_get_drvdata(pdev);
- struct stm32_rproc *ddata = rproc->priv;
struct stm32_syscon tz;
unsigned int tzen;
int err, irq;
@@ -554,7 +596,7 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
if (irq > 0) {
err = devm_request_irq(dev, irq, stm32_rproc_wdg, 0,
- dev_name(dev), rproc);
+ dev_name(dev), pdev);
if (err) {
dev_err(dev, "failed to request wdg irq\n");
return err;
@@ -589,7 +631,7 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
err = regmap_read(tz.map, tz.reg, &tzen);
if (err) {
- dev_err(&rproc->dev, "failed to read tzen\n");
+ dev_err(dev, "failed to read tzen\n");
return err;
}
ddata->secured_soc = tzen & tz.mask;
@@ -605,9 +647,118 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
if (err)
dev_info(dev, "failed to get pdds\n");
- rproc->auto_boot = of_property_read_bool(np, "st,auto-boot");
+ *auto_boot = of_property_read_bool(np, "st,auto-boot");
- return stm32_rproc_of_memory_translations(rproc);
+ /*
+ * See if we can check the M4 status, i.e. whether it was started
+ * by the boot loader or not.
+ */
+ err = stm32_rproc_get_syscon(np, "st,syscfg-m4-state",
+ &ddata->m4_state);
+ if (err) {
+ /* remember this */
+ ddata->m4_state.map = NULL;
+ /* no coprocessor state syscon (optional) */
+ dev_warn(dev, "m4 state not supported\n");
+
+ /* no need to go further */
+ return 0;
+ }
+
+ /* See if we can get the resource table */
+ err = stm32_rproc_get_syscon(np, "st,syscfg-rsc-tbl",
+ &ddata->rsctbl);
+ if (err) {
+ /* no rsc table syscon (optional) */
+ dev_warn(dev, "rsc tbl syscon not supported\n");
+ }
+
+ return 0;
+}
+
+static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata,
+ unsigned int *state)
+{
+ /* See stm32_rproc_parse_dt() */
+ if (!ddata->m4_state.map) {
+ /*
+ * We couldn't get the coprocessor's state, assume
+ * it is not running.
+ */
+ *state = M4_STATE_OFF;
+ return 0;
+ }
+
+ return regmap_read(ddata->m4_state.map, ddata->m4_state.reg, state);
+}
+
+static int stm32_rproc_da_to_pa(struct platform_device *pdev,
+ struct stm32_rproc *ddata,
+ u64 da, phys_addr_t *pa)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_rproc_mem *p_mem;
+ unsigned int i;
+
+ for (i = 0; i < ddata->nb_rmems; i++) {
+ p_mem = &ddata->rmems[i];
+
+ if (da < p_mem->dev_addr ||
+ da >= p_mem->dev_addr + p_mem->size)
+ continue;
+
+ *pa = da - p_mem->dev_addr + p_mem->bus_addr;
+ dev_dbg(dev, "da %llx to pa %#x\n", da, *pa);
+
+ return 0;
+ }
+
+ dev_err(dev, "can't translate da %llx\n", da);
+
+ return -EINVAL;
+}
+
+static int stm32_rproc_get_loaded_rsc_table(struct platform_device *pdev,
+ struct rproc *rproc,
+ struct stm32_rproc *ddata)
+{
+ struct device *dev = &pdev->dev;
+ phys_addr_t rsc_pa;
+ u32 rsc_da;
+ int err;
+
+ err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da);
+ if (err) {
+ dev_err(dev, "failed to read rsc tbl addr\n");
+ return err;
+ }
+
+ if (!rsc_da)
+ /* no rsc table */
+ return 0;
+
+ err = stm32_rproc_da_to_pa(pdev, ddata, rsc_da, &rsc_pa);
+ if (err)
+ return err;
+
+ ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE);
+ if (IS_ERR_OR_NULL(ddata->rsc_va)) {
+ dev_err(dev, "Unable to map memory region: %pa+%zx\n",
+ &rsc_pa, RSC_TBL_SIZE);
+ ddata->rsc_va = NULL;
+ return -ENOMEM;
+ }
+
+ /*
+ * The resource table is already loaded in device memory, no need
+ * to work with a cached table.
+ */
+ rproc->cached_table = NULL;
+ /* Assuming the resource table fits in 1kB is fair */
+ rproc->table_sz = RSC_TBL_SIZE;
+ rproc->table_ptr = (struct resource_table *)ddata->rsc_va;
+
+ return 0;
}
static int stm32_rproc_probe(struct platform_device *pdev)
@@ -616,6 +767,7 @@ static int stm32_rproc_probe(struct platform_device *pdev)
struct stm32_rproc *ddata;
struct device_node *np = dev->of_node;
struct rproc *rproc;
+ unsigned int state;
int ret;
ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
@@ -626,25 +778,47 @@ static int stm32_rproc_probe(struct platform_device *pdev)
if (!rproc)
return -ENOMEM;
+ ddata = rproc->priv;
+
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
+
+ ret = stm32_rproc_parse_dt(pdev, ddata, &rproc->auto_boot);
+ if (ret)
+ goto free_rproc;
+
+ ret = stm32_rproc_of_memory_translations(pdev, ddata);
+ if (ret)
+ goto free_rproc;
+
+ ret = stm32_rproc_get_m4_status(ddata, &state);
+ if (ret)
+ goto free_rproc;
+
+ if (state == M4_STATE_CRUN) {
+ rproc->state = RPROC_DETACHED;
+
+ ret = stm32_rproc_parse_memory_regions(rproc);
+ if (ret)
+ goto free_resources;
+
+ ret = stm32_rproc_get_loaded_rsc_table(pdev, rproc, ddata);
+ if (ret)
+ goto free_resources;
+ }
+
rproc->has_iommu = false;
- ddata = rproc->priv;
ddata->workqueue = create_workqueue(dev_name(dev));
if (!ddata->workqueue) {
dev_err(dev, "cannot create workqueue\n");
ret = -ENOMEM;
- goto free_rproc;
+ goto free_resources;
}
platform_set_drvdata(pdev, rproc);
- ret = stm32_rproc_parse_dt(pdev);
- if (ret)
- goto free_wkq;
-
ret = stm32_rproc_request_mbox(rproc);
if (ret)
- goto free_rproc;
+ goto free_wkq;
ret = rproc_add(rproc);
if (ret)
@@ -656,6 +830,8 @@ free_mb:
stm32_rproc_free_mbox(rproc);
free_wkq:
destroy_workqueue(ddata->workqueue);
+free_resources:
+ rproc_resource_cleanup(rproc);
free_rproc:
if (device_may_wakeup(dev)) {
dev_pm_clear_wake_irq(dev);
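The stm32_rproc_da_to_pa() helper added above simply walks the declared memory ranges and rebases a device address onto the matching bus address. A minimal, self-contained sketch of the same walk, with invented range values, shows the idea (this is an illustration, not the driver code):

/* Sketch of device-address -> physical-address translation over fixed
 * ranges, mirroring the walk in stm32_rproc_da_to_pa(). Values invented.
 */
#include <stdint.h>
#include <stdio.h>

struct mem_range {
	uint64_t dev_addr;	/* address as seen by the coprocessor */
	uint64_t bus_addr;	/* corresponding physical/bus address */
	uint64_t size;
};

static int da_to_pa(const struct mem_range *r, int n, uint64_t da, uint64_t *pa)
{
	for (int i = 0; i < n; i++) {
		if (da < r[i].dev_addr || da >= r[i].dev_addr + r[i].size)
			continue;
		*pa = da - r[i].dev_addr + r[i].bus_addr;
		return 0;
	}
	return -1;	/* no range covers this device address */
}

int main(void)
{
	const struct mem_range ranges[] = {
		{ .dev_addr = 0x10000000, .bus_addr = 0x30000000, .size = 0x40000 },
	};
	uint64_t pa;

	if (!da_to_pa(ranges, 1, 0x10001000, &pa))
		printf("pa = 0x%llx\n", (unsigned long long)pa);
	return 0;
}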
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
new file mode 100644
index 000000000000..9011e477290c
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -0,0 +1,787 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 DSP Remote Processor(s) driver
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+
+#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
+
+/**
+ * struct k3_dsp_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from DSP view
+ * @size: Size of the memory region
+ */
+struct k3_dsp_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct k3_dsp_mem_data - memory definitions for a DSP
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct k3_dsp_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct k3_dsp_dev_data - device data structure for a DSP
+ * @mems: pointer to memory definitions for a DSP
+ * @num_mems: number of memory regions in @mems
+ * @boot_align_addr: boot vector address alignment granularity
+ * @uses_lreset: flag to denote the need for local reset management
+ */
+struct k3_dsp_dev_data {
+ const struct k3_dsp_mem_data *mems;
+ u32 num_mems;
+ u32 boot_align_addr;
+ bool uses_lreset;
+};
+
+/**
+ * struct k3_dsp_rproc - k3 DSP remote processor driver structure
+ * @dev: cached device pointer
+ * @rproc: remoteproc device handle
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ * @reset: reset control handle
+ * @data: pointer to DSP-specific device data
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ */
+struct k3_dsp_rproc {
+ struct device *dev;
+ struct rproc *rproc;
+ struct k3_dsp_mem *mem;
+ int num_mems;
+ struct k3_dsp_mem *rmem;
+ int num_rmems;
+ struct reset_control *reset;
+ const struct k3_dsp_dev_data *data;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+};
+
+/**
+ * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the OMAP mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
+ client);
+ struct device *dev = kproc->rproc->dev.parent;
+ const char *name = kproc->rproc->name;
+ u32 msg = omap_mbox_message(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported. So, just log this for now
+ */
+ dev_err(dev, "K3 DSP rproc %s crashed\n", name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", name);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > kproc->rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+
+/*
+ * Kick the remote processor to notify about pending unprocessed messages.
+ * The vqid is not used and is inconsequential: the kick is performed
+ * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
+ * and the remote processor is expected to process both its Tx and Rx virtqueues.
+ */
+static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ mbox_msg_t msg = (mbox_msg_t)vqid;
+ int ret;
+
+ /* send the index of the triggered virtqueue in the mailbox payload */
+ ret = mbox_send_message(kproc->mbox, (void *)msg);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+}
+
+/* Put the DSP processor into reset */
+static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = reset_control_assert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ if (kproc->data->uses_lreset)
+ return ret;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+ if (reset_control_deassert(kproc->reset))
+ dev_warn(dev, "local-reset deassert back failed\n");
+ }
+
+ return ret;
+}
+
+/* Release the DSP processor from reset */
+static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (kproc->data->uses_lreset)
+ goto lreset;
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset deassert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+lreset:
+ ret = reset_control_deassert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
+ dev_warn(dev, "module-reset assert back failed\n");
+ }
+
+ return ret;
+}
+
+/*
+ * The C66x DSP cores have a local reset that affects only the CPU, and a
+ * generic module reset that powers on the device and allows the DSP internal
+ * memories to be accessed while the local reset is asserted. This function is
+ * used to release the global reset on C66x DSPs to allow loading into the DSP
+ * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
+ * firmware loading, and is followed by the .start() ops after loading to
+ * actually let the C66x DSP cores run.
+ */
+static int k3_dsp_rproc_prepare(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading, ret = %d\n",
+ ret);
+
+ return ret;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * global reset on applicable C66x cores. This completes the second portion of
+ * powering down the C66x DSP cores. The cores themselves are only halted in the
+ * .stop() callback through the local reset, and the .unprepare() ops is invoked
+ * by the remoteproc core after the remoteproc is stopped to balance the global
+ * reset.
+ */
+static int k3_dsp_rproc_unprepare(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+
+ return ret;
+}
+
+/*
+ * Power up the DSP remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met.
+ */
+static int k3_dsp_rproc_start(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ u32 boot_addr;
+ int ret;
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_dsp_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox)) {
+ ret = -EBUSY;
+ dev_err(dev, "mbox_request_channel failed: %ld\n",
+ PTR_ERR(kproc->mbox));
+ return ret;
+ }
+
+ /*
+ * Ping the remote processor; this is only a sanity check for now and
+ * has no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ goto put_mbox;
+ }
+
+ boot_addr = rproc->bootaddr;
+ if (boot_addr & (kproc->data->boot_align_addr - 1)) {
+ dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
+ boot_addr, kproc->data->boot_align_addr);
+ ret = -EINVAL;
+ goto put_mbox;
+ }
+
+ dev_err(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
+ ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
+ if (ret)
+ goto put_mbox;
+
+ ret = k3_dsp_rproc_release(kproc);
+ if (ret)
+ goto put_mbox;
+
+ return 0;
+
+put_mbox:
+ mbox_free_channel(kproc->mbox);
+ return ret;
+}
+
+/*
+ * Stop the DSP remote processor.
+ *
+ * This function puts the DSP processor into reset, and finishes processing
+ * of any pending messages.
+ */
+static int k3_dsp_rproc_stop(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+
+ mbox_free_channel(kproc->mbox);
+
+ k3_dsp_rproc_reset(kproc);
+
+ return 0;
+}
+
+/*
+ * Custom function to translate a DSP device address (internal RAMs only) to a
+ * kernel virtual address. The DSPs can access their RAMs at either an internal
+ * address visible only from a DSP, or at the SoC-level bus address. Both these
+ * addresses need to be looked through for translation. The translated addresses
+ * can be used either by the remoteproc core for loading (when using kernel
+ * remoteproc loader), or by any rpmsg bus drivers.
+ */
+static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+ for (i = 0; i < kproc->num_mems; i++) {
+ bus_addr = kproc->mem[i].bus_addr;
+ dev_addr = kproc->mem[i].dev_addr;
+ size = kproc->mem[i].size;
+
+ if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
+ /* handle DSP-view addresses */
+ if (da >= dev_addr &&
+ ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ } else {
+ /* handle SoC-view addresses */
+ if (da >= bus_addr &&
+ (da + len) <= (bus_addr + size)) {
+ offset = da - bus_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+ }
+
+ /* handle static DDR reserved memory regions */
+ for (i = 0; i < kproc->num_rmems; i++) {
+ dev_addr = kproc->rmem[i].dev_addr;
+ size = kproc->rmem[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->rmem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ return NULL;
+}
+
+static const struct rproc_ops k3_dsp_rproc_ops = {
+ .start = k3_dsp_rproc_start,
+ .stop = k3_dsp_rproc_stop,
+ .kick = k3_dsp_rproc_kick,
+ .da_to_va = k3_dsp_rproc_da_to_va,
+};
+
+static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
+ struct k3_dsp_rproc *kproc)
+{
+ const struct k3_dsp_dev_data *data = kproc->data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems = 0;
+ int i;
+
+ num_mems = kproc->data->num_mems;
+ kproc->mem = devm_kcalloc(kproc->dev, num_mems,
+ sizeof(*kproc->mem), GFP_KERNEL);
+ if (!kproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ data->mems[i].name);
+ if (!res) {
+ dev_err(dev, "found no memory resource for %s\n",
+ data->mems[i].name);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "could not request %s region for resource\n",
+ data->mems[i].name);
+ return -EBUSY;
+ }
+
+ kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+ resource_size(res));
+ if (IS_ERR(kproc->mem[i].cpu_addr)) {
+ dev_err(dev, "failed to map %s memory\n",
+ data->mems[i].name);
+ return PTR_ERR(kproc->mem[i].cpu_addr);
+ }
+ kproc->mem[i].bus_addr = res->start;
+ kproc->mem[i].dev_addr = data->mems[i].dev_addr;
+ kproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ data->mems[i].name, &kproc->mem[i].bus_addr,
+ kproc->mem[i].size, kproc->mem[i].cpu_addr,
+ kproc->mem[i].dev_addr);
+ }
+ kproc->num_mems = num_mems;
+
+ return 0;
+}
+
+static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *rmem_np;
+ struct reserved_mem *rmem;
+ int num_rmems;
+ int ret, i;
+
+ num_rmems = of_property_count_elems_of_size(np, "memory-region",
+ sizeof(phandle));
+ if (num_rmems <= 0) {
+ dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+ if (num_rmems < 2) {
+ dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+
+ /* use reserved memory region 0 for vring DMA allocations */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+ if (ret) {
+ dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ num_rmems--;
+ kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem) {
+ ret = -ENOMEM;
+ goto release_rmem;
+ }
+
+ /* use remaining reserved memory regions for static carveouts */
+ for (i = 0; i < num_rmems; i++) {
+ rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+ if (!rmem_np) {
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+
+ rmem = of_reserved_mem_lookup(rmem_np);
+ if (!rmem) {
+ of_node_put(rmem_np);
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+ of_node_put(rmem_np);
+
+ kproc->rmem[i].bus_addr = rmem->base;
+ /* 64-bit address regions currently not supported */
+ kproc->rmem[i].dev_addr = (u32)rmem->base;
+ kproc->rmem[i].size = rmem->size;
+ kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
+ if (!kproc->rmem[i].cpu_addr) {
+ dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ i + 1, &rmem->base, &rmem->size);
+ ret = -ENOMEM;
+ goto unmap_rmem;
+ }
+
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i + 1, &kproc->rmem[i].bus_addr,
+ kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+ kproc->rmem[i].dev_addr);
+ }
+ kproc->num_rmems = num_rmems;
+
+ return 0;
+
+unmap_rmem:
+ for (i--; i >= 0; i--)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+release_rmem:
+ of_reserved_mem_device_release(kproc->dev);
+ return ret;
+}
+
+static void k3_dsp_reserved_mem_exit(struct k3_dsp_rproc *kproc)
+{
+ int i;
+
+ for (i = 0; i < kproc->num_rmems; i++)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+
+ of_reserved_mem_device_release(kproc->dev);
+}
+
+static
+struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev,
+ const struct ti_sci_handle *sci)
+{
+ struct ti_sci_proc *tsp;
+ u32 temp[2];
+ int ret;
+
+ ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids",
+ temp, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ tsp = kzalloc(sizeof(*tsp), GFP_KERNEL);
+ if (!tsp)
+ return ERR_PTR(-ENOMEM);
+
+ tsp->dev = dev;
+ tsp->sci = sci;
+ tsp->ops = &sci->ops.proc_ops;
+ tsp->proc_id = temp[0];
+ tsp->host_id = temp[1];
+
+ return tsp;
+}
+
+static int k3_dsp_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct k3_dsp_dev_data *data;
+ struct k3_dsp_rproc *kproc;
+ struct rproc *rproc;
+ const char *fw_name;
+ int ret = 0;
+ int ret1;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret) {
+ dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ rproc = rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops, fw_name,
+ sizeof(*kproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->has_iommu = false;
+ rproc->recovery_disabled = true;
+ if (data->uses_lreset) {
+ rproc->ops->prepare = k3_dsp_rproc_prepare;
+ rproc->ops->unprepare = k3_dsp_rproc_unprepare;
+ }
+ kproc = rproc->priv;
+ kproc->rproc = rproc;
+ kproc->dev = dev;
+ kproc->data = data;
+
+ kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
+ if (IS_ERR(kproc->ti_sci)) {
+ ret = PTR_ERR(kproc->ti_sci);
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
+ ret);
+ }
+ kproc->ti_sci = NULL;
+ goto free_rproc;
+ }
+
+ ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+ goto put_sci;
+ }
+
+ kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(kproc->reset)) {
+ ret = PTR_ERR(kproc->reset);
+ dev_err(dev, "failed to get reset, status = %d\n", ret);
+ goto put_sci;
+ }
+
+ kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
+ if (IS_ERR(kproc->tsp)) {
+ dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
+ ret);
+ ret = PTR_ERR(kproc->tsp);
+ goto put_sci;
+ }
+
+ ret = ti_sci_proc_request(kproc->tsp);
+ if (ret < 0) {
+ dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
+ goto free_tsp;
+ }
+
+ ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
+ if (ret)
+ goto release_tsp;
+
+ ret = k3_dsp_reserved_mem_init(kproc);
+ if (ret) {
+ dev_err(dev, "reserved memory init failed, ret = %d\n", ret);
+ goto release_tsp;
+ }
+
+ /*
+ * Ensure the DSP local reset is asserted so that the DSP doesn't
+ * execute bogus code in .prepare() when the module reset is released.
+ */
+ if (data->uses_lreset) {
+ ret = reset_control_status(kproc->reset);
+ if (ret < 0) {
+ dev_err(dev, "failed to get reset status, status = %d\n",
+ ret);
+ goto release_mem;
+ } else if (ret == 0) {
+ dev_warn(dev, "local reset is deasserted for device\n");
+ k3_dsp_rproc_reset(kproc);
+ }
+ }
+
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "failed to register device with remoteproc core, status = %d\n",
+ ret);
+ goto release_mem;
+ }
+
+ platform_set_drvdata(pdev, kproc);
+
+ return 0;
+
+release_mem:
+ k3_dsp_reserved_mem_exit(kproc);
+release_tsp:
+ ret1 = ti_sci_proc_release(kproc->tsp);
+ if (ret1)
+ dev_err(dev, "failed to release proc, ret = %d\n", ret1);
+free_tsp:
+ kfree(kproc->tsp);
+put_sci:
+ ret1 = ti_sci_put_handle(kproc->ti_sci);
+ if (ret1)
+ dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret1);
+free_rproc:
+ rproc_free(rproc);
+ return ret;
+}
+
+static int k3_dsp_rproc_remove(struct platform_device *pdev)
+{
+ struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ rproc_del(kproc->rproc);
+
+ ret = ti_sci_proc_release(kproc->tsp);
+ if (ret)
+ dev_err(dev, "failed to release proc, ret = %d\n", ret);
+
+ kfree(kproc->tsp);
+
+ ret = ti_sci_put_handle(kproc->ti_sci);
+ if (ret)
+ dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret);
+
+ k3_dsp_reserved_mem_exit(kproc);
+ rproc_free(kproc->rproc);
+
+ return 0;
+}
+
+static const struct k3_dsp_mem_data c66_mems[] = {
+ { .name = "l2sram", .dev_addr = 0x800000 },
+ { .name = "l1pram", .dev_addr = 0xe00000 },
+ { .name = "l1dram", .dev_addr = 0xf00000 },
+};
+
+/* C71x cores only have an L1P cache; there are no L1P SRAMs */
+static const struct k3_dsp_mem_data c71_mems[] = {
+ { .name = "l2sram", .dev_addr = 0x800000 },
+ { .name = "l1dram", .dev_addr = 0xe00000 },
+};
+
+static const struct k3_dsp_dev_data c66_data = {
+ .mems = c66_mems,
+ .num_mems = ARRAY_SIZE(c66_mems),
+ .boot_align_addr = SZ_1K,
+ .uses_lreset = true,
+};
+
+static const struct k3_dsp_dev_data c71_data = {
+ .mems = c71_mems,
+ .num_mems = ARRAY_SIZE(c71_mems),
+ .boot_align_addr = SZ_2M,
+ .uses_lreset = false,
+};
+
+static const struct of_device_id k3_dsp_of_match[] = {
+ { .compatible = "ti,j721e-c66-dsp", .data = &c66_data, },
+ { .compatible = "ti,j721e-c71-dsp", .data = &c71_data, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
+
+static struct platform_driver k3_dsp_rproc_driver = {
+ .probe = k3_dsp_rproc_probe,
+ .remove = k3_dsp_rproc_remove,
+ .driver = {
+ .name = "k3-dsp-rproc",
+ .of_match_table = k3_dsp_of_match,
+ },
+};
+
+module_platform_driver(k3_dsp_rproc_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI K3 DSP Remoteproc driver");
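k3_dsp_rproc_da_to_va() above accepts either a DSP-internal address (below the 16 MiB local-address mask) or a SoC-view bus address and maps it onto a region offset. A hedged, stand-alone sketch of that dual-view matching, with invented region values (an illustration, not the driver code):

/* Sketch of the dual-view lookup used by k3_dsp_rproc_da_to_va():
 * small addresses are matched against the DSP-internal view, larger
 * ones against the SoC bus view. Region values are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define LOCAL_ADDRESS_MASK ((1u << 24) - 1)	/* 16 MiB - 1 */

struct region {
	uint64_t bus_addr;
	uint32_t dev_addr;
	uint64_t size;
};

static int da_to_offset(const struct region *m, uint64_t da, uint64_t len,
			uint64_t *off)
{
	if (da < LOCAL_ADDRESS_MASK) {
		/* DSP-internal view */
		if (da >= m->dev_addr && da + len <= m->dev_addr + m->size) {
			*off = da - m->dev_addr;
			return 0;
		}
	} else {
		/* SoC bus view */
		if (da >= m->bus_addr && da + len <= m->bus_addr + m->size) {
			*off = da - m->bus_addr;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	const struct region l2sram = {
		.bus_addr = 0x64800000, .dev_addr = 0x800000, .size = 0x100000,
	};
	uint64_t off;

	if (!da_to_offset(&l2sram, 0x800100, 4, &off))
		printf("offset into region: 0x%llx\n", (unsigned long long)off);
	return 0;
}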
diff --git a/drivers/remoteproc/ti_sci_proc.h b/drivers/remoteproc/ti_sci_proc.h
new file mode 100644
index 000000000000..778558abcdcc
--- /dev/null
+++ b/drivers/remoteproc/ti_sci_proc.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Texas Instruments TI-SCI Processor Controller Helper Functions
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef REMOTEPROC_TI_SCI_PROC_H
+#define REMOTEPROC_TI_SCI_PROC_H
+
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/**
+ * struct ti_sci_proc - structure representing a processor control client
+ * @sci: cached TI-SCI protocol handle
+ * @ops: cached TI-SCI proc ops
+ * @dev: cached client device pointer
+ * @proc_id: processor id for the consumer remoteproc device
+ * @host_id: host id to pass the control over for this consumer remoteproc
+ * device
+ */
+struct ti_sci_proc {
+ const struct ti_sci_handle *sci;
+ const struct ti_sci_proc_ops *ops;
+ struct device *dev;
+ u8 proc_id;
+ u8 host_id;
+};
+
+static inline int ti_sci_proc_request(struct ti_sci_proc *tsp)
+{
+ int ret;
+
+ ret = tsp->ops->request(tsp->sci, tsp->proc_id);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor request failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_release(struct ti_sci_proc *tsp)
+{
+ int ret;
+
+ ret = tsp->ops->release(tsp->sci, tsp->proc_id);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor release failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_handover(struct ti_sci_proc *tsp)
+{
+ int ret;
+
+ ret = tsp->ops->handover(tsp->sci, tsp->proc_id, tsp->host_id);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor handover of %d to %d failed: %d\n",
+ tsp->proc_id, tsp->host_id, ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_set_config(struct ti_sci_proc *tsp,
+ u64 boot_vector,
+ u32 cfg_set, u32 cfg_clr)
+{
+ int ret;
+
+ ret = tsp->ops->set_config(tsp->sci, tsp->proc_id, boot_vector,
+ cfg_set, cfg_clr);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor set_config failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_set_control(struct ti_sci_proc *tsp,
+ u32 ctrl_set, u32 ctrl_clr)
+{
+ int ret;
+
+ ret = tsp->ops->set_control(tsp->sci, tsp->proc_id, ctrl_set, ctrl_clr);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor set_control failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_get_status(struct ti_sci_proc *tsp,
+ u64 *boot_vector, u32 *cfg_flags,
+ u32 *ctrl_flags, u32 *status_flags)
+{
+ int ret;
+
+ ret = tsp->ops->get_status(tsp->sci, tsp->proc_id, boot_vector,
+ cfg_flags, ctrl_flags, status_flags);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor get_status failed: %d\n",
+ ret);
+ return ret;
+}
+
+#endif /* REMOTEPROC_TI_SCI_PROC_H */
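The header above is a set of thin inline wrappers: each one forwards to the TI-SCI proc ops and logs on failure. A stand-alone sketch of the same wrapper-over-ops-table pattern, with an invented ops struct and dummy callbacks, assuming nothing about the real TI-SCI API:

/* Sketch of the thin-wrapper pattern used by ti_sci_proc.h: inline
 * helpers forward to an ops table and log on failure. The ops struct
 * and callbacks here are invented for illustration.
 */
#include <stdio.h>

struct proc_ops {
	int (*request)(unsigned int proc_id);
	int (*release)(unsigned int proc_id);
};

struct proc_client {
	const struct proc_ops *ops;
	unsigned int proc_id;
};

static inline int proc_request(struct proc_client *c)
{
	int ret = c->ops->request(c->proc_id);

	if (ret)
		fprintf(stderr, "processor request failed: %d\n", ret);
	return ret;
}

static int dummy_request(unsigned int id) { (void)id; return 0; }
static int dummy_release(unsigned int id) { (void)id; return 0; }

int main(void)
{
	const struct proc_ops ops = { dummy_request, dummy_release };
	struct proc_client c = { .ops = &ops, .proc_id = 1 };

	return proc_request(&c);
}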
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 07d4f3374098..9006fc7f73d0 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/virtio.h>
+#include <linux/virtio_byteorder.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/wait.h>
@@ -84,11 +85,11 @@ struct virtproc_info {
* Every message sent(/received) on the rpmsg bus begins with this header.
*/
struct rpmsg_hdr {
- u32 src;
- u32 dst;
- u32 reserved;
- u16 len;
- u16 flags;
+ __virtio32 src;
+ __virtio32 dst;
+ __virtio32 reserved;
+ __virtio16 len;
+ __virtio16 flags;
u8 data[];
} __packed;
@@ -106,8 +107,8 @@ struct rpmsg_hdr {
*/
struct rpmsg_ns_msg {
char name[RPMSG_NAME_SIZE];
- u32 addr;
- u32 flags;
+ __virtio32 addr;
+ __virtio32 flags;
} __packed;
/**
@@ -335,8 +336,8 @@ static int virtio_rpmsg_announce_create(struct rpmsg_device *rpdev)
struct rpmsg_ns_msg nsm;
strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
- nsm.addr = rpdev->ept->addr;
- nsm.flags = RPMSG_NS_CREATE;
+ nsm.addr = cpu_to_virtio32(vrp->vdev, rpdev->ept->addr);
+ nsm.flags = cpu_to_virtio32(vrp->vdev, RPMSG_NS_CREATE);
err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
if (err)
@@ -359,8 +360,8 @@ static int virtio_rpmsg_announce_destroy(struct rpmsg_device *rpdev)
struct rpmsg_ns_msg nsm;
strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
- nsm.addr = rpdev->ept->addr;
- nsm.flags = RPMSG_NS_DESTROY;
+ nsm.addr = cpu_to_virtio32(vrp->vdev, rpdev->ept->addr);
+ nsm.flags = cpu_to_virtio32(vrp->vdev, RPMSG_NS_DESTROY);
err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
if (err)
@@ -612,18 +613,18 @@ static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev,
}
}
- msg->len = len;
+ msg->len = cpu_to_virtio16(vrp->vdev, len);
msg->flags = 0;
- msg->src = src;
- msg->dst = dst;
+ msg->src = cpu_to_virtio32(vrp->vdev, src);
+ msg->dst = cpu_to_virtio32(vrp->vdev, dst);
msg->reserved = 0;
memcpy(msg->data, data, len);
dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
- msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
+ src, dst, len, msg->flags, msg->reserved);
#if defined(CONFIG_DYNAMIC_DEBUG)
dynamic_hex_dump("rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
- msg, sizeof(*msg) + msg->len, true);
+ msg, sizeof(*msg) + len, true);
#endif
rpmsg_sg_init(&sg, msg, sizeof(*msg) + len);
@@ -704,13 +705,17 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
{
struct rpmsg_endpoint *ept;
struct scatterlist sg;
+ unsigned int msg_len = virtio16_to_cpu(vrp->vdev, msg->len);
int err;
dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
- msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
+ virtio32_to_cpu(vrp->vdev, msg->src),
+ virtio32_to_cpu(vrp->vdev, msg->dst), msg_len,
+ virtio16_to_cpu(vrp->vdev, msg->flags),
+ virtio32_to_cpu(vrp->vdev, msg->reserved));
#if defined(CONFIG_DYNAMIC_DEBUG)
dynamic_hex_dump("rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
- msg, sizeof(*msg) + msg->len, true);
+ msg, sizeof(*msg) + msg_len, true);
#endif
/*
@@ -718,15 +723,15 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
* the reported payload length.
*/
if (len > vrp->buf_size ||
- msg->len > (len - sizeof(struct rpmsg_hdr))) {
- dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
+ msg_len > (len - sizeof(struct rpmsg_hdr))) {
+ dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg_len);
return -EINVAL;
}
/* use the dst addr to fetch the callback of the appropriate user */
mutex_lock(&vrp->endpoints_lock);
- ept = idr_find(&vrp->endpoints, msg->dst);
+ ept = idr_find(&vrp->endpoints, virtio32_to_cpu(vrp->vdev, msg->dst));
/* let's make sure no one deallocates ept while we use it */
if (ept)
@@ -739,8 +744,8 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
mutex_lock(&ept->cb_lock);
if (ept->cb)
- ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
- msg->src);
+ ept->cb(ept->rpdev, msg->data, msg_len, ept->priv,
+ virtio32_to_cpu(vrp->vdev, msg->src));
mutex_unlock(&ept->cb_lock);
@@ -846,15 +851,15 @@ static int rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
/* don't trust the remote processor for null terminating the name */
msg->name[RPMSG_NAME_SIZE - 1] = '\0';
- dev_info(dev, "%sing channel %s addr 0x%x\n",
- msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat",
- msg->name, msg->addr);
-
strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
- chinfo.dst = msg->addr;
+ chinfo.dst = virtio32_to_cpu(vrp->vdev, msg->addr);
+
+ dev_info(dev, "%sing channel %s addr 0x%x\n",
+ virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY ?
+ "destroy" : "creat", msg->name, chinfo.dst);
- if (msg->flags & RPMSG_NS_DESTROY) {
+ if (virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY) {
ret = rpmsg_unregister_device(&vrp->vdev->dev, &chinfo);
if (ret)
dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret);
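The rpmsg changes above switch the wire-visible header fields to __virtio16/__virtio32 and route every access through cpu_to_virtio*()/virtio*_to_cpu(). The underlying rule is that a device offering VIRTIO_F_VERSION_1 uses little-endian fields, while a legacy device uses the guest's native endianness. A user-space approximation of that rule, assuming the glibc endian.h helpers (this is not the kernel implementation):

/* Sketch of the virtio byte-order rule: little-endian when the device
 * is "modern" (VIRTIO_F_VERSION_1), native endianness otherwise.
 */
#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t cpu_to_virtio32_sketch(bool little_endian, uint32_t val)
{
	return little_endian ? htole32(val) : val;
}

static uint32_t virtio32_to_cpu_sketch(bool little_endian, uint32_t val)
{
	return little_endian ? le32toh(val) : val;
}

int main(void)
{
	uint32_t wire = cpu_to_virtio32_sketch(true, 0x12345678);

	printf("on the wire: 0x%08x, back to cpu: 0x%08x\n",
	       wire, virtio32_to_cpu_sketch(true, wire));
	return 0;
}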
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8cc003aa4d00..ca1c39b6f631 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -754,14 +754,14 @@ static struct scsi_host_template virtscsi_host_template = {
#define virtscsi_config_get(vdev, fld) \
({ \
- typeof(((struct virtio_scsi_config *)0)->fld) __val; \
+ __virtio_native_type(struct virtio_scsi_config, fld) __val; \
virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
__val; \
})
#define virtscsi_config_set(vdev, fld, val) \
do { \
- typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
+ __virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
} while(0)
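The virtio_scsi accessors above move from a plain typeof() of the config field to __virtio_native_type(), so that __virtio32 and friends resolve back to native integer types for the temporary. A hedged sketch of the typeof-based accessor pattern itself, assuming GNU C statement expressions and an invented config struct (not the kernel macro):

/* Sketch of a typeof-based config accessor like virtscsi_config_get():
 * declare a temporary of the field's type, fill it, and yield it as the
 * expression value. GNU C extensions assumed; struct invented.
 */
#include <stdio.h>
#include <string.h>

struct cfg { unsigned int num_queues; unsigned short cdb_size; };

static struct cfg device_cfg = { .num_queues = 4, .cdb_size = 32 };

#define cfg_get(fld)						\
({								\
	typeof(device_cfg.fld) __val;				\
	memcpy(&__val, &device_cfg.fld, sizeof(__val));		\
	__val;							\
})

int main(void)
{
	printf("num_queues=%u cdb_size=%u\n",
	       cfg_get(num_queues), (unsigned int)cfg_get(cdb_size));
	return 0;
}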
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index d93a69b12f81..4271c408103e 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -29,4 +29,23 @@ config IFCVF
To compile this driver as a module, choose M here: the module will
be called ifcvf.
+config MLX5_VDPA
+ bool "MLX5 VDPA support library for ConnectX devices"
+ depends on MLX5_CORE
+ default n
+ help
+ Support library for Mellanox VDPA drivers. Provides code that is
+ common for all types of VDPA drivers. The following drivers are planned:
+ net, block.
+
+config MLX5_VDPA_NET
+ tristate "vDPA driver for ConnectX devices"
+ depends on MLX5_VDPA
+ default n
+ help
+ VDPA network driver for ConnectX6 and newer. Provides offloading
+ of virtio net datapath such that descriptors put on the ring will
+ be executed by the hardware. It also supports a variety of stateless
+ offloads depending on the actual device used and firmware version.
+
endif # VDPA
diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile
index 8bbb686ca7a2..d160e9b63a66 100644
--- a/drivers/vdpa/Makefile
+++ b/drivers/vdpa/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_VDPA) += vdpa.o
obj-$(CONFIG_VDPA_SIM) += vdpa_sim/
obj-$(CONFIG_IFCVF) += ifcvf/
+obj-$(CONFIG_MLX5_VDPA) += mlx5/
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 94bf0328b68d..f2a128e56de5 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -272,7 +272,7 @@ static int ifcvf_config_features(struct ifcvf_hw *hw)
return 0;
}
-u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
+u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
{
struct ifcvf_lm_cfg __iomem *ifcvf_lm;
void __iomem *avail_idx_addr;
@@ -287,7 +287,7 @@ u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
return last_avail_idx;
}
-int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num)
+int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
{
struct ifcvf_lm_cfg __iomem *ifcvf_lm;
void __iomem *avail_idx_addr;
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index f4554412e607..08f267a2aafe 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -29,7 +29,7 @@
(1ULL << VIRTIO_F_VERSION_1) | \
(1ULL << VIRTIO_NET_F_STATUS) | \
(1ULL << VIRTIO_F_ORDER_PLATFORM) | \
- (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM) | \
(1ULL << VIRTIO_NET_F_MRG_RXBUF))
/* Only one queue pair for now. */
@@ -116,7 +116,7 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
void ifcvf_reset(struct ifcvf_hw *hw);
u64 ifcvf_get_features(struct ifcvf_hw *hw);
-u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
-int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num);
+u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
+int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
#endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index f5a60c14b979..076d7ac5e723 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -50,8 +50,10 @@ static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
int i;
- for (i = 0; i < queues; i++)
+ for (i = 0; i < queues; i++) {
devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
+ vf->vring[i].irq = -EINVAL;
+ }
ifcvf_free_irq_vectors(pdev);
}
@@ -235,19 +237,21 @@ static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
return IFCVF_QUEUE_MAX;
}
-static u64 ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid)
+static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
+ struct vdpa_vq_state *state)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- return ifcvf_get_vq_state(vf, qid);
+ state->avail_index = ifcvf_get_vq_state(vf, qid);
+ return 0;
}
static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
- u64 num)
+ const struct vdpa_vq_state *state)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- return ifcvf_set_vq_state(vf, qid, num);
+ return ifcvf_set_vq_state(vf, qid, state->avail_index);
}
static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
@@ -352,6 +356,14 @@ static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
vf->config_cb.private = cb->private;
}
+static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
+ u16 qid)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return vf->vring[qid].irq;
+}
+
/*
* IFCVF currently doesn't have an on-chip IOMMU, so
* set_map()/dma_map()/dma_unmap() are not implemented
@@ -369,6 +381,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.get_vq_ready = ifcvf_vdpa_get_vq_ready,
.set_vq_num = ifcvf_vdpa_set_vq_num,
.set_vq_address = ifcvf_vdpa_set_vq_address,
+ .get_vq_irq = ifcvf_vdpa_get_vq_irq,
.kick_vq = ifcvf_vdpa_kick_vq,
.get_generation = ifcvf_vdpa_get_generation,
.get_device_id = ifcvf_vdpa_get_device_id,
@@ -384,7 +397,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct device *dev = &pdev->dev;
struct ifcvf_adapter *adapter;
struct ifcvf_hw *vf;
- int ret;
+ int ret, i;
ret = pcim_enable_device(pdev);
if (ret) {
@@ -420,7 +433,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- dev, &ifc_vdpa_ops);
+ dev, &ifc_vdpa_ops,
+ IFCVF_MAX_QUEUE_PAIRS * 2);
if (adapter == NULL) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return -ENOMEM;
@@ -441,6 +455,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
+ for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
+ vf->vring[i].irq = -EINVAL;
+
ret = vdpa_register_device(&adapter->vdpa);
if (ret) {
IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
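The ifcvf changes above replace the raw u64 virtqueue state with a struct vdpa_vq_state carrying the 16-bit available index, and expose the per-queue interrupt via get_vq_irq. A minimal stand-alone sketch of the get/set-state pattern, with an invented state struct and a fake register array:

/* Sketch of the vq-state accessor change: the available index travels
 * in a small state struct instead of a raw u64. Types and the fake
 * hardware array are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct vq_state_sketch {
	uint16_t avail_index;
};

static uint16_t hw_last_avail[2];	/* stand-in for device registers */

static int get_vq_state(unsigned int qid, struct vq_state_sketch *state)
{
	state->avail_index = hw_last_avail[qid];
	return 0;
}

static int set_vq_state(unsigned int qid, const struct vq_state_sketch *state)
{
	hw_last_avail[qid] = state->avail_index;
	return 0;
}

int main(void)
{
	struct vq_state_sketch s = { .avail_index = 42 };

	set_vq_state(0, &s);
	s.avail_index = 0;
	get_vq_state(0, &s);
	printf("restored avail_index = %u\n", s.avail_index);
	return 0;
}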
diff --git a/drivers/vdpa/mlx5/Makefile b/drivers/vdpa/mlx5/Makefile
new file mode 100644
index 000000000000..89a5bededc9f
--- /dev/null
+++ b/drivers/vdpa/mlx5/Makefile
@@ -0,0 +1,4 @@
+subdir-ccflags-y += -I$(srctree)/drivers/vdpa/mlx5/core
+
+obj-$(CONFIG_MLX5_VDPA_NET) += mlx5_vdpa.o
+mlx5_vdpa-$(CONFIG_MLX5_VDPA_NET) += net/main.o net/mlx5_vnet.o core/resources.o core/mr.o
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
new file mode 100644
index 000000000000..5c92a576edae
--- /dev/null
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#ifndef __MLX5_VDPA_H__
+#define __MLX5_VDPA_H__
+
+#include <linux/vdpa.h>
+#include <linux/mlx5/driver.h>
+
+struct mlx5_vdpa_direct_mr {
+ u64 start;
+ u64 end;
+ u32 perm;
+ struct mlx5_core_mkey mr;
+ struct sg_table sg_head;
+ int log_size;
+ int nsg;
+ struct list_head list;
+ u64 offset;
+};
+
+struct mlx5_vdpa_mr {
+ struct mlx5_core_mkey mkey;
+
+ /* list of direct MRs descendants of this indirect mr */
+ struct list_head head;
+ unsigned long num_directs;
+ unsigned long num_klms;
+ bool initialized;
+
+ /* serialize mkey creation and destruction */
+ struct mutex mkey_mtx;
+};
+
+struct mlx5_vdpa_resources {
+ u32 pdn;
+ struct mlx5_uars_page *uar;
+ void __iomem *kick_addr;
+ u16 uid;
+ u32 null_mkey;
+ bool valid;
+};
+
+struct mlx5_vdpa_dev {
+ struct vdpa_device vdev;
+ struct mlx5_core_dev *mdev;
+ struct mlx5_vdpa_resources res;
+
+ u64 mlx_features;
+ u64 actual_features;
+ u8 status;
+ u32 max_vqs;
+ u32 generation;
+
+ struct mlx5_vdpa_mr mr;
+};
+
+int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
+int mlx5_vdpa_dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid);
+int mlx5_vdpa_get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey);
+int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
+void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn);
+int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn);
+void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn);
+int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn);
+void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn);
+int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn);
+void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn);
+int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev);
+void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev);
+int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in,
+ int inlen);
+int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey);
+int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+ bool *change_map);
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb);
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
+
+#define mlx5_vdpa_warn(__dev, format, ...) \
+ dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \
+ current->pid, ##__VA_ARGS__)
+
+#define mlx5_vdpa_info(__dev, format, ...) \
+ dev_info((__dev)->mdev->device, "%s:%d:(pid %d): " format, __func__, __LINE__, \
+ current->pid, ##__VA_ARGS__)
+
+#define mlx5_vdpa_dbg(__dev, format, ...) \
+ dev_dbg((__dev)->mdev->device, "%s:%d:(pid %d): " format, __func__, __LINE__, \
+ current->pid, ##__VA_ARGS__)
+
+#endif /* __MLX5_VDPA_H__ */
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h b/drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h
new file mode 100644
index 000000000000..f6f57a29b38e
--- /dev/null
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#ifndef __MLX5_VDPA_IFC_H_
+#define __MLX5_VDPA_IFC_H_
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+enum {
+ MLX5_VIRTIO_Q_EVENT_MODE_NO_MSIX_MODE = 0x0,
+ MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE = 0x1,
+ MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE = 0x2,
+};
+
+enum {
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 0x1,
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 0x2,
+};
+
+enum {
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0,
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1,
+};
+
+struct mlx5_ifc_virtio_q_bits {
+ u8 virtio_q_type[0x8];
+ u8 reserved_at_8[0x5];
+ u8 event_mode[0x3];
+ u8 queue_index[0x10];
+
+ u8 full_emulation[0x1];
+ u8 virtio_version_1_0[0x1];
+ u8 reserved_at_22[0x2];
+ u8 offload_type[0x4];
+ u8 event_qpn_or_msix[0x18];
+
+ u8 doorbell_stride_index[0x10];
+ u8 queue_size[0x10];
+
+ u8 device_emulation_id[0x20];
+
+ u8 desc_addr[0x40];
+
+ u8 used_addr[0x40];
+
+ u8 available_addr[0x40];
+
+ u8 virtio_q_mkey[0x20];
+
+ u8 max_tunnel_desc[0x10];
+ u8 reserved_at_170[0x8];
+ u8 error_type[0x8];
+
+ u8 umem_1_id[0x20];
+
+ u8 umem_1_size[0x20];
+
+ u8 umem_1_offset[0x40];
+
+ u8 umem_2_id[0x20];
+
+ u8 umem_2_size[0x20];
+
+ u8 umem_2_offset[0x40];
+
+ u8 umem_3_id[0x20];
+
+ u8 umem_3_size[0x20];
+
+ u8 umem_3_offset[0x40];
+
+ u8 counter_set_id[0x20];
+
+ u8 reserved_at_320[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_340[0xc0];
+};
+
+struct mlx5_ifc_virtio_net_q_object_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x20];
+
+ u8 vhca_id[0x10];
+ u8 reserved_at_70[0x10];
+
+ u8 queue_feature_bit_mask_12_3[0xa];
+ u8 dirty_bitmap_dump_enable[0x1];
+ u8 vhost_log_page[0x5];
+ u8 reserved_at_90[0xc];
+ u8 state[0x4];
+
+ u8 reserved_at_a0[0x5];
+ u8 queue_feature_bit_mask_2_0[0x3];
+ u8 tisn_or_qpn[0x18];
+
+ u8 dirty_bitmap_mkey[0x20];
+
+ u8 dirty_bitmap_size[0x20];
+
+ u8 dirty_bitmap_addr[0x40];
+
+ u8 hw_available_index[0x10];
+ u8 hw_used_index[0x10];
+
+ u8 reserved_at_160[0xa0];
+
+ struct mlx5_ifc_virtio_q_bits virtio_q_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+enum {
+ MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0,
+ MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3,
+ MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
+};
+
+enum {
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT = 0x0,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY = 0x1,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND = 0x2,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR = 0x3,
+};
+
+enum {
+ MLX5_RQTC_LIST_Q_TYPE_RQ = 0x0,
+ MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q = 0x1,
+};
+
+struct mlx5_ifc_modify_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_modify_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+#endif /* __MLX5_VDPA_IFC_H_ */
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
new file mode 100644
index 000000000000..ef1c550f8266
--- /dev/null
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#include <linux/vdpa.h>
+#include <linux/gcd.h>
+#include <linux/string.h>
+#include <linux/mlx5/qp.h>
+#include "mlx5_vdpa.h"
+
+/* DIV_ROUND_UP where the divisor is a power of 2 given by its log base 2 value */
+#define MLX5_DIV_ROUND_UP_POW2(_n, _s) \
+({ \
+ u64 __s = _s; \
+ u64 _res; \
+ _res = (((_n) + (1 << (__s)) - 1) >> (__s)); \
+ _res; \
+})
+
+static int get_octo_len(u64 len, int page_shift)
+{
+ u64 page_size = 1ULL << page_shift;
+ int npages;
+
+ npages = ALIGN(len, page_size) >> page_shift;
+ return (npages + 1) / 2;
+}
+
+static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in)
+{
+ struct scatterlist *sg;
+ __be64 *pas;
+ int i;
+
+ pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
+ (*pas) = cpu_to_be64(sg_dma_address(sg));
+}
+
+static void mlx5_set_access_mode(void *mkc, int mode)
+{
+ MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2, mode >> 2);
+}
+
+static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
+ mtt[i] = cpu_to_be64(sg_dma_address(sg));
+}
+
+static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
+{
+ int inlen;
+ void *mkc;
+ void *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
+ fill_sg(mr, in);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
+ MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
+ mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
+ MLX5_SET64(mkc, mkc, start_addr, mr->offset);
+ MLX5_SET64(mkc, mkc, len, mr->end - mr->start);
+ MLX5_SET(mkc, mkc, log_page_size, mr->log_size);
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_octo_len(mr->end - mr->start, mr->log_size));
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ get_octo_len(mr->end - mr->start, mr->log_size));
+ populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt));
+ err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen);
+ kvfree(in);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
+{
+ mlx5_vdpa_destroy_mkey(mvdev, &mr->mr);
+}
+
+static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
+{
+ return max_t(u64, map->start, mr->start);
+}
+
+static u64 map_end(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
+{
+ return min_t(u64, map->last + 1, mr->end);
+}
+
+static u64 maplen(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
+{
+ return map_end(map, mr) - map_start(map, mr);
+}
+
+#define MLX5_VDPA_INVALID_START_ADDR ((u64)-1)
+#define MLX5_VDPA_INVALID_LEN ((u64)-1)
+
+static u64 indir_start_addr(struct mlx5_vdpa_mr *mkey)
+{
+ struct mlx5_vdpa_direct_mr *s;
+
+ s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
+ if (!s)
+ return MLX5_VDPA_INVALID_START_ADDR;
+
+ return s->start;
+}
+
+static u64 indir_len(struct mlx5_vdpa_mr *mkey)
+{
+ struct mlx5_vdpa_direct_mr *s;
+ struct mlx5_vdpa_direct_mr *e;
+
+ s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
+ if (!s)
+ return MLX5_VDPA_INVALID_LEN;
+
+ e = list_last_entry(&mkey->head, struct mlx5_vdpa_direct_mr, list);
+
+ return e->end - s->start;
+}
+
+#define LOG_MAX_KLM_SIZE 30
+#define MAX_KLM_SIZE BIT(LOG_MAX_KLM_SIZE)
+
+static u32 klm_bcount(u64 size)
+{
+ return (u32)size;
+}
+
+static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, void *in)
+{
+ struct mlx5_vdpa_direct_mr *dmr;
+ struct mlx5_klm *klmarr;
+ struct mlx5_klm *klm;
+ bool first = true;
+ u64 preve;
+ int i;
+
+ klmarr = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ i = 0;
+ list_for_each_entry(dmr, &mkey->head, list) {
+again:
+ klm = &klmarr[i++];
+ if (first) {
+ preve = dmr->start;
+ first = false;
+ }
+
+ if (preve == dmr->start) {
+ klm->key = cpu_to_be32(dmr->mr.key);
+ klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
+ preve = dmr->end;
+ } else {
+ klm->key = cpu_to_be32(mvdev->res.null_mkey);
+ klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve));
+ preve = dmr->start;
+ goto again;
+ }
+ }
+}
+
+static int klm_byte_size(int nklms)
+{
+ return 16 * ALIGN(nklms, 4);
+}
+
+static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+ int inlen;
+ void *mkc;
+ void *in;
+ int err;
+ u64 start;
+ u64 len;
+
+ start = indir_start_addr(mr);
+ len = indir_len(mr);
+ if (start == MLX5_VDPA_INVALID_START_ADDR || len == MLX5_VDPA_INVALID_LEN)
+ return -EINVAL;
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + klm_byte_size(mr->num_klms);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_KLMS);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
+ MLX5_SET64(mkc, mkc, start_addr, start);
+ MLX5_SET64(mkc, mkc, len, len);
+ MLX5_SET(mkc, mkc, translations_octword_size, klm_byte_size(mr->num_klms) / 16);
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size, mr->num_klms);
+ fill_indir(mvdev, mr, in);
+ err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
+ kfree(in);
+ return err;
+}
+
+static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey)
+{
+ mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
+}
+
+static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
+ struct vhost_iotlb *iotlb)
+{
+ struct vhost_iotlb_map *map;
+ unsigned long lgcd = 0;
+ int log_entity_size;
+ unsigned long size;
+ u64 start = 0;
+ int err;
+ struct page *pg;
+ unsigned int nsg;
+ int sglen;
+ u64 pa;
+ u64 paend;
+ struct scatterlist *sg;
+ struct device *dma = mvdev->mdev->device;
+ int ret;
+
+ for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
+ map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
+ size = maplen(map, mr);
+ lgcd = gcd(lgcd, size);
+ start += size;
+ }
+ log_entity_size = ilog2(lgcd);
+
+ sglen = 1 << log_entity_size;
+ nsg = MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size);
+
+ err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL);
+ if (err)
+ return err;
+
+ sg = mr->sg_head.sgl;
+ for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
+ map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
+ paend = map->addr + maplen(map, mr);
+ for (pa = map->addr; pa < paend; pa += sglen) {
+ pg = pfn_to_page(__phys_to_pfn(pa));
+ if (!sg) {
+ mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
+ map->start, map->last + 1);
+ err = -ENOMEM;
+ goto err_map;
+ }
+ sg_set_page(sg, pg, sglen, 0);
+ sg = sg_next(sg);
+ if (!sg)
+ goto done;
+ }
+ }
+done:
+ mr->log_size = log_entity_size;
+ mr->nsg = nsg;
+ ret = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+ if (!ret) {
+ err = -ENOMEM;
+ goto err_map;
+ }
+
+ err = create_direct_mr(mvdev, mr);
+ if (err)
+ goto err_direct;
+
+ return 0;
+
+err_direct:
+ dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+err_map:
+ sg_free_table(&mr->sg_head);
+ return err;
+}
+
+static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
+{
+ struct device *dma = mvdev->mdev->device;
+
+ destroy_direct_mr(mvdev, mr);
+ dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(&mr->sg_head);
+}
+
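+ /* Map a contiguous range with uniform permissions. The range is split
+ * into chunks of at most MAX_KLM_SIZE (2^30 bytes), each backed by its
+ * own direct mkey, and the chunks are appended to the region's list.
+ */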
+static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 perm,
+ struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ struct mlx5_vdpa_direct_mr *dmr;
+ struct mlx5_vdpa_direct_mr *n;
+ LIST_HEAD(tmp);
+ u64 st;
+ u64 sz;
+ int err;
+ int i = 0;
+
+ st = start;
+ while (size) {
+ sz = (u32)min_t(u64, MAX_KLM_SIZE, size);
+ dmr = kzalloc(sizeof(*dmr), GFP_KERNEL);
+ if (!dmr) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ dmr->start = st;
+ dmr->end = st + sz;
+ dmr->perm = perm;
+ err = map_direct_mr(mvdev, dmr, iotlb);
+ if (err) {
+ kfree(dmr);
+ goto err_alloc;
+ }
+
+ list_add_tail(&dmr->list, &tmp);
+ size -= sz;
+ mr->num_directs++;
+ mr->num_klms++;
+ st += sz;
+ i++;
+ }
+ list_splice_tail(&tmp, &mr->head);
+ return 0;
+
+err_alloc:
+ /* release only the entries mapped by this call; the caller cleans mr->head */
+ list_for_each_entry_safe(dmr, n, &tmp, list) {
+ list_del_init(&dmr->list);
+ unmap_direct_mr(mvdev, dmr);
+ kfree(dmr);
+ }
+ return err;
+}
+
+/* The iotlb pointer contains a list of maps. Go over the maps, possibly
+ * merging mergeable maps, and create direct memory keys that provide the
+ * device access to memory. The direct mkeys are then referred to by the
+ * indirect memory key that provides access to the entire address space given
+ * by iotlb.
+ */
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ struct mlx5_vdpa_direct_mr *dmr;
+ struct mlx5_vdpa_direct_mr *n;
+ struct vhost_iotlb_map *map;
+ u32 pperm = U16_MAX;
+ u64 last = U64_MAX;
+ u64 ps = U64_MAX;
+ u64 pe = U64_MAX;
+ u64 start = 0;
+ int err = 0;
+ int nnuls;
+
+ if (mr->initialized)
+ return 0;
+
+ INIT_LIST_HEAD(&mr->head);
+ for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ start = map->start;
+ if (pe == map->start && pperm == map->perm) {
+ pe = map->last + 1;
+ } else {
+ if (ps != U64_MAX) {
+ if (pe < map->start) {
+ /* We have a hole in the map. Check how
+ * many null keys are required to fill it.
+ */
+ nnuls = MLX5_DIV_ROUND_UP_POW2(map->start - pe,
+ LOG_MAX_KLM_SIZE);
+ mr->num_klms += nnuls;
+ }
+ err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
+ if (err)
+ goto err_chain;
+ }
+ ps = map->start;
+ pe = map->last + 1;
+ pperm = map->perm;
+ }
+ }
+ err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
+ if (err)
+ goto err_chain;
+
+ /* Create the memory key that defines the guest's address space. This
+ * memory key refers to the direct keys that contain the MTT
+ * translations
+ */
+ err = create_indirect_key(mvdev, mr);
+ if (err)
+ goto err_chain;
+
+ mr->initialized = true;
+ return 0;
+
+err_chain:
+ list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
+ list_del_init(&dmr->list);
+ unmap_direct_mr(mvdev, dmr);
+ kfree(dmr);
+ }
+ return err;
+}
+
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ int err;
+
+ mutex_lock(&mr->mkey_mtx);
+ err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+ mutex_unlock(&mr->mkey_mtx);
+ return err;
+}
+
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ struct mlx5_vdpa_direct_mr *dmr;
+ struct mlx5_vdpa_direct_mr *n;
+
+ mutex_lock(&mr->mkey_mtx);
+ if (!mr->initialized)
+ goto out;
+
+ destroy_indirect_key(mvdev, mr);
+ list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
+ list_del_init(&dmr->list);
+ unmap_direct_mr(mvdev, dmr);
+ kfree(dmr);
+ }
+ memset(mr, 0, sizeof(*mr));
+ mr->initialized = false;
+out:
+ mutex_unlock(&mr->mkey_mtx);
+}
+
+static bool map_empty(struct vhost_iotlb *iotlb)
+{
+ return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
+}
+
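+ /* Handle a new memory map from the vdpa core. An empty map destroys the
+ * current memory region. If a region already exists, *change_map is set
+ * to tell the caller that the existing map must be replaced rather than
+ * created here; otherwise the region is created directly.
+ */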
+int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+ bool *change_map)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ int err = 0;
+
+ *change_map = false;
+ if (map_empty(iotlb)) {
+ mlx5_vdpa_destroy_mr(mvdev);
+ return 0;
+ }
+ mutex_lock(&mr->mkey_mtx);
+ if (mr->initialized) {
+ mlx5_vdpa_info(mvdev, "memory map update\n");
+ *change_map = true;
+ }
+ if (!*change_map)
+ err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+ mutex_unlock(&mr->mkey_mtx);
+
+ return err;
+}
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
new file mode 100644
index 000000000000..96e6421c5d1c
--- /dev/null
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_vdpa.h"
+
+static int alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+ MLX5_SET(alloc_pd_in, in, uid, uid);
+
+ err = mlx5_cmd_exec_inout(mdev, alloc_pd, in, out);
+ if (!err)
+ *pdn = MLX5_GET(alloc_pd_out, out, pd);
+
+ return err;
+}
+
+static int dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, in, pd, pdn);
+ MLX5_SET(dealloc_pd_in, in, uid, uid);
+ return mlx5_cmd_exec_in(mdev, dealloc_pd, in);
+}
+
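+ /* Query the device's null mkey from the special contexts. It is used to
+ * cover holes in the address range of the indirect mkey.
+ */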
+static int get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey)
+{
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int err;
+
+ MLX5_SET(query_special_contexts_in, in, opcode, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ err = mlx5_cmd_exec_inout(mdev, query_special_contexts, in, out);
+ if (!err)
+ *null_mkey = MLX5_GET(query_special_contexts_out, out, null_mkey);
+ return err;
+}
+
+static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid)
+{
+ u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
+ int inlen;
+ void *in;
+ int err;
+
+ /* 0 means not supported */
+ if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx))
+ return -EOPNOTSUPP;
+
+ inlen = MLX5_ST_SZ_BYTES(create_uctx_in);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
+ MLX5_SET(create_uctx_in, in, uctx.cap, MLX5_UCTX_CAP_RAW_TX);
+
+ err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
+ kfree(in);
+ if (!err)
+ *uid = MLX5_GET(create_uctx_out, out, uid);
+
+ return err;
+}
+
+static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
+
+ MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
+ MLX5_SET(destroy_uctx_in, in, uid, uid);
+
+ mlx5_cmd_exec(mvdev->mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
+ int err;
+
+ MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+ MLX5_SET(create_tis_in, in, uid, mvdev->res.uid);
+ err = mlx5_cmd_exec_inout(mvdev->mdev, create_tis, in, out);
+ if (!err)
+ *tisn = MLX5_GET(create_tis_out, out, tisn);
+
+ return err;
+}
+
+void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, uid, mvdev->res.uid);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
+ mlx5_cmd_exec_in(mvdev->mdev, destroy_tis, in);
+}
+
+int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
+ int err;
+
+ MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+ err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+ return err;
+}
+
+void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, uid, mvdev->res.uid);
+ MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+ mlx5_cmd_exec_in(mvdev->mdev, destroy_rqt, in);
+}
+
+int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
+ int err;
+
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(mvdev->mdev, create_tir, in, out);
+ if (!err)
+ *tirn = MLX5_GET(create_tir_out, out, tirn);
+
+ return err;
+}
+
+void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
+
+ MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, in, uid, mvdev->res.uid);
+ MLX5_SET(destroy_tir_in, in, tirn, tirn);
+ mlx5_cmd_exec_in(mvdev->mdev, destroy_tir, in);
+}
+
+int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(alloc_transport_domain_in, in, uid, mvdev->res.uid);
+
+ err = mlx5_cmd_exec_inout(mvdev->mdev, alloc_transport_domain, in, out);
+ if (!err)
+ *tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain);
+
+ return err;
+}
+
+void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
+
+ MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, in, uid, mvdev->res.uid);
+ MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+ mlx5_cmd_exec_in(mvdev->mdev, dealloc_transport_domain, in);
+}
+
+int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in,
+ int inlen)
+{
+ u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
+ u32 mkey_index;
+ void *mkc;
+ int err;
+
+ MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+ MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
+
+ err = mlx5_cmd_exec(mvdev->mdev, in, inlen, lout, sizeof(lout));
+ if (err)
+ return err;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
+ mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
+ mkey->size = MLX5_GET64(mkc, mkc, len);
+ mkey->key |= mlx5_idx_to_mkey(mkey_index);
+ mkey->pd = MLX5_GET(mkc, mkc, pd);
+ return 0;
+}
+
+int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};
+
+ MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
+ MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+ MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+ return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
+}
+
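+ /* Allocate the resources shared by all virtqueues: a UAR page, a user
+ * context (uid), a protection domain, the null mkey and a mapping of the
+ * device's doorbell page used to kick the virtqueues.
+ */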
+int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
+{
+ u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
+ struct mlx5_vdpa_resources *res = &mvdev->res;
+ struct mlx5_core_dev *mdev = mvdev->mdev;
+ u64 kick_addr;
+ int err;
+
+ if (res->valid) {
+ mlx5_vdpa_warn(mvdev, "resources already allocated\n");
+ return -EINVAL;
+ }
+ mutex_init(&mvdev->mr.mkey_mtx);
+ res->uar = mlx5_get_uars_page(mdev);
+ if (IS_ERR(res->uar)) {
+ err = PTR_ERR(res->uar);
+ goto err_uars;
+ }
+
+ err = create_uctx(mvdev, &res->uid);
+ if (err)
+ goto err_uctx;
+
+ err = alloc_pd(mvdev, &res->pdn, res->uid);
+ if (err)
+ goto err_pd;
+
+ err = get_null_mkey(mvdev, &res->null_mkey);
+ if (err)
+ goto err_key;
+
+ kick_addr = pci_resource_start(mdev->pdev, 0) + offset;
+ res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
+ if (!res->kick_addr) {
+ err = -ENOMEM;
+ goto err_key;
+ }
+ res->valid = true;
+
+ return 0;
+
+err_key:
+ dealloc_pd(mvdev, res->pdn, res->uid);
+err_pd:
+ destroy_uctx(mvdev, res->uid);
+err_uctx:
+ mlx5_put_uars_page(mdev, res->uar);
+err_uars:
+ mutex_destroy(&mvdev->mr.mkey_mtx);
+ return err;
+}
+
+void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_vdpa_resources *res = &mvdev->res;
+
+ if (!res->valid)
+ return;
+
+ iounmap(res->kick_addr);
+ res->kick_addr = NULL;
+ dealloc_pd(mvdev, res->pdn, res->uid);
+ destroy_uctx(mvdev, res->uid);
+ mlx5_put_uars_page(mvdev->mdev, res->uar);
+ mutex_destroy(&mvdev->mr.mkey_mtx);
+ res->valid = false;
+}
diff --git a/drivers/vdpa/mlx5/net/main.c b/drivers/vdpa/mlx5/net/main.c
new file mode 100644
index 000000000000..838cd98386ff
--- /dev/null
+++ b/drivers/vdpa/mlx5/net/main.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+#include "mlx5_vdpa_ifc.h"
+#include "mlx5_vnet.h"
+
+MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox VDPA driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static bool required_caps_supported(struct mlx5_core_dev *mdev)
+{
+ u8 event_mode;
+ u64 got;
+
+ got = MLX5_CAP_GEN_64(mdev, general_obj_types);
+
+ if (!(got & MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
+ return false;
+
+ event_mode = MLX5_CAP_DEV_VDPA_EMULATION(mdev, event_mode);
+ if (!(event_mode & MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
+ return false;
+
+ if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, eth_frame_offload_type))
+ return false;
+
+ return true;
+}
+
+static void *mlx5_vdpa_add(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_vdpa_dev *vdev;
+
+ if (mlx5_core_is_pf(mdev))
+ return NULL;
+
+ if (!required_caps_supported(mdev)) {
+ dev_info(mdev->device, "virtio net emulation not supported\n");
+ return NULL;
+ }
+ vdev = mlx5_vdpa_add_dev(mdev);
+ if (IS_ERR(vdev))
+ return NULL;
+
+ return vdev;
+}
+
+static void mlx5_vdpa_remove(struct mlx5_core_dev *mdev, void *context)
+{
+ struct mlx5_vdpa_dev *vdev = context;
+
+ mlx5_vdpa_remove_dev(vdev);
+}
+
+static struct mlx5_interface mlx5_vdpa_interface = {
+ .add = mlx5_vdpa_add,
+ .remove = mlx5_vdpa_remove,
+ .protocol = MLX5_INTERFACE_PROTOCOL_VDPA,
+};
+
+static int __init mlx5_vdpa_init(void)
+{
+ return mlx5_register_interface(&mlx5_vdpa_interface);
+}
+
+static void __exit mlx5_vdpa_exit(void)
+{
+ mlx5_unregister_interface(&mlx5_vdpa_interface);
+}
+
+module_init(mlx5_vdpa_init);
+module_exit(mlx5_vdpa_exit);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
new file mode 100644
index 000000000000..9df69d5efe8c
--- /dev/null
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -0,0 +1,1974 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#include <linux/vdpa.h>
+#include <uapi/linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_vnet.h"
+#include "mlx5_vdpa_ifc.h"
+#include "mlx5_vdpa.h"
+
+#define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev)
+
+#define VALID_FEATURES_MASK \
+ (BIT(VIRTIO_NET_F_CSUM) | BIT(VIRTIO_NET_F_GUEST_CSUM) | \
+ BIT(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) | BIT(VIRTIO_NET_F_MTU) | BIT(VIRTIO_NET_F_MAC) | \
+ BIT(VIRTIO_NET_F_GUEST_TSO4) | BIT(VIRTIO_NET_F_GUEST_TSO6) | \
+ BIT(VIRTIO_NET_F_GUEST_ECN) | BIT(VIRTIO_NET_F_GUEST_UFO) | BIT(VIRTIO_NET_F_HOST_TSO4) | \
+ BIT(VIRTIO_NET_F_HOST_TSO6) | BIT(VIRTIO_NET_F_HOST_ECN) | BIT(VIRTIO_NET_F_HOST_UFO) | \
+ BIT(VIRTIO_NET_F_MRG_RXBUF) | BIT(VIRTIO_NET_F_STATUS) | BIT(VIRTIO_NET_F_CTRL_VQ) | \
+ BIT(VIRTIO_NET_F_CTRL_RX) | BIT(VIRTIO_NET_F_CTRL_VLAN) | \
+ BIT(VIRTIO_NET_F_CTRL_RX_EXTRA) | BIT(VIRTIO_NET_F_GUEST_ANNOUNCE) | \
+ BIT(VIRTIO_NET_F_MQ) | BIT(VIRTIO_NET_F_CTRL_MAC_ADDR) | BIT(VIRTIO_NET_F_HASH_REPORT) | \
+ BIT(VIRTIO_NET_F_RSS) | BIT(VIRTIO_NET_F_RSC_EXT) | BIT(VIRTIO_NET_F_STANDBY) | \
+ BIT(VIRTIO_NET_F_SPEED_DUPLEX) | BIT(VIRTIO_F_NOTIFY_ON_EMPTY) | \
+ BIT(VIRTIO_F_ANY_LAYOUT) | BIT(VIRTIO_F_VERSION_1) | BIT(VIRTIO_F_ACCESS_PLATFORM) | \
+ BIT(VIRTIO_F_RING_PACKED) | BIT(VIRTIO_F_ORDER_PLATFORM) | BIT(VIRTIO_F_SR_IOV))
+
+#define VALID_STATUS_MASK \
+ (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK | \
+ VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED)
+
+struct mlx5_vdpa_net_resources {
+ u32 tisn;
+ u32 tdn;
+ u32 tirn;
+ u32 rqtn;
+ bool valid;
+};
+
+struct mlx5_vdpa_cq_buf {
+ struct mlx5_frag_buf_ctrl fbc;
+ struct mlx5_frag_buf frag_buf;
+ int cqe_size;
+ int nent;
+};
+
+struct mlx5_vdpa_cq {
+ struct mlx5_core_cq mcq;
+ struct mlx5_vdpa_cq_buf buf;
+ struct mlx5_db db;
+ int cqe;
+};
+
+struct mlx5_vdpa_umem {
+ struct mlx5_frag_buf_ctrl fbc;
+ struct mlx5_frag_buf frag_buf;
+ int size;
+ u32 id;
+};
+
+struct mlx5_vdpa_qp {
+ struct mlx5_core_qp mqp;
+ struct mlx5_frag_buf frag_buf;
+ struct mlx5_db db;
+ u16 head;
+ bool fw;
+};
+
+struct mlx5_vq_restore_info {
+ u32 num_ent;
+ u64 desc_addr;
+ u64 device_addr;
+ u64 driver_addr;
+ u16 avail_index;
+ bool ready;
+ struct vdpa_callback cb;
+ bool restore;
+};
+
+struct mlx5_vdpa_virtqueue {
+ bool ready;
+ u64 desc_addr;
+ u64 device_addr;
+ u64 driver_addr;
+ u32 num_ent;
+ struct vdpa_callback event_cb;
+
+ /* Resources for implementing the notification channel from the device
+ * to the driver. fwqp is the firmware end of an RC connection; the
+ * other end is vqqp used by the driver. cq is where completions are
+ * reported.
+ */
+ struct mlx5_vdpa_cq cq;
+ struct mlx5_vdpa_qp fwqp;
+ struct mlx5_vdpa_qp vqqp;
+
+ /* umem resources are required for the virtqueue operation. Their use
+ * is internal and they must be provided by the driver.
+ */
+ struct mlx5_vdpa_umem umem1;
+ struct mlx5_vdpa_umem umem2;
+ struct mlx5_vdpa_umem umem3;
+
+ bool initialized;
+ int index;
+ u32 virtq_id;
+ struct mlx5_vdpa_net *ndev;
+ u16 avail_idx;
+ int fw_state;
+
+ /* keep last in the struct */
+ struct mlx5_vq_restore_info ri;
+};
+
+/* We will remove this limitation once mlx5_vdpa_alloc_resources()
+ * provides for driver space allocation
+ */
+#define MLX5_MAX_SUPPORTED_VQS 16
+
+struct mlx5_vdpa_net {
+ struct mlx5_vdpa_dev mvdev;
+ struct mlx5_vdpa_net_resources res;
+ struct virtio_net_config config;
+ struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
+
+ /* Serialize vq resource creation and destruction. This is required
+ * since the memory map might change and we need to destroy and create
+ * resources while the driver is operational.
+ */
+ struct mutex reslock;
+ struct mlx5_flow_table *rxft;
+ struct mlx5_fc *rx_counter;
+ struct mlx5_flow_handle *rx_rule;
+ bool setup;
+ u16 mtu;
+};
+
+static void free_resources(struct mlx5_vdpa_net *ndev);
+static void init_mvqs(struct mlx5_vdpa_net *ndev);
+static int setup_driver(struct mlx5_vdpa_net *ndev);
+static void teardown_driver(struct mlx5_vdpa_net *ndev);
+
+static bool mlx5_vdpa_debug;
+
+#define MLX5_LOG_VIO_FLAG(_feature) \
+ do { \
+ if (features & BIT(_feature)) \
+ mlx5_vdpa_info(mvdev, "%s\n", #_feature); \
+ } while (0)
+
+#define MLX5_LOG_VIO_STAT(_status) \
+ do { \
+ if (status & (_status)) \
+ mlx5_vdpa_info(mvdev, "%s\n", #_status); \
+ } while (0)
+
+static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set)
+{
+ if (status & ~VALID_STATUS_MASK)
+ mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n",
+ status & ~VALID_STATUS_MASK);
+
+ if (!mlx5_vdpa_debug)
+ return;
+
+ mlx5_vdpa_info(mvdev, "driver status %s", set ? "set" : "get");
+ if (set && !status) {
+ mlx5_vdpa_info(mvdev, "driver resets the device\n");
+ return;
+ }
+
+ MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_ACKNOWLEDGE);
+ MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER);
+ MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER_OK);
+ MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FEATURES_OK);
+ MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_NEEDS_RESET);
+ MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FAILED);
+}
+
+static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set)
+{
+ if (features & ~VALID_FEATURES_MASK)
+ mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n",
+ features & ~VALID_FEATURES_MASK);
+
+ if (!mlx5_vdpa_debug)
+ return;
+
+ mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? "sets" : "reads");
+ if (!features)
+ mlx5_vdpa_info(mvdev, "all feature bits are cleared\n");
+
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CSUM);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_CSUM);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MTU);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MAC);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO4);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO6);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ECN);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_UFO);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO4);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO6);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_ECN);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_UFO);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MRG_RXBUF);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STATUS);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VQ);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VLAN);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX_EXTRA);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ANNOUNCE);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MQ);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_MAC_ADDR);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HASH_REPORT);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSS);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSC_EXT);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STANDBY);
+ MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_SPEED_DUPLEX);
+ MLX5_LOG_VIO_FLAG(VIRTIO_F_NOTIFY_ON_EMPTY);
+ MLX5_LOG_VIO_FLAG(VIRTIO_F_ANY_LAYOUT);
+ MLX5_LOG_VIO_FLAG(VIRTIO_F_VERSION_1);
+ MLX5_LOG_VIO_FLAG(VIRTIO_F_ACCESS_PLATFORM);
+ MLX5_LOG_VIO_FLAG(VIRTIO_F_RING_PACKED);
+ MLX5_LOG_VIO_FLAG(VIRTIO_F_ORDER_PLATFORM);
+ MLX5_LOG_VIO_FLAG(VIRTIO_F_SR_IOV);
+}
+
+static int create_tis(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+ void *tisc;
+ int err;
+
+ tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn);
+ err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn);
+ if (err)
+ mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err);
+
+ return err;
+}
+
+static void destroy_tis(struct mlx5_vdpa_net *ndev)
+{
+ mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn);
+}
+
+#define MLX5_VDPA_CQE_SIZE 64
+#define MLX5_VDPA_LOG_CQE_SIZE ilog2(MLX5_VDPA_CQE_SIZE)
+
+static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent)
+{
+ struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
+ u8 log_wq_stride = MLX5_VDPA_LOG_CQE_SIZE;
+ u8 log_wq_sz = MLX5_VDPA_LOG_CQE_SIZE;
+ int err;
+
+ err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf,
+ ndev->mvdev.mdev->priv.numa_node);
+ if (err)
+ return err;
+
+ mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
+
+ buf->cqe_size = MLX5_VDPA_CQE_SIZE;
+ buf->nent = nent;
+
+ return 0;
+}
+
+static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size)
+{
+ struct mlx5_frag_buf *frag_buf = &umem->frag_buf;
+
+ return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf,
+ ndev->mvdev.mdev->priv.numa_node);
+}
+
+static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf)
+{
+ mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf);
+}
+
+static void *get_cqe(struct mlx5_vdpa_cq *vcq, int n)
+{
+ return mlx5_frag_buf_get_wqe(&vcq->buf.fbc, n);
+}
+
+static void cq_frag_buf_init(struct mlx5_vdpa_cq *vcq, struct mlx5_vdpa_cq_buf *buf)
+{
+ struct mlx5_cqe64 *cqe64;
+ void *cqe;
+ int i;
+
+ for (i = 0; i < buf->nent; i++) {
+ cqe = get_cqe(vcq, i);
+ cqe64 = cqe;
+ cqe64->op_own = MLX5_CQE_INVALID << 4;
+ }
+}
+
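+ /* Return the CQE at index n only if the hardware has posted it: its
+ * opcode must not be invalid and its ownership bit must match the parity
+ * of the current pass over the CQ ring.
+ */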
+static void *get_sw_cqe(struct mlx5_vdpa_cq *cq, int n)
+{
+ struct mlx5_cqe64 *cqe64 = get_cqe(cq, n & (cq->cqe - 1));
+
+ if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
+ !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & cq->cqe)))
+ return cqe64;
+
+ return NULL;
+}
+
+static void rx_post(struct mlx5_vdpa_qp *vqp, int n)
+{
+ vqp->head += n;
+ vqp->db.db[0] = cpu_to_be32(vqp->head);
+}
+
+static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in,
+ struct mlx5_vdpa_virtqueue *mvq, u32 num_ent)
+{
+ struct mlx5_vdpa_qp *vqp;
+ __be64 *pas;
+ void *qpc;
+
+ vqp = fw ? &mvq->fwqp : &mvq->vqqp;
+ MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid);
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ if (vqp->fw) {
+ /* Firmware QP is allocated by the driver for the firmware's
+ * use, so we can skip some of the params as they will be chosen by the firmware
+ */
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ return;
+ }
+
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
+ MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
+ MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index);
+ MLX5_SET(qpc, qpc, log_page_size, vqp->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(num_ent));
+ MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
+ pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas);
+ mlx5_fill_page_frag_array(&vqp->frag_buf, pas);
+}
+
+static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent)
+{
+ return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev,
+ num_ent * sizeof(struct mlx5_wqe_data_seg), &vqp->frag_buf,
+ ndev->mvdev.mdev->priv.numa_node);
+}
+
+static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
+{
+ mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf);
+}
+
+static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
+ struct mlx5_vdpa_qp *vqp)
+{
+ struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ void *qpc;
+ void *in;
+ int err;
+
+ if (!vqp->fw) {
+ vqp = &mvq->vqqp;
+ err = rq_buf_alloc(ndev, vqp, mvq->num_ent);
+ if (err)
+ return err;
+
+ err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db);
+ if (err)
+ goto err_db;
+ inlen += vqp->frag_buf.npages * sizeof(__be64);
+ }
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent);
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
+ MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
+ if (!vqp->fw)
+ MLX5_SET64(qpc, qpc, dbr_addr, vqp->db.dma);
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ kfree(in);
+ if (err)
+ goto err_kzalloc;
+
+ vqp->mqp.uid = ndev->mvdev.res.uid;
+ vqp->mqp.qpn = MLX5_GET(create_qp_out, out, qpn);
+
+ if (!vqp->fw)
+ rx_post(vqp, mvq->num_ent);
+
+ return 0;
+
+err_kzalloc:
+ if (!vqp->fw)
+ mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
+err_db:
+ if (!vqp->fw)
+ rq_buf_free(ndev, vqp);
+
+ return err;
+}
+
+static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, vqp->mqp.qpn);
+ MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid);
+ if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in))
+ mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn);
+ if (!vqp->fw) {
+ mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
+ rq_buf_free(ndev, vqp);
+ }
+}
+
+static void *next_cqe_sw(struct mlx5_vdpa_cq *cq)
+{
+ return get_sw_cqe(cq, cq->mcq.cons_index);
+}
+
+static int mlx5_vdpa_poll_one(struct mlx5_vdpa_cq *vcq)
+{
+ struct mlx5_cqe64 *cqe64;
+
+ cqe64 = next_cqe_sw(vcq);
+ if (!cqe64)
+ return -EAGAIN;
+
+ vcq->mcq.cons_index++;
+ return 0;
+}
+
+static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num)
+{
+ mlx5_cq_set_ci(&mvq->cq.mcq);
+ rx_post(&mvq->vqqp, num);
+ if (mvq->event_cb.callback)
+ mvq->event_cb.callback(mvq->event_cb.private);
+}
+
+static void mlx5_vdpa_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
+{
+ struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq);
+ struct mlx5_vdpa_net *ndev = mvq->ndev;
+ void __iomem *uar_page = ndev->mvdev.res.uar->map;
+ int num = 0;
+
+ while (!mlx5_vdpa_poll_one(&mvq->cq)) {
+ num++;
+ if (num > mvq->num_ent / 2) {
+ /* If completions keep coming while we poll, we want to
+ * let the hardware know that we consumed them by
+ * updating the doorbell record. We also let vdpa core
+ * know about this so it passes it on the virtio driver
+ * on the guest.
+ */
+ mlx5_vdpa_handle_completions(mvq, num);
+ num = 0;
+ }
+ }
+
+ if (num)
+ mlx5_vdpa_handle_completions(mvq, num);
+
+ mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
+}
+
+static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
+{
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+ void __iomem *uar_page = ndev->mvdev.res.uar->map;
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_vdpa_cq *vcq = &mvq->cq;
+ unsigned int irqn;
+ __be64 *pas;
+ int inlen;
+ void *cqc;
+ void *in;
+ int err;
+ int eqn;
+
+ err = mlx5_db_alloc(mdev, &vcq->db);
+ if (err)
+ return err;
+
+ vcq->mcq.set_ci_db = vcq->db.db;
+ vcq->mcq.arm_db = vcq->db.db + 1;
+ vcq->mcq.cqe_sz = 64;
+
+ err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent);
+ if (err)
+ goto err_db;
+
+ cq_frag_buf_init(vcq, &vcq->buf);
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * vcq->buf.frag_buf.npages;
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_vzalloc;
+ }
+
+ MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid);
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
+ mlx5_fill_page_frag_array(&vcq->buf.frag_buf, pas);
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+
+ /* Use vector 0 by default. Consider adding code to choose the least used
+ * vector.
+ */
+ err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn);
+ if (err)
+ goto err_vec;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(num_ent));
+ MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index);
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma);
+
+ err = mlx5_core_create_cq(mdev, &vcq->mcq, in, inlen, out, sizeof(out));
+ if (err)
+ goto err_vec;
+
+ vcq->mcq.comp = mlx5_vdpa_cq_comp;
+ vcq->cqe = num_ent;
+ vcq->mcq.set_ci_db = vcq->db.db;
+ vcq->mcq.arm_db = vcq->db.db + 1;
+ mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
+ kfree(in);
+ return 0;
+
+err_vec:
+ kfree(in);
+err_vzalloc:
+ cq_frag_buf_free(ndev, &vcq->buf);
+err_db:
+ mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
+ return err;
+}
+
+static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
+{
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+ struct mlx5_vdpa_cq *vcq = &mvq->cq;
+
+ if (mlx5_core_destroy_cq(mdev, &vcq->mcq)) {
+ mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn);
+ return;
+ }
+ cq_frag_buf_free(ndev, &vcq->buf);
+ mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
+}
+
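+ /* The device reports the memory it needs for each of the three umems of a
+ * virtqueue as a linear function of the queue size: param_a * num_ent +
+ * param_b, with the parameters taken from the VDPA emulation caps.
+ */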
+static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
+ struct mlx5_vdpa_umem **umemp)
+{
+ struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+ int p_a;
+ int p_b;
+
+ switch (num) {
+ case 1:
+ p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a);
+ p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b);
+ *umemp = &mvq->umem1;
+ break;
+ case 2:
+ p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a);
+ p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b);
+ *umemp = &mvq->umem2;
+ break;
+ case 3:
+ p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a);
+ p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b);
+ *umemp = &mvq->umem3;
+ break;
+ }
+ return p_a * mvq->num_ent + p_b;
+}
+
+static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem)
+{
+ mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf);
+}
+
+static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
+{
+ int inlen;
+ u32 out[MLX5_ST_SZ_DW(create_umem_out)] = {};
+ void *um;
+ void *in;
+ int err;
+ __be64 *pas;
+ int size;
+ struct mlx5_vdpa_umem *umem;
+
+ size = umem_size(ndev, mvq, num, &umem);
+ if (size < 0)
+ return size;
+
+ umem->size = size;
+ err = umem_frag_buf_alloc(ndev, umem, size);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_umem_in) + MLX5_ST_SZ_BYTES(mtt) * umem->frag_buf.npages;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_in;
+ }
+
+ MLX5_SET(create_umem_in, in, opcode, MLX5_CMD_OP_CREATE_UMEM);
+ MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid);
+ um = MLX5_ADDR_OF(create_umem_in, in, umem);
+ MLX5_SET(umem, um, log_page_size, umem->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(umem, um, num_of_mtt, umem->frag_buf.npages);
+
+ pas = (__be64 *)MLX5_ADDR_OF(umem, um, mtt[0]);
+ mlx5_fill_page_frag_array_perm(&umem->frag_buf, pas, MLX5_MTT_PERM_RW);
+
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err);
+ goto err_cmd;
+ }
+
+ kfree(in);
+ umem->id = MLX5_GET(create_umem_out, out, umem_id);
+
+ return 0;
+
+err_cmd:
+ kfree(in);
+err_in:
+ umem_frag_buf_free(ndev, umem);
+ return err;
+}
+
+static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_umem_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_umem_out)] = {};
+ struct mlx5_vdpa_umem *umem;
+
+ switch (num) {
+ case 1:
+ umem = &mvq->umem1;
+ break;
+ case 2:
+ umem = &mvq->umem2;
+ break;
+ case 3:
+ umem = &mvq->umem3;
+ break;
+ }
+
+ MLX5_SET(destroy_umem_in, in, opcode, MLX5_CMD_OP_DESTROY_UMEM);
+ MLX5_SET(destroy_umem_in, in, umem_id, umem->id);
+ if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
+ return;
+
+ umem_frag_buf_free(ndev, umem);
+}
+
+static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ int num;
+ int err;
+
+ for (num = 1; num <= 3; num++) {
+ err = create_umem(ndev, mvq, num);
+ if (err)
+ goto err_umem;
+ }
+ return 0;
+
+err_umem:
+ for (num--; num > 0; num--)
+ umem_destroy(ndev, mvq, num);
+
+ return err;
+}
+
+static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ int num;
+
+ for (num = 3; num > 0; num--)
+ umem_destroy(ndev, mvq, num);
+}
+
+static int get_queue_type(struct mlx5_vdpa_net *ndev)
+{
+ u32 type_mask;
+
+ type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);
+
+ /* prefer a packed queue when the device supports it, otherwise use split */
+ if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED)
+ return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
+
+ WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT));
+
+ return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
+}
+
+static bool vq_is_tx(u16 idx)
+{
+ return idx % 2;
+}
+
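+ /* Pack the offload related virtio-net feature bits (HOST_TSO4, HOST_TSO6,
+ * CSUM, GUEST_CSUM) into the bit positions expected by the virtqueue
+ * object's queue_feature_bit_mask_12_3 field.
+ */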
+static u16 get_features_12_3(u64 features)
+{
+ return (!!(features & BIT(VIRTIO_NET_F_HOST_TSO4)) << 9) |
+ (!!(features & BIT(VIRTIO_NET_F_HOST_TSO6)) << 8) |
+ (!!(features & BIT(VIRTIO_NET_F_CSUM)) << 7) |
+ (!!(features & BIT(VIRTIO_NET_F_GUEST_CSUM)) << 6);
+}
+
+static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
+ u32 out[MLX5_ST_SZ_DW(create_virtio_net_q_out)] = {};
+ void *obj_context;
+ void *cmd_hdr;
+ void *vq_ctx;
+ void *in;
+ int err;
+
+ err = umems_create(ndev, mvq);
+ if (err)
+ return err;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ cmd_hdr = MLX5_ADDR_OF(create_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+
+ obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
+ MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
+ MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
+ get_features_12_3(ndev->mvdev.actual_features));
+ vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
+ MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev));
+
+ if (vq_is_tx(mvq->index))
+ MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn);
+
+ MLX5_SET(virtio_q, vq_ctx, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE);
+ MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index);
+ MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
+ MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
+ MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
+ !!(ndev->mvdev.actual_features & BIT(VIRTIO_F_VERSION_1)));
+ MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
+ MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
+ MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
+ MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key);
+ MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
+ MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
+ MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
+ MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size);
+ MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
+ MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
+ MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
+ if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
+ MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);
+
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
+ if (err)
+ goto err_cmd;
+
+ kfree(in);
+ mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return 0;
+
+err_cmd:
+ kfree(in);
+err_alloc:
+ umems_destroy(ndev, mvq);
+ return err;
+}
+
+static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_virtio_net_q_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_virtio_net_q_out)] = {};
+
+ MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id);
+ MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid);
+ MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_type,
+ MLX5_OBJ_TYPE_VIRTIO_NET_Q);
+ if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) {
+ mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
+ return;
+ }
+ umems_destroy(ndev, mvq);
+}
+
+static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
+{
+ return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn;
+}
+
+static u32 get_qpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
+{
+ return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn;
+}
+
+static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out,
+ int *outlen, u32 qpn, u32 rqpn)
+{
+ void *qpc;
+ void *pp;
+
+ switch (cmd) {
+ case MLX5_CMD_OP_2RST_QP:
+ *inlen = MLX5_ST_SZ_BYTES(qp_2rst_in);
+ *outlen = MLX5_ST_SZ_BYTES(qp_2rst_out);
+ *in = kzalloc(*inlen, GFP_KERNEL);
+ *out = kzalloc(*outlen, GFP_KERNEL);
+ if (!*in || !*out)
+ goto outerr;
+
+ MLX5_SET(qp_2rst_in, *in, opcode, cmd);
+ MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid);
+ MLX5_SET(qp_2rst_in, *in, qpn, qpn);
+ break;
+ case MLX5_CMD_OP_RST2INIT_QP:
+ *inlen = MLX5_ST_SZ_BYTES(rst2init_qp_in);
+ *outlen = MLX5_ST_SZ_BYTES(rst2init_qp_out);
+ *in = kzalloc(*inlen, GFP_KERNEL);
+ *out = kzalloc(MLX5_ST_SZ_BYTES(rst2init_qp_out), GFP_KERNEL);
+ if (!*in || !*out)
+ goto outerr;
+
+ MLX5_SET(rst2init_qp_in, *in, opcode, cmd);
+ MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid);
+ MLX5_SET(rst2init_qp_in, *in, qpn, qpn);
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
+ MLX5_SET(qpc, qpc, remote_qpn, rqpn);
+ MLX5_SET(qpc, qpc, rwe, 1);
+ pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ MLX5_SET(ads, pp, vhca_port_num, 1);
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ *inlen = MLX5_ST_SZ_BYTES(init2rtr_qp_in);
+ *outlen = MLX5_ST_SZ_BYTES(init2rtr_qp_out);
+ *in = kzalloc(*inlen, GFP_KERNEL);
+ *out = kzalloc(MLX5_ST_SZ_BYTES(init2rtr_qp_out), GFP_KERNEL);
+ if (!*in || !*out)
+ goto outerr;
+
+ MLX5_SET(init2rtr_qp_in, *in, opcode, cmd);
+ MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid);
+ MLX5_SET(init2rtr_qp_in, *in, qpn, qpn);
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
+ MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
+ MLX5_SET(qpc, qpc, log_msg_max, 30);
+ MLX5_SET(qpc, qpc, remote_qpn, rqpn);
+ pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ MLX5_SET(ads, pp, fl, 1);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ *inlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_in);
+ *outlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_out);
+ *in = kzalloc(*inlen, GFP_KERNEL);
+ *out = kzalloc(MLX5_ST_SZ_BYTES(rtr2rts_qp_out), GFP_KERNEL);
+ if (!*in || !*out)
+ goto outerr;
+
+ MLX5_SET(rtr2rts_qp_in, *in, opcode, cmd);
+ MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid);
+ MLX5_SET(rtr2rts_qp_in, *in, qpn, qpn);
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
+ pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ MLX5_SET(ads, pp, ack_timeout, 14);
+ MLX5_SET(qpc, qpc, retry_count, 7);
+ MLX5_SET(qpc, qpc, rnr_retry, 7);
+ break;
+ default:
+ goto outerr_nullify;
+ }
+
+ return;
+
+outerr:
+ kfree(*in);
+ kfree(*out);
+outerr_nullify:
+ *in = NULL;
+ *out = NULL;
+}
+
+static void free_inout(void *in, void *out)
+{
+ kfree(in);
+ kfree(out);
+}
+
+/* Two QPs are used by each virtqueue. One is used by the driver and one by
+ * firmware. The fw argument indicates whether the QP being modified is the one used
+ * by firmware.
+ */
+static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd)
+{
+ int outlen;
+ int inlen;
+ void *out;
+ void *in;
+ int err;
+
+ alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw));
+ if (!in || !out)
+ return -ENOMEM;
+
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen);
+ free_inout(in, out);
+ return err;
+}
+
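+ /* Bring up the RC connection between the firmware QP and the driver QP by
+ * walking both through the standard state machine: reset both, then
+ * RST2INIT and INIT2RTR on each, and finally move the firmware QP to RTS.
+ * The driver QP only receives so it stays in RTR.
+ */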
+static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ int err;
+
+ err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP);
+ if (err)
+ return err;
+
+ err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP);
+ if (err)
+ return err;
+
+ err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP);
+ if (err)
+ return err;
+
+ err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP);
+ if (err)
+ return err;
+
+ err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP);
+ if (err)
+ return err;
+
+ err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP);
+ if (err)
+ return err;
+
+ return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP);
+}
+
+struct mlx5_virtq_attr {
+ u8 state;
+ u16 available_index;
+};
+
+static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
+ struct mlx5_virtq_attr *attr)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_virtio_net_q_out);
+ u32 in[MLX5_ST_SZ_DW(query_virtio_net_q_in)] = {};
+ void *out;
+ void *obj_context;
+ void *cmd_hdr;
+ int err;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen);
+ if (err)
+ goto err_cmd;
+
+ obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, out, obj_context);
+ memset(attr, 0, sizeof(*attr));
+ attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
+ attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
+ kfree(out);
+ return 0;
+
+err_cmd:
+ kfree(out);
+ return err;
+}
+
+static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
+ u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
+ void *obj_context;
+ void *cmd_hdr;
+ void *in;
+ int err;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+
+ obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
+ MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select,
+ MLX5_VIRTQ_MODIFY_MASK_STATE);
+ MLX5_SET(virtio_net_q_object, obj_context, state, state);
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
+ kfree(in);
+ if (!err)
+ mvq->fw_state = state;
+
+ return err;
+}
+
+static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ u16 idx = mvq->index;
+ int err;
+
+ if (!mvq->num_ent)
+ return 0;
+
+ if (mvq->initialized) {
+ mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n");
+ return -EINVAL;
+ }
+
+ err = cq_create(ndev, idx, mvq->num_ent);
+ if (err)
+ return err;
+
+ err = qp_create(ndev, mvq, &mvq->fwqp);
+ if (err)
+ goto err_fwqp;
+
+ err = qp_create(ndev, mvq, &mvq->vqqp);
+ if (err)
+ goto err_vqqp;
+
+ err = connect_qps(ndev, mvq);
+ if (err)
+ goto err_connect;
+
+ err = create_virtqueue(ndev, mvq);
+ if (err)
+ goto err_connect;
+
+ if (mvq->ready) {
+ err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n",
+ idx, err);
+ goto err_connect;
+ }
+ }
+
+ mvq->initialized = true;
+ return 0;
+
+err_connect:
+ qp_destroy(ndev, &mvq->vqqp);
+err_vqqp:
+ qp_destroy(ndev, &mvq->fwqp);
+err_fwqp:
+ cq_destroy(ndev, idx);
+ return err;
+}
+
+static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ struct mlx5_virtq_attr attr;
+
+ if (!mvq->initialized)
+ return;
+
+ if (query_virtqueue(ndev, mvq, &attr)) {
+ mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n");
+ return;
+ }
+ if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
+ return;
+
+ if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
+ mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n");
+}
+
+static void suspend_vqs(struct mlx5_vdpa_net *ndev)
+{
+ int i;
+
+ for (i = 0; i < MLX5_MAX_SUPPORTED_VQS; i++)
+ suspend_vq(ndev, &ndev->vqs[i]);
+}
+
+static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ if (!mvq->initialized)
+ return;
+
+ suspend_vq(ndev, mvq);
+ destroy_virtqueue(ndev, mvq);
+ qp_destroy(ndev, &mvq->vqqp);
+ qp_destroy(ndev, &mvq->fwqp);
+ cq_destroy(ndev, mvq->index);
+ mvq->initialized = false;
+}
+
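+ /* Create an RQ table listing the virtqueue object IDs of the initialized
+ * receive queues (even indices). The TIR created later uses this table as
+ * its indirection table for RSS.
+ */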
+static int create_rqt(struct mlx5_vdpa_net *ndev)
+{
+ int log_max_rqt;
+ __be32 *list;
+ void *rqtc;
+ int inlen;
+ void *in;
+ int i, j;
+ int err;
+
+ log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
+ if (log_max_rqt < 1)
+ return -EOPNOTSUPP;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + (1 << log_max_rqt) * MLX5_ST_SZ_BYTES(rq_num);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid);
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, 1 << log_max_rqt);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, 1);
+ list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
+ for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) {
+ if (!ndev->vqs[j].initialized)
+ continue;
+
+ if (!vq_is_tx(ndev->vqs[j].index)) {
+ list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
+ i++;
+ }
+ }
+
+ err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
+ kfree(in);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void destroy_rqt(struct mlx5_vdpa_net *ndev)
+{
+ mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn);
+}
+
+static int create_tir(struct mlx5_vdpa_net *ndev)
+{
+#define HASH_IP_L4PORTS \
+ (MLX5_HASH_FIELD_SEL_SRC_IP | MLX5_HASH_FIELD_SEL_DST_IP | MLX5_HASH_FIELD_SEL_L4_SPORT | \
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+ static const u8 rx_hash_toeplitz_key[] = { 0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
+ 0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
+ 0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
+ 0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
+ 0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a };
+ void *rss_key;
+ void *outer;
+ void *tirc;
+ void *in;
+ int err;
+
+ in = kzalloc(MLX5_ST_SZ_BYTES(create_tir_in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid);
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
+ rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
+ memcpy(rss_key, rx_hash_toeplitz_key, sizeof(rx_hash_toeplitz_key));
+
+ outer = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+ MLX5_SET(rx_hash_field_select, outer, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, outer, l4_prot_type, MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, outer, selected_fields, HASH_IP_L4PORTS);
+
+ MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn);
+ MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn);
+
+ err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn);
+ kfree(in);
+ return err;
+}
+
+static void destroy_tir(struct mlx5_vdpa_net *ndev)
+{
+ mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn);
+}
+
+static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_namespace *ns;
+ int err;
+
+ /* for now, one entry, match all, forward to tir */
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+
+ ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
+ if (!ns) {
+ mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ndev->rxft))
+ return PTR_ERR(ndev->rxft);
+
+ ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false);
+ if (IS_ERR(ndev->rx_counter)) {
+ err = PTR_ERR(ndev->rx_counter);
+ goto err_fc;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest[0].tir_num = ndev->res.tirn;
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter_id = mlx5_fc_id(ndev->rx_counter);
+ ndev->rx_rule = mlx5_add_flow_rules(ndev->rxft, NULL, &flow_act, dest, 2);
+ if (IS_ERR(ndev->rx_rule)) {
+ err = PTR_ERR(ndev->rx_rule);
+ ndev->rx_rule = NULL;
+ goto err_rule;
+ }
+
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
+err_fc:
+ mlx5_destroy_flow_table(ndev->rxft);
+ return err;
+}
+
+static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
+{
+ if (!ndev->rx_rule)
+ return;
+
+ mlx5_del_flow_rules(ndev->rx_rule);
+ mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
+ mlx5_destroy_flow_table(ndev->rxft);
+
+ ndev->rx_rule = NULL;
+}
+
+static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+ if (unlikely(!mvq->ready))
+ return;
+
+ iowrite16(idx, ndev->mvdev.res.kick_addr);
+}
+
+static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area,
+ u64 driver_area, u64 device_area)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+ mvq->desc_addr = desc_area;
+ mvq->device_addr = device_area;
+ mvq->driver_addr = driver_area;
+ return 0;
+}
+
+static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq;
+
+ mvq = &ndev->vqs[idx];
+ mvq->num_ent = num;
+}
+
+static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *vq = &ndev->vqs[idx];
+
+ vq->event_cb = *cb;
+}
+
+static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+ if (!ready)
+ suspend_vq(ndev, mvq);
+
+ mvq->ready = ready;
+}
+
+static bool mlx5_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+ return mvq->ready;
+}
+
+static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
+ const struct vdpa_vq_state *state)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+ if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
+ mlx5_vdpa_warn(mvdev, "can't modify available index\n");
+ return -EINVAL;
+ }
+
+ mvq->avail_idx = state->avail_index;
+ return 0;
+}
+
+static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_virtq_attr attr;
+ int err;
+
+ if (!mvq->initialized)
+ return -EAGAIN;
+
+ err = query_virtqueue(ndev, mvq, &attr);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
+ return err;
+ }
+ state->avail_index = attr.available_index;
+ return 0;
+}
+
+static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
+{
+ return PAGE_SIZE;
+}
+
+enum {
+ MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
+ MLX5_VIRTIO_NET_F_CSUM = 1 << 10,
+ MLX5_VIRTIO_NET_F_HOST_TSO6 = 1 << 11,
+ MLX5_VIRTIO_NET_F_HOST_TSO4 = 1 << 12,
+};
+
+static u64 mlx_to_virtio_features(u16 dev_features)
+{
+ u64 result = 0;
+
+ if (dev_features & MLX5_VIRTIO_NET_F_GUEST_CSUM)
+ result |= BIT(VIRTIO_NET_F_GUEST_CSUM);
+ if (dev_features & MLX5_VIRTIO_NET_F_CSUM)
+ result |= BIT(VIRTIO_NET_F_CSUM);
+ if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO6)
+ result |= BIT(VIRTIO_NET_F_HOST_TSO6);
+ if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO4)
+ result |= BIT(VIRTIO_NET_F_HOST_TSO4);
+
+ return result;
+}
+
+static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ u16 dev_features;
+
+ dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
+ ndev->mvdev.mlx_features = mlx_to_virtio_features(dev_features);
+ if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
+ ndev->mvdev.mlx_features |= BIT(VIRTIO_F_VERSION_1);
+ ndev->mvdev.mlx_features |= BIT(VIRTIO_F_ACCESS_PLATFORM);
+ print_features(mvdev, ndev->mvdev.mlx_features, false);
+ return ndev->mvdev.mlx_features;
+}
+
+static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
+{
+ if (!(features & BIT(VIRTIO_F_ACCESS_PLATFORM)))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int setup_virtqueues(struct mlx5_vdpa_net *ndev)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) {
+ err = setup_vq(ndev, &ndev->vqs[i]);
+ if (err)
+ goto err_vq;
+ }
+
+ return 0;
+
+err_vq:
+ for (--i; i >= 0; i--)
+ teardown_vq(ndev, &ndev->vqs[i]);
+
+ return err;
+}
+
+static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_vdpa_virtqueue *mvq;
+ int i;
+
+ for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
+ mvq = &ndev->vqs[i];
+ if (!mvq->initialized)
+ continue;
+
+ teardown_vq(ndev, mvq);
+ }
+}
+
+/* TODO: cross-endian support */
+static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
+{
+ return virtio_legacy_is_little_endian() ||
+ (mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
+}
+
+static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ int err;
+
+ print_features(mvdev, features, true);
+
+ err = verify_min_features(mvdev, features);
+ if (err)
+ return err;
+
+ ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
+ ndev->config.mtu = __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev),
+ ndev->mtu);
+ return err;
+}
+
+static void mlx5_vdpa_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
+{
+ /* not implemented */
+ mlx5_vdpa_warn(to_mvdev(vdev), "set config callback not supported\n");
+}
+
+#define MLX5_VDPA_MAX_VQ_ENTRIES 256
+static u16 mlx5_vdpa_get_vq_num_max(struct vdpa_device *vdev)
+{
+ return MLX5_VDPA_MAX_VQ_ENTRIES;
+}
+
+static u32 mlx5_vdpa_get_device_id(struct vdpa_device *vdev)
+{
+ return VIRTIO_ID_NET;
+}
+
+static u32 mlx5_vdpa_get_vendor_id(struct vdpa_device *vdev)
+{
+ return PCI_VENDOR_ID_MELLANOX;
+}
+
+static u8 mlx5_vdpa_get_status(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+
+ print_status(mvdev, ndev->mvdev.status, false);
+ return ndev->mvdev.status;
+}
+
+static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ struct mlx5_vq_restore_info *ri = &mvq->ri;
+ struct mlx5_virtq_attr attr;
+ int err;
+
+ if (!mvq->initialized)
+ return 0;
+
+ err = query_virtqueue(ndev, mvq, &attr);
+ if (err)
+ return err;
+
+ ri->avail_index = attr.available_index;
+ ri->ready = mvq->ready;
+ ri->num_ent = mvq->num_ent;
+ ri->desc_addr = mvq->desc_addr;
+ ri->device_addr = mvq->device_addr;
+ ri->driver_addr = mvq->driver_addr;
+ ri->cb = mvq->event_cb;
+ ri->restore = true;
+ return 0;
+}
+
+static int save_channels_info(struct mlx5_vdpa_net *ndev)
+{
+ int i;
+
+ for (i = 0; i < ndev->mvdev.max_vqs; i++) {
+ memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri));
+ save_channel_info(ndev, &ndev->vqs[i]);
+ }
+ return 0;
+}
+
+static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev)
+{
+ int i;
+
+ for (i = 0; i < ndev->mvdev.max_vqs; i++)
+ memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
+}
+
+static void restore_channels_info(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_vdpa_virtqueue *mvq;
+ struct mlx5_vq_restore_info *ri;
+ int i;
+
+ mlx5_clear_vqs(ndev);
+ init_mvqs(ndev);
+ for (i = 0; i < ndev->mvdev.max_vqs; i++) {
+ mvq = &ndev->vqs[i];
+ ri = &mvq->ri;
+ if (!ri->restore)
+ continue;
+
+ mvq->avail_idx = ri->avail_index;
+ mvq->ready = ri->ready;
+ mvq->num_ent = ri->num_ent;
+ mvq->desc_addr = ri->desc_addr;
+ mvq->device_addr = ri->device_addr;
+ mvq->driver_addr = ri->driver_addr;
+ mvq->event_cb = ri->cb;
+ }
+}
+
+static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *iotlb)
+{
+ int err;
+
+ suspend_vqs(ndev);
+ err = save_channels_info(ndev);
+ if (err)
+ goto err_mr;
+
+ teardown_driver(ndev);
+ mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb);
+ if (err)
+ goto err_mr;
+
+ restore_channels_info(ndev);
+ err = setup_driver(ndev);
+ if (err)
+ goto err_setup;
+
+ return 0;
+
+err_setup:
+ mlx5_vdpa_destroy_mr(&ndev->mvdev);
+err_mr:
+ return err;
+}
+
+static int setup_driver(struct mlx5_vdpa_net *ndev)
+{
+ int err;
+
+ mutex_lock(&ndev->reslock);
+ if (ndev->setup) {
+ mlx5_vdpa_warn(&ndev->mvdev, "setup_driver called for an already set up driver\n");
+ err = 0;
+ goto out;
+ }
+ err = setup_virtqueues(ndev);
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n");
+ goto out;
+ }
+
+ err = create_rqt(ndev);
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n");
+ goto err_rqt;
+ }
+
+ err = create_tir(ndev);
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n");
+ goto err_tir;
+ }
+
+ err = add_fwd_to_tir(ndev);
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n");
+ goto err_fwd;
+ }
+ ndev->setup = true;
+ mutex_unlock(&ndev->reslock);
+
+ return 0;
+
+err_fwd:
+ destroy_tir(ndev);
+err_tir:
+ destroy_rqt(ndev);
+err_rqt:
+ teardown_virtqueues(ndev);
+out:
+ mutex_unlock(&ndev->reslock);
+ return err;
+}
+
+static void teardown_driver(struct mlx5_vdpa_net *ndev)
+{
+ mutex_lock(&ndev->reslock);
+ if (!ndev->setup)
+ goto out;
+
+ remove_fwd_to_tir(ndev);
+ destroy_tir(ndev);
+ destroy_rqt(ndev);
+ teardown_virtqueues(ndev);
+ ndev->setup = false;
+out:
+ mutex_unlock(&ndev->reslock);
+}
+
+static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ int err;
+
+ print_status(mvdev, status, true);
+ if (!status) {
+ mlx5_vdpa_info(mvdev, "performing device reset\n");
+ teardown_driver(ndev);
+ mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ ndev->mvdev.status = 0;
+ ndev->mvdev.mlx_features = 0;
+ ++mvdev->generation;
+ return;
+ }
+
+ if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+ err = setup_driver(ndev);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
+ goto err_setup;
+ }
+ } else {
+ mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
+ return;
+ }
+ }
+
+ ndev->mvdev.status = status;
+ return;
+
+err_setup:
+ mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
+}
+
+static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, void *buf,
+ unsigned int len)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+
+ if (offset + len <= sizeof(struct virtio_net_config))
+ memcpy(buf, (u8 *)&ndev->config + offset, len);
+}
+
+static void mlx5_vdpa_set_config(struct vdpa_device *vdev, unsigned int offset, const void *buf,
+ unsigned int len)
+{
+ /* not supported */
+}
+
+static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+ return mvdev->generation;
+}
+
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ bool change_map;
+ int err;
+
+ err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
+ return err;
+ }
+
+ if (change_map)
+ return mlx5_vdpa_change_map(ndev, iotlb);
+
+ return 0;
+}
+
+static void mlx5_vdpa_free(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev;
+
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+
+ free_resources(ndev);
+ mlx5_vdpa_free_resources(&ndev->mvdev);
+ mutex_destroy(&ndev->reslock);
+}
+
+static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
+{
+ struct vdpa_notification_area ret = {};
+
+ return ret;
+}
+
+static int mlx5_get_vq_irq(struct vdpa_device *vdv, u16 idx)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct vdpa_config_ops mlx5_vdpa_ops = {
+ .set_vq_address = mlx5_vdpa_set_vq_address,
+ .set_vq_num = mlx5_vdpa_set_vq_num,
+ .kick_vq = mlx5_vdpa_kick_vq,
+ .set_vq_cb = mlx5_vdpa_set_vq_cb,
+ .set_vq_ready = mlx5_vdpa_set_vq_ready,
+ .get_vq_ready = mlx5_vdpa_get_vq_ready,
+ .set_vq_state = mlx5_vdpa_set_vq_state,
+ .get_vq_state = mlx5_vdpa_get_vq_state,
+ .get_vq_notification = mlx5_get_vq_notification,
+ .get_vq_irq = mlx5_get_vq_irq,
+ .get_vq_align = mlx5_vdpa_get_vq_align,
+ .get_features = mlx5_vdpa_get_features,
+ .set_features = mlx5_vdpa_set_features,
+ .set_config_cb = mlx5_vdpa_set_config_cb,
+ .get_vq_num_max = mlx5_vdpa_get_vq_num_max,
+ .get_device_id = mlx5_vdpa_get_device_id,
+ .get_vendor_id = mlx5_vdpa_get_vendor_id,
+ .get_status = mlx5_vdpa_get_status,
+ .set_status = mlx5_vdpa_set_status,
+ .get_config = mlx5_vdpa_get_config,
+ .set_config = mlx5_vdpa_set_config,
+ .get_generation = mlx5_vdpa_get_generation,
+ .set_map = mlx5_vdpa_set_map,
+ .free = mlx5_vdpa_free,
+};
+
+static int alloc_resources(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_vdpa_net_resources *res = &ndev->res;
+ int err;
+
+ if (res->valid) {
+ mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n");
+ return -EEXIST;
+ }
+
+ err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn);
+ if (err)
+ return err;
+
+ err = create_tis(ndev);
+ if (err)
+ goto err_tis;
+
+ res->valid = true;
+
+ return 0;
+
+err_tis:
+ mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
+ return err;
+}
+
+static void free_resources(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_vdpa_net_resources *res = &ndev->res;
+
+ if (!res->valid)
+ return;
+
+ destroy_tis(ndev);
+ mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
+ res->valid = false;
+}
+
+static void init_mvqs(struct mlx5_vdpa_net *ndev)
+{
+ struct mlx5_vdpa_virtqueue *mvq;
+ int i;
+
+ for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) {
+ mvq = &ndev->vqs[i];
+ memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
+ mvq->index = i;
+ mvq->ndev = ndev;
+ mvq->fwqp.fw = true;
+ }
+ for (; i < ndev->mvdev.max_vqs; i++) {
+ mvq = &ndev->vqs[i];
+ memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
+ mvq->index = i;
+ mvq->ndev = ndev;
+ }
+}
+
+void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev)
+{
+ struct virtio_net_config *config;
+ struct mlx5_vdpa_dev *mvdev;
+ struct mlx5_vdpa_net *ndev;
+ u32 max_vqs;
+ int err;
+
+ /* we save one virtqueue for the control virtqueue, should we require it */
+ max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
+ max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
+
+ ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
+ 2 * mlx5_vdpa_max_qps(max_vqs));
+ if (IS_ERR(ndev))
+ return ndev;
+
+ ndev->mvdev.max_vqs = max_vqs;
+ mvdev = &ndev->mvdev;
+ mvdev->mdev = mdev;
+ init_mvqs(ndev);
+ mutex_init(&ndev->reslock);
+ config = &ndev->config;
+ err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu);
+ if (err)
+ goto err_mtu;
+
+ err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
+ if (err)
+ goto err_mtu;
+
+ mvdev->vdev.dma_dev = mdev->device;
+ err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
+ if (err)
+ goto err_mtu;
+
+ err = alloc_resources(ndev);
+ if (err)
+ goto err_res;
+
+ err = vdpa_register_device(&mvdev->vdev);
+ if (err)
+ goto err_reg;
+
+ return ndev;
+
+err_reg:
+ free_resources(ndev);
+err_res:
+ mlx5_vdpa_free_resources(&ndev->mvdev);
+err_mtu:
+ mutex_destroy(&ndev->reslock);
+ put_device(&mvdev->vdev.dev);
+ return ERR_PTR(err);
+}
+
+void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev)
+{
+ vdpa_unregister_device(&mvdev->vdev);
+}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.h b/drivers/vdpa/mlx5/net/mlx5_vnet.h
new file mode 100644
index 000000000000..f2d6d68b020e
--- /dev/null
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#ifndef __MLX5_VNET_H_
+#define __MLX5_VNET_H_
+
+#include <linux/vdpa.h>
+#include <linux/virtio_net.h>
+#include <linux/vringh.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/qp.h>
+#include "mlx5_vdpa.h"
+
+static inline u32 mlx5_vdpa_max_qps(int max_vqs)
+{
+ return max_vqs / 2;
+}
+
+#define to_mlx5_vdpa_ndev(__mvdev) container_of(__mvdev, struct mlx5_vdpa_net, mvdev)
+void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev);
+void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev);
+
+#endif /* __MLX5_VNET_H_ */
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index de211ef3738c..a69ffc991e13 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -61,6 +61,7 @@ static void vdpa_release_dev(struct device *d)
* initialized but before registered.
* @parent: the parent device
* @config: the bus operations that are supported by this device
+ * @nvqs: number of virtqueues supported by this device
* @size: size of the parent structure that contains private data
*
* Driver should use vdpa_alloc_device() wrapper macro instead of
@@ -71,6 +72,7 @@ static void vdpa_release_dev(struct device *d)
*/
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
+ int nvqs,
size_t size)
{
struct vdpa_device *vdev;
@@ -96,6 +98,8 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
vdev->dev.release = vdpa_release_dev;
vdev->index = err;
vdev->config = config;
+ vdev->features_valid = false;
+ vdev->nvqs = nvqs;
err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
if (err)
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index c7334cc65bb2..62d640327145 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -24,6 +24,7 @@
#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
+#include <linux/virtio_byteorder.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_net.h>
@@ -33,6 +34,10 @@
#define DRV_DESC "vDPA Device Simulator"
#define DRV_LICENSE "GPL v2"
+static int batch_mapping = 1;
+module_param(batch_mapping, int, 0444);
+MODULE_PARM_DESC(batch_mapping, "Batched mapping: 1 - Enable; 0 - Disable");
+
struct vdpasim_virtqueue {
struct vringh vring;
struct vringh_kiov iov;
@@ -55,12 +60,12 @@ struct vdpasim_virtqueue {
static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
(1ULL << VIRTIO_F_VERSION_1) |
- (1ULL << VIRTIO_F_IOMMU_PLATFORM);
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM);
/* State of each vdpasim device */
struct vdpasim {
struct vdpa_device vdpa;
- struct vdpasim_virtqueue vqs[2];
+ struct vdpasim_virtqueue vqs[VDPASIM_VQ_NUM];
struct work_struct work;
/* spinlock to synchronize virtqueue state */
spinlock_t lock;
@@ -70,8 +75,27 @@ struct vdpasim {
u32 status;
u32 generation;
u64 features;
+ /* spinlock to synchronize iommu table */
+ spinlock_t iommu_lock;
};
+/* TODO: cross-endian support */
+static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
+{
+ return virtio_legacy_is_little_endian() ||
+ (vdpasim->features & (1ULL << VIRTIO_F_VERSION_1));
+}
+
+static inline u16 vdpasim16_to_cpu(struct vdpasim *vdpasim, __virtio16 val)
+{
+ return __virtio16_to_cpu(vdpasim_is_little_endian(vdpasim), val);
+}
+
+static inline __virtio16 cpu_to_vdpasim16(struct vdpasim *vdpasim, u16 val)
+{
+ return __cpu_to_virtio16(vdpasim_is_little_endian(vdpasim), val);
+}
+
static struct vdpasim *vdpasim_dev;
static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
@@ -118,7 +142,9 @@ static void vdpasim_reset(struct vdpasim *vdpasim)
for (i = 0; i < VDPASIM_VQ_NUM; i++)
vdpasim_vq_reset(&vdpasim->vqs[i]);
+ spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_reset(vdpasim->iommu);
+ spin_unlock(&vdpasim->iommu_lock);
vdpasim->features = 0;
vdpasim->status = 0;
@@ -236,8 +262,10 @@ static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
/* For simplicity, use an identity mapping to avoid e.g. an iova
* allocator.
*/
+ spin_lock(&vdpasim->iommu_lock);
ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
pa, dir_to_perm(dir));
+ spin_unlock(&vdpasim->iommu_lock);
if (ret)
return DMA_MAPPING_ERROR;
@@ -251,8 +279,10 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
struct vdpasim *vdpasim = dev_to_sim(dev);
struct vhost_iotlb *iommu = vdpasim->iommu;
+ spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_del_range(iommu, (u64)dma_addr,
(u64)dma_addr + size - 1);
+ spin_unlock(&vdpasim->iommu_lock);
}
static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
@@ -264,9 +294,10 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
void *addr = kmalloc(size, flag);
int ret;
- if (!addr)
+ spin_lock(&vdpasim->iommu_lock);
+ if (!addr) {
*dma_addr = DMA_MAPPING_ERROR;
- else {
+ } else {
u64 pa = virt_to_phys(addr);
ret = vhost_iotlb_add_range(iommu, (u64)pa,
@@ -279,6 +310,7 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
} else
*dma_addr = (dma_addr_t)pa;
}
+ spin_unlock(&vdpasim->iommu_lock);
return addr;
}
@@ -290,8 +322,11 @@ static void vdpasim_free_coherent(struct device *dev, size_t size,
struct vdpasim *vdpasim = dev_to_sim(dev);
struct vhost_iotlb *iommu = vdpasim->iommu;
+ spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_del_range(iommu, (u64)dma_addr,
(u64)dma_addr + size - 1);
+ spin_unlock(&vdpasim->iommu_lock);
+
kfree(phys_to_virt((uintptr_t)dma_addr));
}
@@ -303,21 +338,27 @@ static const struct dma_map_ops vdpasim_dma_ops = {
};
static const struct vdpa_config_ops vdpasim_net_config_ops;
+static const struct vdpa_config_ops vdpasim_net_batch_config_ops;
static struct vdpasim *vdpasim_create(void)
{
- struct virtio_net_config *config;
+ const struct vdpa_config_ops *ops;
struct vdpasim *vdpasim;
struct device *dev;
int ret = -ENOMEM;
- vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL,
- &vdpasim_net_config_ops);
+ if (batch_mapping)
+ ops = &vdpasim_net_batch_config_ops;
+ else
+ ops = &vdpasim_net_config_ops;
+
+ vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, VDPASIM_VQ_NUM);
if (!vdpasim)
goto err_alloc;
INIT_WORK(&vdpasim->work, vdpasim_work);
spin_lock_init(&vdpasim->lock);
+ spin_lock_init(&vdpasim->iommu_lock);
dev = &vdpasim->vdpa.dev;
dev->coherent_dma_mask = DMA_BIT_MASK(64);
@@ -331,10 +372,7 @@ static struct vdpasim *vdpasim_create(void)
if (!vdpasim->buffer)
goto err_iommu;
- config = &vdpasim->config;
- config->mtu = 1500;
- config->status = VIRTIO_NET_S_LINK_UP;
- eth_random_addr(config->mac);
+ eth_random_addr(vdpasim->config.mac);
vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);
@@ -413,26 +451,29 @@ static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
return vq->ready;
}
-static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, u64 state)
+static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
+ const struct vdpa_vq_state *state)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
struct vringh *vrh = &vq->vring;
spin_lock(&vdpasim->lock);
- vrh->last_avail_idx = state;
+ vrh->last_avail_idx = state->avail_index;
spin_unlock(&vdpasim->lock);
return 0;
}
-static u64 vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx)
+static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
+ struct vdpa_vq_state *state)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
struct vringh *vrh = &vq->vring;
- return vrh->last_avail_idx;
+ state->avail_index = vrh->last_avail_idx;
+ return 0;
}
static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
@@ -448,13 +489,22 @@ static u64 vdpasim_get_features(struct vdpa_device *vdpa)
static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct virtio_net_config *config = &vdpasim->config;
/* DMA mapping must be done by driver */
- if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+ if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
return -EINVAL;
vdpasim->features = features & vdpasim_features;
+ /* We generally only know whether the guest is using the legacy interface
+ * here, so this is the earliest point at which we can set config fields.
+ * Note: we actually require VIRTIO_F_ACCESS_PLATFORM above, which
+ * implies VIRTIO_F_VERSION_1, but let's not try to be clever here.
+ */
+
+ config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
+ config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
return 0;
}
@@ -508,7 +558,7 @@ static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
if (offset + len < sizeof(struct virtio_net_config))
- memcpy(buf, &vdpasim->config + offset, len);
+ memcpy(buf, (u8 *)&vdpasim->config + offset, len);
}
static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
@@ -532,6 +582,7 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
u64 start = 0ULL, last = 0ULL - 1;
int ret;
+ spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_reset(vdpasim->iommu);
for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
@@ -541,10 +592,12 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
if (ret)
goto err;
}
+ spin_unlock(&vdpasim->iommu_lock);
return 0;
err:
vhost_iotlb_reset(vdpasim->iommu);
+ spin_unlock(&vdpasim->iommu_lock);
return ret;
}
@@ -552,16 +605,23 @@ static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
u64 pa, u32 perm)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ int ret;
+
+ spin_lock(&vdpasim->iommu_lock);
+ ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
+ perm);
+ spin_unlock(&vdpasim->iommu_lock);
- return vhost_iotlb_add_range(vdpasim->iommu, iova,
- iova + size - 1, pa, perm);
+ return ret;
}
static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
+ spin_unlock(&vdpasim->iommu_lock);
return 0;
}
@@ -597,12 +657,36 @@ static const struct vdpa_config_ops vdpasim_net_config_ops = {
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
- .set_map = vdpasim_set_map,
.dma_map = vdpasim_dma_map,
.dma_unmap = vdpasim_dma_unmap,
.free = vdpasim_free,
};
+static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
+ .set_vq_address = vdpasim_set_vq_address,
+ .set_vq_num = vdpasim_set_vq_num,
+ .kick_vq = vdpasim_kick_vq,
+ .set_vq_cb = vdpasim_set_vq_cb,
+ .set_vq_ready = vdpasim_set_vq_ready,
+ .get_vq_ready = vdpasim_get_vq_ready,
+ .set_vq_state = vdpasim_set_vq_state,
+ .get_vq_state = vdpasim_get_vq_state,
+ .get_vq_align = vdpasim_get_vq_align,
+ .get_features = vdpasim_get_features,
+ .set_features = vdpasim_set_features,
+ .set_config_cb = vdpasim_set_config_cb,
+ .get_vq_num_max = vdpasim_get_vq_num_max,
+ .get_device_id = vdpasim_get_device_id,
+ .get_vendor_id = vdpasim_get_vendor_id,
+ .get_status = vdpasim_get_status,
+ .set_status = vdpasim_set_status,
+ .get_config = vdpasim_get_config,
+ .set_config = vdpasim_set_config,
+ .get_generation = vdpasim_get_generation,
+ .set_map = vdpasim_set_map,
+ .free = vdpasim_free,
+};
+
static int __init vdpasim_dev_init(void)
{
vdpasim_dev = vdpasim_create();
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index d3688c6afb87..587fbae06182 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -65,6 +65,7 @@ config VHOST_VDPA
tristate "Vhost driver for vDPA-based backend"
depends on EVENTFD
select VHOST
+ select IRQ_BYPASS_MANAGER
depends on VDPA
help
This kernel module can be loaded in host kernel to accelerate
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index eea902b83afe..531a00d703cd 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -73,7 +73,7 @@ enum {
VHOST_NET_FEATURES = VHOST_FEATURES |
(1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
(1ULL << VIRTIO_NET_F_MRG_RXBUF) |
- (1ULL << VIRTIO_F_IOMMU_PLATFORM)
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM)
};
enum {
@@ -1615,21 +1615,6 @@ done:
return err;
}
-static int vhost_net_set_backend_features(struct vhost_net *n, u64 features)
-{
- int i;
-
- mutex_lock(&n->dev.mutex);
- for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
- mutex_lock(&n->vqs[i].vq.mutex);
- n->vqs[i].vq.acked_backend_features = features;
- mutex_unlock(&n->vqs[i].vq.mutex);
- }
- mutex_unlock(&n->dev.mutex);
-
- return 0;
-}
-
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
size_t vhost_hlen, sock_hlen, hdr_len;
@@ -1653,7 +1638,7 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
!vhost_log_access_ok(&n->dev))
goto out_unlock;
- if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
+ if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
if (vhost_init_device_iotlb(&n->dev, true))
goto out_unlock;
}
@@ -1730,7 +1715,8 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
return -EFAULT;
if (features & ~VHOST_NET_BACKEND_FEATURES)
return -EOPNOTSUPP;
- return vhost_net_set_backend_features(n, features);
+ vhost_set_backend_features(&n->dev, features);
+ return 0;
case VHOST_RESET_OWNER:
return vhost_net_reset_owner(n);
case VHOST_SET_OWNER:
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index a54b60d6623f..3fab94f88894 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -27,37 +27,11 @@
#include "vhost.h"
enum {
- VHOST_VDPA_FEATURES =
- (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
- (1ULL << VIRTIO_F_ANY_LAYOUT) |
- (1ULL << VIRTIO_F_VERSION_1) |
- (1ULL << VIRTIO_F_IOMMU_PLATFORM) |
- (1ULL << VIRTIO_F_RING_PACKED) |
- (1ULL << VIRTIO_F_ORDER_PLATFORM) |
- (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
- (1ULL << VIRTIO_RING_F_EVENT_IDX),
-
- VHOST_VDPA_NET_FEATURES = VHOST_VDPA_FEATURES |
- (1ULL << VIRTIO_NET_F_CSUM) |
- (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
- (1ULL << VIRTIO_NET_F_MTU) |
- (1ULL << VIRTIO_NET_F_MAC) |
- (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
- (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
- (1ULL << VIRTIO_NET_F_GUEST_ECN) |
- (1ULL << VIRTIO_NET_F_GUEST_UFO) |
- (1ULL << VIRTIO_NET_F_HOST_TSO4) |
- (1ULL << VIRTIO_NET_F_HOST_TSO6) |
- (1ULL << VIRTIO_NET_F_HOST_ECN) |
- (1ULL << VIRTIO_NET_F_HOST_UFO) |
- (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
- (1ULL << VIRTIO_NET_F_STATUS) |
- (1ULL << VIRTIO_NET_F_SPEED_DUPLEX),
+ VHOST_VDPA_BACKEND_FEATURES =
+ (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
+ (1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};
-/* Currently, only network backend w/o multiqueue is supported. */
-#define VHOST_VDPA_VQ_MAX 2
-
#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
struct vhost_vdpa {
@@ -73,16 +47,13 @@ struct vhost_vdpa {
int virtio_id;
int minor;
struct eventfd_ctx *config_ctx;
+ int in_batch;
};
static DEFINE_IDA(vhost_vdpa_ida);
static dev_t vhost_vdpa_major;
-static const u64 vhost_vdpa_features[] = {
- [VIRTIO_ID_NET] = VHOST_VDPA_NET_FEATURES,
-};
-
static void handle_vq_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -96,7 +67,7 @@ static void handle_vq_kick(struct vhost_work *work)
static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
struct vhost_virtqueue *vq = private;
- struct eventfd_ctx *call_ctx = vq->call_ctx;
+ struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
if (call_ctx)
eventfd_signal(call_ctx, 1);
@@ -115,12 +86,45 @@ static irqreturn_t vhost_vdpa_config_cb(void *private)
return IRQ_HANDLED;
}
+static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
+{
+ struct vhost_virtqueue *vq = &v->vqs[qid];
+ const struct vdpa_config_ops *ops = v->vdpa->config;
+ struct vdpa_device *vdpa = v->vdpa;
+ int ret, irq;
+
+ if (!ops->get_vq_irq)
+ return;
+
+ irq = ops->get_vq_irq(vdpa, qid);
+ spin_lock(&vq->call_ctx.ctx_lock);
+ irq_bypass_unregister_producer(&vq->call_ctx.producer);
+ if (!vq->call_ctx.ctx || irq < 0) {
+ spin_unlock(&vq->call_ctx.ctx_lock);
+ return;
+ }
+
+ vq->call_ctx.producer.token = vq->call_ctx.ctx;
+ vq->call_ctx.producer.irq = irq;
+ ret = irq_bypass_register_producer(&vq->call_ctx.producer);
+ spin_unlock(&vq->call_ctx.ctx_lock);
+}
+
+static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
+{
+ struct vhost_virtqueue *vq = &v->vqs[qid];
+
+ spin_lock(&vq->call_ctx.ctx_lock);
+ irq_bypass_unregister_producer(&vq->call_ctx.producer);
+ spin_unlock(&vq->call_ctx.ctx_lock);
+}
+
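The irq bypass producer is only armed when the parent vdpa driver implements get_vq_irq(); the mlx5 driver above returns -EOPNOTSUPP and therefore never takes this path. A minimal sketch of the callback a parent that does expose per-virtqueue interrupts might provide, assuming a hypothetical PCI-backed driver (the foo_* names and the one-MSI-X-vector-per-queue layout are invented for illustration, not part of this series):

#include <linux/pci.h>
#include <linux/vdpa.h>

struct foo_vdpa {				/* hypothetical parent driver state */
	struct vdpa_device vdpa;
	struct pci_dev *pdev;
};

static int foo_vdpa_get_vq_irq(struct vdpa_device *vdev, u16 idx)
{
	struct foo_vdpa *foo = container_of(vdev, struct foo_vdpa, vdpa);

	/* one MSI-X vector per virtqueue; a negative value disables irq bypass */
	return pci_irq_vector(foo->pdev, idx);
}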
static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
- const struct vdpa_config_ops *ops = vdpa->config;
- ops->set_status(vdpa, 0);
+ vdpa_reset(vdpa);
+ v->in_batch = 0;
}
static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
@@ -155,11 +159,15 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
- u8 status;
+ u8 status, status_old;
+ int nvqs = v->nvqs;
+ u16 i;
if (copy_from_user(&status, statusp, sizeof(status)))
return -EFAULT;
+ status_old = ops->get_status(vdpa);
+
/*
* Userspace shouldn't remove status bits unless reset the
* status to 0.
@@ -169,6 +177,14 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
ops->set_status(vdpa, status);
+ if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
+ for (i = 0; i < nvqs; i++)
+ vhost_vdpa_setup_vq_irq(v, i);
+
+ if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
+ for (i = 0; i < nvqs; i++)
+ vhost_vdpa_unsetup_vq_irq(v, i);
+
return 0;
}
@@ -196,7 +212,6 @@ static long vhost_vdpa_get_config(struct vhost_vdpa *v,
struct vhost_vdpa_config __user *c)
{
struct vdpa_device *vdpa = v->vdpa;
- const struct vdpa_config_ops *ops = vdpa->config;
struct vhost_vdpa_config config;
unsigned long size = offsetof(struct vhost_vdpa_config, buf);
u8 *buf;
@@ -209,7 +224,7 @@ static long vhost_vdpa_get_config(struct vhost_vdpa *v,
if (!buf)
return -ENOMEM;
- ops->get_config(vdpa, config.off, buf, config.len);
+ vdpa_get_config(vdpa, config.off, buf, config.len);
if (copy_to_user(c->buf, buf, config.len)) {
kvfree(buf);
@@ -255,7 +270,6 @@ static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
u64 features;
features = ops->get_features(vdpa);
- features &= vhost_vdpa_features[v->virtio_id];
if (copy_to_user(featurep, &features, sizeof(features)))
return -EFAULT;
@@ -279,10 +293,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
- if (features & ~vhost_vdpa_features[v->virtio_id])
- return -EINVAL;
-
- if (ops->set_features(vdpa, features))
+ if (vdpa_set_features(vdpa, features))
return -EINVAL;
return 0;
@@ -332,14 +343,18 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
return 0;
}
+
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
void __user *argp)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+ struct vdpa_vq_state vq_state;
struct vdpa_callback cb;
struct vhost_virtqueue *vq;
struct vhost_vring_state s;
+ u64 __user *featurep = argp;
+ u64 features;
u32 idx;
long r;
@@ -353,15 +368,32 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
idx = array_index_nospec(idx, v->nvqs);
vq = &v->vqs[idx];
- if (cmd == VHOST_VDPA_SET_VRING_ENABLE) {
+ switch (cmd) {
+ case VHOST_VDPA_SET_VRING_ENABLE:
if (copy_from_user(&s, argp, sizeof(s)))
return -EFAULT;
ops->set_vq_ready(vdpa, idx, s.num);
return 0;
- }
+ case VHOST_GET_VRING_BASE:
+ r = ops->get_vq_state(v->vdpa, idx, &vq_state);
+ if (r)
+ return r;
- if (cmd == VHOST_GET_VRING_BASE)
- vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx);
+ vq->last_avail_idx = vq_state.avail_index;
+ break;
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_VDPA_BACKEND_FEATURES;
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_VDPA_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&v->vdev, features);
+ return 0;
+ }
r = vhost_vring_ioctl(&v->vdev, cmd, argp);
if (r)
@@ -377,12 +409,13 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
break;
case VHOST_SET_VRING_BASE:
- if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx))
+ vq_state.avail_index = vq->last_avail_idx;
+ if (ops->set_vq_state(vdpa, idx, &vq_state))
r = -EINVAL;
break;
case VHOST_SET_VRING_CALL:
- if (vq->call_ctx) {
+ if (vq->call_ctx.ctx) {
cb.callback = vhost_vdpa_virtqueue_cb;
cb.private = vq;
} else {
@@ -390,6 +423,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
cb.private = NULL;
}
ops->set_vq_cb(vdpa, idx, &cb);
+ vhost_vdpa_setup_vq_irq(v, idx);
break;
case VHOST_SET_VRING_NUM:
@@ -519,13 +553,15 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
if (r)
return r;
- if (ops->dma_map)
+ if (ops->dma_map) {
r = ops->dma_map(vdpa, iova, size, pa, perm);
- else if (ops->set_map)
- r = ops->set_map(vdpa, dev->iotlb);
- else
+ } else if (ops->set_map) {
+ if (!v->in_batch)
+ r = ops->set_map(vdpa, dev->iotlb);
+ } else {
r = iommu_map(v->domain, iova, pa, size,
perm_to_iommu_flags(perm));
+ }
return r;
}
@@ -538,12 +574,14 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
- if (ops->dma_map)
+ if (ops->dma_map) {
ops->dma_unmap(vdpa, iova, size);
- else if (ops->set_map)
- ops->set_map(vdpa, dev->iotlb);
- else
+ } else if (ops->set_map) {
+ if (!v->in_batch)
+ ops->set_map(vdpa, dev->iotlb);
+ } else {
iommu_unmap(v->domain, iova, size);
+ }
}
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
@@ -636,6 +674,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
struct vhost_iotlb_msg *msg)
{
struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
int r = 0;
r = vhost_dev_check_owner(dev);
@@ -649,6 +689,14 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
case VHOST_IOTLB_INVALIDATE:
vhost_vdpa_unmap(v, msg->iova, msg->size);
break;
+ case VHOST_IOTLB_BATCH_BEGIN:
+ v->in_batch = true;
+ break;
+ case VHOST_IOTLB_BATCH_END:
+ if (v->in_batch && ops->set_map)
+ ops->set_map(vdpa, dev->iotlb);
+ v->in_batch = false;
+ break;
default:
r = -EINVAL;
break;
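With the hunk above, the parent's set_map() runs once per batch instead of once per IOTLB message. A rough userspace sketch of the corresponding message flow, assuming an already initialized vhost-vdpa fd (VHOST_SET_OWNER done) and the uapi definitions from <linux/vhost.h> and <linux/vhost_types.h>; the iova, size and buffer values are placeholders:

#include <linux/vhost.h>
#include <linux/vhost_types.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int send_iotlb(int fd, __u8 type, __u64 iova, __u64 size, __u64 uaddr)
{
	struct vhost_msg_v2 msg = {
		.type = VHOST_IOTLB_MSG_V2,
		.iotlb = {
			.iova = iova,
			.size = size,
			.uaddr = uaddr,
			.perm = VHOST_ACCESS_RW,
			.type = type,
		},
	};

	/* IOTLB messages are delivered by writing to the vhost-vdpa fd */
	return write(fd, &msg, sizeof(msg)) == (ssize_t)sizeof(msg) ? 0 : -1;
}

static int map_batched(int fd, __u64 iova, __u64 size, void *buf)
{
	__u64 features = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
			 (1ULL << VHOST_BACKEND_F_IOTLB_BATCH);

	/* ack v2 messages and batching before using BATCH_BEGIN/END */
	if (ioctl(fd, VHOST_SET_BACKEND_FEATURES, &features))
		return -1;

	send_iotlb(fd, VHOST_IOTLB_BATCH_BEGIN, 0, 0, 0);
	send_iotlb(fd, VHOST_IOTLB_UPDATE, iova, size, (__u64)(uintptr_t)buf);
	/* ... further VHOST_IOTLB_UPDATE/INVALIDATE messages ... */
	send_iotlb(fd, VHOST_IOTLB_BATCH_END, 0, 0, 0); /* set_map() fires once here */
	return 0;
}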
@@ -765,6 +813,18 @@ err:
return r;
}
+static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ for (i = 0; i < v->nvqs; i++) {
+ vq = &v->vqs[i];
+ if (vq->call_ctx.producer.irq)
+ irq_bypass_unregister_producer(&vq->call_ctx.producer);
+ }
+}
+
static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
struct vhost_vdpa *v = filep->private_data;
@@ -777,6 +837,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
vhost_vdpa_iotlb_free(v);
vhost_vdpa_free_domain(v);
vhost_vdpa_config_put(v);
+ vhost_vdpa_clean_irq(v);
vhost_dev_cleanup(&v->vdev);
kfree(v->vdev.vqs);
mutex_unlock(&d->mutex);
@@ -872,7 +933,7 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
const struct vdpa_config_ops *ops = vdpa->config;
struct vhost_vdpa *v;
- int minor, nvqs = VHOST_VDPA_VQ_MAX;
+ int minor;
int r;
/* Currently, we only accept network devices. */
@@ -893,14 +954,14 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
atomic_set(&v->opened, 0);
v->minor = minor;
v->vdpa = vdpa;
- v->nvqs = nvqs;
+ v->nvqs = vdpa->nvqs;
v->virtio_id = ops->get_device_id(vdpa);
device_initialize(&v->dev);
v->dev.release = vhost_vdpa_release_dev;
v->dev.parent = &vdpa->dev;
v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
- v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue),
+ v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
GFP_KERNEL);
if (!v->vqs) {
r = -ENOMEM;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 74d135ee7e26..5857d4eec9d7 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -298,6 +298,13 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
__vhost_vq_meta_reset(d->vqs[i]);
}
+static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
+{
+ call_ctx->ctx = NULL;
+ memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
+ spin_lock_init(&call_ctx->ctx_lock);
+}
+
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
@@ -319,13 +326,13 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->log_base = NULL;
vq->error_ctx = NULL;
vq->kick = NULL;
- vq->call_ctx = NULL;
vq->log_ctx = NULL;
vhost_reset_is_le(vq);
vhost_disable_cross_endian(vq);
vq->busyloop_timeout = 0;
vq->umem = NULL;
vq->iotlb = NULL;
+ vhost_vring_call_reset(&vq->call_ctx);
__vhost_vq_meta_reset(vq);
}
@@ -685,8 +692,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
eventfd_ctx_put(dev->vqs[i]->error_ctx);
if (dev->vqs[i]->kick)
fput(dev->vqs[i]->kick);
- if (dev->vqs[i]->call_ctx)
- eventfd_ctx_put(dev->vqs[i]->call_ctx);
+ if (dev->vqs[i]->call_ctx.ctx)
+ eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
vhost_vq_reset(dev, dev->vqs[i]);
}
vhost_dev_free_iovecs(dev);
@@ -1405,7 +1412,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
memcpy(newmem, &mem, size);
if (copy_from_user(newmem->regions, m->regions,
- mem.nregions * sizeof *m->regions)) {
+ flex_array_size(newmem, regions, mem.nregions))) {
kvfree(newmem);
return -EFAULT;
}
@@ -1629,7 +1636,10 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
r = PTR_ERR(ctx);
break;
}
- swap(ctx, vq->call_ctx);
+
+ spin_lock(&vq->call_ctx.ctx_lock);
+ swap(ctx, vq->call_ctx.ctx);
+ spin_unlock(&vq->call_ctx.ctx_lock);
break;
case VHOST_SET_VRING_ERR:
if (copy_from_user(&f, argp, sizeof f)) {
@@ -2435,8 +2445,8 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
/* Signal the guest to tell it we used something up. */
- if (vq->call_ctx && vhost_notify(dev, vq))
- eventfd_signal(vq->call_ctx, 1);
+ if (vq->call_ctx.ctx && vhost_notify(dev, vq))
+ eventfd_signal(vq->call_ctx.ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);
@@ -2576,6 +2586,21 @@ struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
}
EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
+void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ mutex_lock(&dev->mutex);
+ for (i = 0; i < dev->nvqs; ++i) {
+ vq = dev->vqs[i];
+ mutex_lock(&vq->mutex);
+ vq->acked_backend_features = features;
+ mutex_unlock(&vq->mutex);
+ }
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(vhost_set_backend_features);
static int __init vhost_init(void)
{
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index c8e96a095d3b..9032d3c2a9f4 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -13,6 +13,7 @@
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
+#include <linux/irqbypass.h>
struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -60,6 +61,12 @@ enum vhost_uaddr_type {
VHOST_NUM_ADDRS = 3,
};
+struct vhost_vring_call {
+ struct eventfd_ctx *ctx;
+ struct irq_bypass_producer producer;
+ spinlock_t ctx_lock;
+};
+
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
@@ -72,7 +79,7 @@ struct vhost_virtqueue {
vring_used_t __user *used;
const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
struct file *kick;
- struct eventfd_ctx *call_ctx;
+ struct vhost_vring_call call_ctx;
struct eventfd_ctx *error_ctx;
struct eventfd_ctx *log_ctx;
@@ -207,6 +214,8 @@ void vhost_enqueue_msg(struct vhost_dev *dev,
struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
struct list_head *head);
+void vhost_set_backend_features(struct vhost_dev *dev, u64 features);
+
__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 20d96a5ac384..25e409bbb1a2 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -121,18 +121,7 @@ out:
static int pm860x_backlight_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.state & BL_CORE_SUSPENDED)
- brightness = 0;
-
- return pm860x_backlight_set(bl, brightness);
+ return pm860x_backlight_set(bl, backlight_get_brightness(bl));
}
static int pm860x_backlight_get_brightness(struct backlight_device *bl)
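This conversion, and the ones in the following backlight drivers, fold the open-coded power/fb_blank/suspend checks into backlight_get_brightness(). The helper itself lives in include/linux/backlight.h and is not part of the hunks shown here; behaviourally it is expected to reduce to roughly the following paraphrase:

/* behavioural paraphrase of the helper, not the exact header implementation */
static inline int backlight_get_brightness(const struct backlight_device *bd)
{
	if (bd->props.power != FB_BLANK_UNBLANK ||
	    bd->props.fb_blank != FB_BLANK_UNBLANK ||
	    bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
		return 0;	/* blanked or suspended: report the backlight as off */

	return bd->props.brightness;
}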
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 7d22d7377606..87f9fc238d28 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -173,14 +173,6 @@ config BACKLIGHT_EP93XX
To compile this driver as a module, choose M here: the module will
be called ep93xx_bl.
-config BACKLIGHT_GENERIC
- tristate "Generic (aka Sharp Corgi) Backlight Driver"
- default y
- help
- Say y to enable the generic platform backlight driver previously
- known as the Corgi backlight driver. If you have a Sharp Zaurus
- SL-C7xx, SL-Cxx00 or SL-6000x say y.
-
config BACKLIGHT_IPAQ_MICRO
tristate "iPAQ microcontroller backlight driver"
depends on MFD_IPAQ_MICRO
@@ -386,13 +378,6 @@ config BACKLIGHT_LP8788
help
This supports TI LP8788 backlight driver.
-config BACKLIGHT_OT200
- tristate "Backlight driver for ot200 visualisation device"
- depends on CS5535_MFGPT && GPIO_CS5535
- help
- To compile this driver as a module, choose M here: the module will be
- called ot200_bl.
-
config BACKLIGHT_PANDORA
tristate "Backlight driver for Pandora console"
depends on TWL4030_CORE
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 0c1a1524627a..13463b99f1f9 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
obj-$(CONFIG_BACKLIGHT_DA9052) += da9052_bl.o
obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o
-obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
obj-$(CONFIG_BACKLIGHT_GPIO) += gpio_backlight.o
obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
@@ -45,7 +44,6 @@ obj-$(CONFIG_BACKLIGHT_LP8788) += lp8788_bl.o
obj-$(CONFIG_BACKLIGHT_LV5207LP) += lv5207lp.o
obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
-obj-$(CONFIG_BACKLIGHT_OT200) += ot200_bl.o
obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 0f63f76723a5..686988c3df3a 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -65,15 +65,7 @@ static int adp5520_bl_set(struct backlight_device *bl, int brightness)
static int adp5520_bl_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- return adp5520_bl_set(bl, brightness);
+ return adp5520_bl_set(bl, backlight_get_brightness(bl));
}
static int adp5520_bl_get_brightness(struct backlight_device *bl)
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 19968104fc47..ddc7f5f0401f 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -361,15 +361,7 @@ static int adp8860_bl_set(struct backlight_device *bl, int brightness)
static int adp8860_bl_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- return adp8860_bl_set(bl, brightness);
+ return adp8860_bl_set(bl, backlight_get_brightness(bl));
}
static int adp8860_bl_get_brightness(struct backlight_device *bl)
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 4c0032010cfe..8b5213a39527 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -399,15 +399,7 @@ static int adp8870_bl_set(struct backlight_device *bl, int brightness)
static int adp8870_bl_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- return adp8870_bl_set(bl, brightness);
+ return adp8870_bl_set(bl, backlight_get_brightness(bl));
}
static int adp8870_bl_get_brightness(struct backlight_device *bl)
diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
index 33f0f0f2e8b3..3b60019cdc2b 100644
--- a/drivers/video/backlight/as3711_bl.c
+++ b/drivers/video/backlight/as3711_bl.c
@@ -104,17 +104,10 @@ static int as3711_bl_update_status(struct backlight_device *bl)
struct as3711_bl_data *data = bl_get_data(bl);
struct as3711_bl_supply *supply = to_supply(data);
struct as3711 *as3711 = supply->as3711;
- int brightness = bl->props.brightness;
+ int brightness;
int ret = 0;
- dev_dbg(&bl->dev, "%s(): brightness %u, pwr %x, blank %x, state %x\n",
- __func__, bl->props.brightness, bl->props.power,
- bl->props.fb_blank, bl->props.state);
-
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK ||
- bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
- brightness = 0;
+ brightness = backlight_get_brightness(bl);
if (data->type == AS3711_BL_SU1) {
ret = as3711_set_brightness_v(as3711, brightness,
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 92d80aa0c0ef..537fe1b376ad 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -22,6 +22,47 @@
#include <asm/backlight.h>
#endif
+/**
+ * DOC: overview
+ *
+ * The backlight core supports implementing backlight drivers.
+ *
+ * A backlight driver is registered using
+ * devm_backlight_device_register(). The properties of the backlight
+ * device, such as type and max_brightness, must be specified.
+ * When the core detects a change in, for example, brightness or power state,
+ * the update_status() operation is called. The backlight driver shall
+ * implement this operation and use it to adjust the backlight.
+ *
+ * Several sysfs attributes are provided by the backlight core::
+ *
+ * - brightness R/W, set the requested brightness level
+ * - actual_brightness RO, the brightness level used by the HW
+ * - max_brightness RO, the maximum brightness level supported
+ *
+ * See Documentation/ABI/stable/sysfs-class-backlight for the full list.
+ *
+ * The backlight can be adjusted using the sysfs interface, and
+ * the backlight driver may also support adjusting the backlight using
+ * a hot-key or some other platform- or firmware-specific way.
+ *
+ * The driver must implement the get_brightness() operation if
+ * the HW does not support all the levels that can be specified in
+ * brightness, thus providing user-space access to the actual level
+ * via the actual_brightness attribute.
+ *
+ * When the backlight changes, this is reported to user-space using
+ * a uevent connected to the actual_brightness attribute.
+ * When brightness is set by platform-specific means, for example
+ * a hot-key to adjust the backlight, the driver must notify the backlight
+ * core that brightness has changed using backlight_force_update().
+ *
+ * The backlight driver core receives notifications from fbdev. If
+ * the event is FB_EVENT_BLANK, and the value of blank, from the
+ * FBIOBLANK ioctl, results in a change in the backlight state, the
+ * update_status() operation is called.
+ */
+
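To make the overview concrete, a minimal driver skeleton following the flow described above might look roughly like this; struct foo_chip, foo_write_level() and the platform device wiring are invented for the example, while the backlight_* calls and properties come from the backlight API:

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/platform_device.h>

struct foo_chip;						/* hypothetical controller state */
static int foo_write_level(struct foo_chip *chip, int level);	/* hypothetical register write */

static int foo_bl_update_status(struct backlight_device *bl)
{
	struct foo_chip *chip = bl_get_data(bl);

	/* backlight_get_brightness() folds power/fb_blank/suspend into 0 */
	return foo_write_level(chip, backlight_get_brightness(bl));
}

static const struct backlight_ops foo_bl_ops = {
	.update_status	= foo_bl_update_status,
};

static int foo_bl_probe(struct platform_device *pdev)
{
	struct backlight_properties props = {
		.type		= BACKLIGHT_RAW,
		.max_brightness	= 255,
		.brightness	= 255,
	};
	struct backlight_device *bl;

	bl = devm_backlight_device_register(&pdev->dev, "foo-backlight",
					    &pdev->dev,
					    dev_get_drvdata(pdev->dev.parent),
					    &foo_bl_ops, &props);
	if (IS_ERR(bl))
		return PTR_ERR(bl);

	backlight_update_status(bl);	/* push the initial level to the HW */
	return 0;
}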
static struct list_head backlight_dev_list;
static struct mutex backlight_dev_list_mutex;
static struct blocking_notifier_head backlight_notifier;
@@ -40,9 +81,17 @@ static const char *const backlight_scale_types[] = {
#if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE))
-/* This callback gets called when something important happens inside a
- * framebuffer driver. We're looking if that important event is blanking,
- * and if it is and necessary, we're switching backlight power as well ...
+/*
+ * fb_notifier_callback
+ *
+ * This callback gets called when something important happens inside a
+ * framebuffer driver. The backlight core only cares about FB_BLANK_UNBLANK
+ * which is reported to the driver using backlight_update_status()
+ * as a state change.
+ *
+ * There may be several fbdevs connected to the backlight device,
+ * in which case each of them is tracked. A state change is only reported
+ * if there is a change in backlight for the specified fbdev.
*/
static int fb_notifier_callback(struct notifier_block *self,
unsigned long event, void *data)
@@ -58,28 +107,29 @@ static int fb_notifier_callback(struct notifier_block *self,
bd = container_of(self, struct backlight_device, fb_notif);
mutex_lock(&bd->ops_lock);
- if (bd->ops)
- if (!bd->ops->check_fb ||
- bd->ops->check_fb(bd, evdata->info)) {
- fb_blank = *(int *)evdata->data;
- if (fb_blank == FB_BLANK_UNBLANK &&
- !bd->fb_bl_on[node]) {
- bd->fb_bl_on[node] = true;
- if (!bd->use_count++) {
- bd->props.state &= ~BL_CORE_FBBLANK;
- bd->props.fb_blank = FB_BLANK_UNBLANK;
- backlight_update_status(bd);
- }
- } else if (fb_blank != FB_BLANK_UNBLANK &&
- bd->fb_bl_on[node]) {
- bd->fb_bl_on[node] = false;
- if (!(--bd->use_count)) {
- bd->props.state |= BL_CORE_FBBLANK;
- bd->props.fb_blank = fb_blank;
- backlight_update_status(bd);
- }
- }
+
+ if (!bd->ops)
+ goto out;
+ if (bd->ops->check_fb && !bd->ops->check_fb(bd, evdata->info))
+ goto out;
+
+ fb_blank = *(int *)evdata->data;
+ if (fb_blank == FB_BLANK_UNBLANK && !bd->fb_bl_on[node]) {
+ bd->fb_bl_on[node] = true;
+ if (!bd->use_count++) {
+ bd->props.state &= ~BL_CORE_FBBLANK;
+ bd->props.fb_blank = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
}
+ } else if (fb_blank != FB_BLANK_UNBLANK && bd->fb_bl_on[node]) {
+ bd->fb_bl_on[node] = false;
+ if (!(--bd->use_count)) {
+ bd->props.state |= BL_CORE_FBBLANK;
+ bd->props.fb_blank = fb_blank;
+ backlight_update_status(bd);
+ }
+ }
+out:
mutex_unlock(&bd->ops_lock);
return 0;
}
@@ -320,9 +370,13 @@ ATTRIBUTE_GROUPS(bl_device);
* backlight_force_update - tell the backlight subsystem that hardware state
* has changed
* @bd: the backlight device to update
+ * @reason: reason for update
*
* Updates the internal state of the backlight in response to a hardware event,
- * and generate a uevent to notify userspace
+ * and generates an uevent to notify userspace. A backlight driver shall call
+ * backlight_force_update() when the backlight is changed using, for example,
+ * a hot-key. The updated brightness is read using get_brightness() and the
+ * brightness value is reported using an uevent.
*/
void backlight_force_update(struct backlight_device *bd,
enum backlight_update_reason reason)
@@ -335,19 +389,7 @@ void backlight_force_update(struct backlight_device *bd,
}
EXPORT_SYMBOL(backlight_force_update);
-/**
- * backlight_device_register - create and register a new object of
- * backlight_device class.
- * @name: the name of the new object(must be the same as the name of the
- * respective framebuffer device).
- * @parent: a pointer to the parent device
- * @devdata: an optional pointer to be stored for private driver use. The
- * methods may retrieve it by using bl_get_data(bd).
- * @ops: the backlight operations structure.
- *
- * Creates and registers new backlight device. Returns either an
- * ERR_PTR() or a pointer to the newly allocated device.
- */
+/* deprecated - use devm_backlight_device_register() */
struct backlight_device *backlight_device_register(const char *name,
struct device *parent, void *devdata, const struct backlight_ops *ops,
const struct backlight_properties *props)
@@ -414,6 +456,15 @@ struct backlight_device *backlight_device_register(const char *name,
}
EXPORT_SYMBOL(backlight_device_register);
+/**
+ * backlight_device_get_by_type - find first backlight device of a type
+ * @type: the type of backlight device
+ *
+ * Look up the first backlight device of the specified type.
+ *
+ * RETURNS:
+ *
+ * Pointer to backlight device if any was found. Otherwise NULL.
+ */
struct backlight_device *backlight_device_get_by_type(enum backlight_type type)
{
bool found = false;
@@ -453,12 +504,7 @@ struct backlight_device *backlight_device_get_by_name(const char *name)
}
EXPORT_SYMBOL(backlight_device_get_by_name);
-/**
- * backlight_device_unregister - unregisters a backlight device object.
- * @bd: the backlight device object to be unregistered and freed.
- *
- * Unregisters a previously registered via backlight_device_register object.
- */
+/* deprecated - use devm_backlight_device_unregister() */
void backlight_device_unregister(struct backlight_device *bd)
{
if (!bd)
@@ -506,10 +552,12 @@ static int devm_backlight_device_match(struct device *dev, void *res,
* backlight_register_notifier - get notified of backlight (un)registration
* @nb: notifier block with the notifier to call on backlight (un)registration
*
- * @return 0 on success, otherwise a negative error code
- *
* Register a notifier to get notified when backlight devices get registered
* or unregistered.
+ *
+ * RETURNS:
+ *
+ * 0 on success, otherwise a negative error code
*/
int backlight_register_notifier(struct notifier_block *nb)
{
@@ -521,10 +569,12 @@ EXPORT_SYMBOL(backlight_register_notifier);
* backlight_unregister_notifier - unregister a backlight notifier
* @nb: notifier block to unregister
*
- * @return 0 on success, otherwise a negative error code
- *
* Unregister a notifier previously registered with
* backlight_register_notifier().
+ *
+ * RETURNS:
+ *
+ * 0 on success, otherwise a negative error code
*/
int backlight_unregister_notifier(struct notifier_block *nb)
{
@@ -533,19 +583,21 @@ int backlight_unregister_notifier(struct notifier_block *nb)
EXPORT_SYMBOL(backlight_unregister_notifier);
/**
- * devm_backlight_device_register - resource managed backlight_device_register()
+ * devm_backlight_device_register - register a new backlight device
* @dev: the device to register
* @name: the name of the device
- * @parent: a pointer to the parent device
+ * @parent: a pointer to the parent device (often the same as @dev)
* @devdata: an optional pointer to be stored for private driver use
* @ops: the backlight operations structure
* @props: the backlight properties
*
- * @return a struct backlight on success, or an ERR_PTR on error
+ * Creates and registers a new backlight device. When a backlight device
+ * is registered, the configuration must be specified in the @props
+ * parameter. See description of &backlight_properties.
+ *
+ * RETURNS:
*
- * Managed backlight_device_register(). The backlight_device returned
- * from this function are automatically freed on driver detach.
- * See backlight_device_register() for more information.
+ * struct backlight_device pointer on success, or an ERR_PTR() on error
*/
struct backlight_device *devm_backlight_device_register(struct device *dev,
const char *name, struct device *parent, void *devdata,
@@ -573,13 +625,13 @@ struct backlight_device *devm_backlight_device_register(struct device *dev,
EXPORT_SYMBOL(devm_backlight_device_register);
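
For illustration, a hedged sketch of the registration pattern the kernel-doc above refers to; the probe function and example_bl_ops (a driver-defined const struct backlight_ops) are hypothetical, and the properties set are the fields mentioned in &backlight_properties:

	static int example_bl_probe(struct platform_device *pdev)
	{
		struct backlight_properties props;
		struct backlight_device *bd;

		memset(&props, 0, sizeof(props));
		props.type = BACKLIGHT_RAW;
		props.max_brightness = 255;
		props.brightness = 255;

		bd = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev),
						    &pdev->dev, NULL, &example_bl_ops,
						    &props);
		if (IS_ERR(bd))
			return PTR_ERR(bd);

		backlight_update_status(bd);
		return 0;
	}
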
/**
- * devm_backlight_device_unregister - resource managed backlight_device_unregister()
+ * devm_backlight_device_unregister - unregister backlight device
* @dev: the device to unregister
* @bd: the backlight device to unregister
*
- * Deallocated a backlight allocated with devm_backlight_device_register().
+ * Deallocates a backlight allocated with devm_backlight_device_register().
* Normally this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
+ * code will ensure that the resources are freed.
*/
void devm_backlight_device_unregister(struct device *dev,
struct backlight_device *bd)
@@ -621,22 +673,7 @@ struct backlight_device *of_find_backlight_by_node(struct device_node *node)
EXPORT_SYMBOL(of_find_backlight_by_node);
#endif
-/**
- * of_find_backlight - Get backlight device
- * @dev: Device
- *
- * This function looks for a property named 'backlight' on the DT node
- * connected to @dev and looks up the backlight device.
- *
- * Call backlight_put() to drop the reference on the backlight device.
- *
- * Returns:
- * A pointer to the backlight device if found.
- * Error pointer -EPROBE_DEFER if the DT property is set, but no backlight
- * device is found.
- * NULL if there's no backlight property.
- */
-struct backlight_device *of_find_backlight(struct device *dev)
+static struct backlight_device *of_find_backlight(struct device *dev)
{
struct backlight_device *bd = NULL;
struct device_node *np;
@@ -662,20 +699,29 @@ struct backlight_device *of_find_backlight(struct device *dev)
return bd;
}
-EXPORT_SYMBOL(of_find_backlight);
static void devm_backlight_release(void *data)
{
- backlight_put(data);
+ struct backlight_device *bd = data;
+
+ if (bd)
+ put_device(&bd->dev);
}
/**
- * devm_of_find_backlight - Resource-managed of_find_backlight()
- * @dev: Device
+ * devm_of_find_backlight - find backlight for a device
+ * @dev: the device
*
- * Device managed version of of_find_backlight().
- * The reference on the backlight device is automatically
+ * This function looks for a property named 'backlight' on the DT node
+ * connected to @dev and looks up the backlight device. The lookup is
+ * device managed so the reference to the backlight device is automatically
* dropped on driver detach.
+ *
+ * RETURNS:
+ *
+ * A pointer to the backlight device if found.
+ * Error pointer -EPROBE_DEFER if the DT property is set, but no backlight
+ * device is found. NULL if there's no backlight property.
*/
struct backlight_device *devm_of_find_backlight(struct device *dev)
{
@@ -687,7 +733,7 @@ struct backlight_device *devm_of_find_backlight(struct device *dev)
return bd;
ret = devm_add_action(dev, devm_backlight_release, bd);
if (ret) {
- backlight_put(bd);
+ put_device(&bd->dev);
return ERR_PTR(ret);
}
return bd;
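
A short, hedged caller sketch for devm_of_find_backlight(); 'priv' is a hypothetical driver-private structure:

	bd = devm_of_find_backlight(&pdev->dev);
	if (IS_ERR(bd))
		return PTR_ERR(bd);	/* typically -EPROBE_DEFER */
	priv->backlight = bd;		/* NULL simply means no backlight property */
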
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index d5d5fb457e78..515184fbe33a 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -82,12 +82,7 @@ static int bd6107_write(struct bd6107 *bd, u8 reg, u8 data)
static int bd6107_backlight_update_status(struct backlight_device *backlight)
{
struct bd6107 *bd = bl_get_data(backlight);
- int brightness = backlight->props.brightness;
-
- if (backlight->props.power != FB_BLANK_UNBLANK ||
- backlight->props.fb_blank != FB_BLANK_UNBLANK ||
- backlight->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
- brightness = 0;
+ int brightness = backlight_get_brightness(backlight);
if (brightness) {
bd6107_write(bd, BD6107_PORTSEL, BD6107_PORTSEL_LEDM(2) |
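
The open-coded power/fb_blank/state checks removed here (and in the drivers converted below) are what the new backlight_get_brightness() helper centralizes. A sketch of its expected behaviour, assuming it mirrors the checks it replaces:

	/* Hedged sketch: report 0 while the backlight is blanked or suspended,
	 * otherwise report the configured brightness. */
	static inline int backlight_get_brightness(const struct backlight_device *bd)
	{
		if (bd->props.power != FB_BLANK_UNBLANK ||
		    bd->props.fb_blank != FB_BLANK_UNBLANK ||
		    bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
			return 0;

		return bd->props.brightness;
	}
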
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 25ef0cbd7583..33f5d80495e6 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -420,13 +420,7 @@ static int corgi_bl_set_intensity(struct corgi_lcd *lcd, int intensity)
static int corgi_bl_update_status(struct backlight_device *bd)
{
struct corgi_lcd *lcd = bl_get_data(bd);
- int intensity = bd->props.brightness;
-
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
-
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- intensity = 0;
+ int intensity = backlight_get_brightness(bd);
if (corgibl_flags & CORGIBL_SUSPENDED)
intensity = 0;
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 4624b7b7c6a6..4ad0a72531fe 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -59,26 +59,18 @@ struct cr_panel {
static int cr_backlight_set_intensity(struct backlight_device *bd)
{
- int intensity = bd->props.brightness;
u32 addr = gpio_bar + CRVML_PANEL_PORT;
u32 cur = inl(addr);
- if (bd->props.power == FB_BLANK_UNBLANK)
- intensity = FB_BLANK_UNBLANK;
- if (bd->props.fb_blank == FB_BLANK_UNBLANK)
- intensity = FB_BLANK_UNBLANK;
- if (bd->props.power == FB_BLANK_POWERDOWN)
- intensity = FB_BLANK_POWERDOWN;
- if (bd->props.fb_blank == FB_BLANK_POWERDOWN)
- intensity = FB_BLANK_POWERDOWN;
-
- if (intensity == FB_BLANK_UNBLANK) { /* FULL ON */
- cur &= ~CRVML_BACKLIGHT_OFF;
- outl(cur, addr);
- } else if (intensity == FB_BLANK_POWERDOWN) { /* OFF */
+ if (backlight_get_brightness(bd) == 0) {
+ /* OFF */
cur |= CRVML_BACKLIGHT_OFF;
outl(cur, addr);
- } /* anything else, don't bother */
+ } else {
+ /* FULL ON */
+ cur &= ~CRVML_BACKLIGHT_OFF;
+ outl(cur, addr);
+ }
return 0;
}
@@ -90,9 +82,9 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
u8 intensity;
if (cur & CRVML_BACKLIGHT_OFF)
- intensity = FB_BLANK_POWERDOWN;
+ intensity = 0;
else
- intensity = FB_BLANK_UNBLANK;
+ intensity = 1;
return intensity;
}
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 62540e4bdedb..71f21bbc7a9f 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -77,18 +77,7 @@ static int da903x_backlight_set(struct backlight_device *bl, int brightness)
static int da903x_backlight_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.state & BL_CORE_SUSPENDED)
- brightness = 0;
-
- return da903x_backlight_set(bl, brightness);
+ return da903x_backlight_set(bl, backlight_get_brightness(bl));
}
static int da903x_backlight_get_brightness(struct backlight_device *bl)
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index 4149e0b2f83c..2387009d452d 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -36,13 +36,7 @@ static int ep93xxbl_set(struct backlight_device *bl, int brightness)
static int ep93xxbl_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- return ep93xxbl_set(bl, brightness);
+ return ep93xxbl_set(bl, backlight_get_brightness(bl));
}
static int ep93xxbl_get_brightness(struct backlight_device *bl)
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
deleted file mode 100644
index 8fe63dbc8590..000000000000
--- a/drivers/video/backlight/generic_bl.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Generic Backlight Driver
- *
- * Copyright (c) 2004-2008 Richard Purdie
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/fb.h>
-#include <linux/backlight.h>
-
-static int genericbl_intensity;
-static struct backlight_device *generic_backlight_device;
-static struct generic_bl_info *bl_machinfo;
-
-static int genericbl_send_intensity(struct backlight_device *bd)
-{
- int intensity = bd->props.brightness;
-
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
- if (bd->props.state & BL_CORE_FBBLANK)
- intensity = 0;
- if (bd->props.state & BL_CORE_SUSPENDED)
- intensity = 0;
-
- bl_machinfo->set_bl_intensity(intensity);
-
- genericbl_intensity = intensity;
-
- if (bl_machinfo->kick_battery)
- bl_machinfo->kick_battery();
-
- return 0;
-}
-
-static int genericbl_get_intensity(struct backlight_device *bd)
-{
- return genericbl_intensity;
-}
-
-static const struct backlight_ops genericbl_ops = {
- .options = BL_CORE_SUSPENDRESUME,
- .get_brightness = genericbl_get_intensity,
- .update_status = genericbl_send_intensity,
-};
-
-static int genericbl_probe(struct platform_device *pdev)
-{
- struct backlight_properties props;
- struct generic_bl_info *machinfo = dev_get_platdata(&pdev->dev);
- const char *name = "generic-bl";
- struct backlight_device *bd;
-
- bl_machinfo = machinfo;
- if (!machinfo->limit_mask)
- machinfo->limit_mask = -1;
-
- if (machinfo->name)
- name = machinfo->name;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = machinfo->max_intensity;
- bd = devm_backlight_device_register(&pdev->dev, name, &pdev->dev,
- NULL, &genericbl_ops, &props);
- if (IS_ERR(bd))
- return PTR_ERR(bd);
-
- platform_set_drvdata(pdev, bd);
-
- bd->props.power = FB_BLANK_UNBLANK;
- bd->props.brightness = machinfo->default_intensity;
- backlight_update_status(bd);
-
- generic_backlight_device = bd;
-
- dev_info(&pdev->dev, "Generic Backlight Driver Initialized.\n");
- return 0;
-}
-
-static int genericbl_remove(struct platform_device *pdev)
-{
- struct backlight_device *bd = platform_get_drvdata(pdev);
-
- bd->props.power = 0;
- bd->props.brightness = 0;
- backlight_update_status(bd);
-
- dev_info(&pdev->dev, "Generic Backlight Driver Unloaded\n");
- return 0;
-}
-
-static struct platform_driver genericbl_driver = {
- .probe = genericbl_probe,
- .remove = genericbl_remove,
- .driver = {
- .name = "generic-bl",
- },
-};
-
-module_platform_driver(genericbl_driver);
-
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Generic Backlight Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index 75409ddfba3e..6f78d928f054 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -21,24 +21,11 @@ struct gpio_backlight {
struct gpio_desc *gpiod;
};
-static int gpio_backlight_get_next_brightness(struct backlight_device *bl)
-{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK ||
- bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
- brightness = 0;
-
- return brightness;
-}
-
static int gpio_backlight_update_status(struct backlight_device *bl)
{
struct gpio_backlight *gbl = bl_get_data(bl);
- int brightness = gpio_backlight_get_next_brightness(bl);
- gpiod_set_value_cansleep(gbl->gpiod, brightness);
+ gpiod_set_value_cansleep(gbl->gpiod, backlight_get_brightness(bl));
return 0;
}
@@ -108,7 +95,7 @@ static int gpio_backlight_probe(struct platform_device *pdev)
bl->props.brightness = 1;
- init_brightness = gpio_backlight_get_next_brightness(bl);
+ init_brightness = backlight_get_brightness(bl);
ret = gpiod_direction_output(gbl->gpiod, init_brightness);
if (ret) {
dev_err(dev, "failed to set initial brightness\n");
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 8ea42b8d9bc8..9123c33def05 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -33,12 +33,8 @@ static void hp680bl_send_intensity(struct backlight_device *bd)
{
unsigned long flags;
u16 v;
- int intensity = bd->props.brightness;
+ int intensity = backlight_get_brightness(bd);
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- intensity = 0;
if (hp680bl_suspended)
intensity = 0;
diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c
index 9c5aa3fbb284..328aba9cddad 100644
--- a/drivers/video/backlight/ili922x.c
+++ b/drivers/video/backlight/ili922x.c
@@ -107,6 +107,8 @@
* lower frequency when the registers are read/written.
* The macro sets the frequency in the spi_transfer structure if
* the frequency exceeds the maximum value.
+ * @s: pointer to an SPI device
+ * @x: pointer to the read/write buffer pair
*/
#define CHECK_FREQ_REG(s, x) \
do { \
@@ -121,7 +123,7 @@
#define set_tx_byte(b) (tx_invert ? ~(b) : b)
-/**
+/*
* ili922x_id - id as set by manufacturer
*/
static int ili922x_id = 1;
@@ -130,7 +132,7 @@ module_param(ili922x_id, int, 0);
static int tx_invert;
module_param(tx_invert, int, 0);
-/**
+/*
* driver's private structure
*/
struct ili922x {
@@ -293,6 +295,8 @@ static int ili922x_write(struct spi_device *spi, u8 reg, u16 value)
#ifdef DEBUG
/**
* ili922x_reg_dump - dump all registers
+ *
+ * @spi: pointer to an SPI device
*/
static void ili922x_reg_dump(struct spi_device *spi)
{
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index f0385f9cf9da..996f7ba3b373 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -54,7 +54,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
jornada_ssp_start();
/* If backlight is off then really turn it off */
- if ((bd->props.power != FB_BLANK_UNBLANK) || (bd->props.fb_blank != FB_BLANK_UNBLANK)) {
+ if (backlight_is_blank(bd)) {
ret = jornada_ssp_byte(BRIGHTNESSOFF);
if (ret != TXDUMMY) {
dev_info(&bd->dev, "brightness off timeout\n");
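
jornada720 only needs the on/off decision, so it uses the boolean backlight_is_blank() variant. Assuming that predicate mirrors the condition it replaces, it is roughly:

	/* Hedged sketch: true while the backlight should be off. */
	static inline bool backlight_is_blank(const struct backlight_device *bd)
	{
		return bd->props.power != FB_BLANK_UNBLANK ||
		       bd->props.fb_blank != FB_BLANK_UNBLANK ||
		       bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK);
	}
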
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index 1dfe13c18925..55794b239cff 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -87,12 +87,8 @@ static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
static int kb3886bl_send_intensity(struct backlight_device *bd)
{
- int intensity = bd->props.brightness;
+ int intensity = backlight_get_brightness(bd);
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- intensity = 0;
if (kb3886bl_flags & KB3886BL_SUSPENDED)
intensity = 0;
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 78b033358625..db56e465aaff 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -179,6 +179,7 @@ ATTRIBUTE_GROUPS(lcd_device);
* lcd_device_register - register a new object of lcd_device class.
* @name: the name of the new object(must be the same as the name of the
* respective framebuffer device).
+ * @parent: pointer to the parent's struct device.
* @devdata: an optional pointer to be stored in the device. The
* methods may retrieve it by using lcd_get_data(ld).
* @ops: the lcd operations structure.
diff --git a/drivers/video/backlight/led_bl.c b/drivers/video/backlight/led_bl.c
index 3f66549997c8..f54d256e2d54 100644
--- a/drivers/video/backlight/led_bl.c
+++ b/drivers/video/backlight/led_bl.c
@@ -54,12 +54,7 @@ static void led_bl_power_off(struct led_bl_data *priv)
static int led_bl_update_status(struct backlight_device *bl)
{
struct led_bl_data *priv = bl_get_data(bl);
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK ||
- bl->props.state & BL_CORE_FBBLANK)
- brightness = 0;
+ int brightness = backlight_get_brightness(bl);
if (brightness > 0)
led_bl_set_brightness(priv, brightness);
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
index ee09d1bd02b9..1df1b6643c0b 100644
--- a/drivers/video/backlight/lm3533_bl.c
+++ b/drivers/video/backlight/lm3533_bl.c
@@ -39,14 +39,8 @@ static inline int lm3533_bl_get_ctrlbank_id(struct lm3533_bl *bl)
static int lm3533_bl_update_status(struct backlight_device *bd)
{
struct lm3533_bl *bl = bl_get_data(bd);
- int brightness = bd->props.brightness;
- if (bd->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- return lm3533_ctrlbank_set_brightness(&bl->cb, (u8)brightness);
+ return lm3533_ctrlbank_set_brightness(&bl->cb, backlight_get_brightness(bd));
}
static int lm3533_bl_get_brightness(struct backlight_device *bd)
@@ -235,7 +229,7 @@ static struct attribute *lm3533_bl_attributes[] = {
static umode_t lm3533_bl_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct lm3533_bl *bl = dev_get_drvdata(dev);
umode_t mode = attr->mode;
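
The kobj_to_dev() conversion above is purely cosmetic; the helper is essentially a named wrapper around the container_of() it replaces:

	static inline struct device *kobj_to_dev(struct kobject *kobj)
	{
		return container_of(kobj, struct device, kobj);
	}
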
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index ee320883b710..e88a2b0e5904 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -391,7 +391,7 @@ static int lm3630a_parse_led_sources(struct fwnode_handle *node,
return ret;
for (i = 0; i < num_sources; i++) {
- if (sources[i] < LM3630A_SINK_0 || sources[i] > LM3630A_SINK_1)
+ if (sources[i] != LM3630A_SINK_0 && sources[i] != LM3630A_SINK_1)
return -EINVAL;
ret |= BIT(sources[i]);
@@ -412,7 +412,7 @@ static int lm3630a_parse_bank(struct lm3630a_platform_data *pdata,
if (ret)
return ret;
- if (bank < LM3630A_BANK_0 || bank > LM3630A_BANK_1)
+ if (bank != LM3630A_BANK_0 && bank != LM3630A_BANK_1)
return -EINVAL;
led_sources = lm3630a_parse_led_sources(node, BIT(bank));
diff --git a/drivers/video/backlight/lms501kf03.c b/drivers/video/backlight/lms501kf03.c
index 8ae32e3573c1..f949b66dce1b 100644
--- a/drivers/video/backlight/lms501kf03.c
+++ b/drivers/video/backlight/lms501kf03.c
@@ -9,7 +9,6 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/fb.h>
-#include <linux/gpio.h>
#include <linux/lcd.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
@@ -89,14 +88,6 @@ static const unsigned char seq_rgb_gamma[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
-static const unsigned char seq_up_dn[] = {
- 0x36, 0x10,
-};
-
-static const unsigned char seq_sleep_in[] = {
- 0x10,
-};
-
static const unsigned char seq_sleep_out[] = {
0x11,
};
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index cdc02e04f89d..297ee2e1ab0b 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -111,12 +111,8 @@ static int current_intensity;
static int locomolcd_set_intensity(struct backlight_device *bd)
{
- int intensity = bd->props.brightness;
+ int intensity = backlight_get_brightness(bd);
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- intensity = 0;
if (locomolcd_flags & LOCOMOLCD_SUSPENDED)
intensity = 0;
diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c
index c6ad73a784e2..1842ae9a55f8 100644
--- a/drivers/video/backlight/lv5207lp.c
+++ b/drivers/video/backlight/lv5207lp.c
@@ -46,12 +46,7 @@ static int lv5207lp_write(struct lv5207lp *lv, u8 reg, u8 data)
static int lv5207lp_backlight_update_status(struct backlight_device *backlight)
{
struct lv5207lp *lv = bl_get_data(backlight);
- int brightness = backlight->props.brightness;
-
- if (backlight->props.power != FB_BLANK_UNBLANK ||
- backlight->props.fb_blank != FB_BLANK_UNBLANK ||
- backlight->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
- brightness = 0;
+ int brightness = backlight_get_brightness(backlight);
if (brightness) {
lv5207lp_write(lv, LV5207LP_CTRL1,
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 97cc260ff9d1..e607ec6fd4bf 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -64,18 +64,7 @@ out:
static int max8925_backlight_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.state & BL_CORE_SUSPENDED)
- brightness = 0;
-
- return max8925_backlight_set(bl, brightness);
+ return max8925_backlight_set(bl, backlight_get_brightness(bl));
}
static int max8925_backlight_get_brightness(struct backlight_device *bl)
diff --git a/drivers/video/backlight/ot200_bl.c b/drivers/video/backlight/ot200_bl.c
deleted file mode 100644
index 23ee7106c72a..000000000000
--- a/drivers/video/backlight/ot200_bl.c
+++ /dev/null
@@ -1,162 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Bachmann electronic GmbH
- * Christian Gmeiner <christian.gmeiner@gmail.com>
- *
- * Backlight driver for ot200 visualisation device from
- * Bachmann electronic GmbH.
- */
-
-#include <linux/module.h>
-#include <linux/fb.h>
-#include <linux/backlight.h>
-#include <linux/gpio.h>
-#include <linux/platform_device.h>
-#include <linux/cs5535.h>
-
-static struct cs5535_mfgpt_timer *pwm_timer;
-
-/* this array defines the mapping of brightness in % to pwm frequency */
-static const u8 dim_table[101] = {0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
- 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9,
- 10, 10, 11, 11, 12, 12, 13, 14, 15, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24, 26, 27, 28,
- 30, 31, 33, 35, 37, 39, 41, 43, 45, 47, 50,
- 53, 55, 58, 61, 65, 68, 72, 75, 79, 84, 88,
- 93, 97, 103, 108, 114, 120, 126, 133, 140,
- 147, 155, 163};
-
-struct ot200_backlight_data {
- int current_brightness;
-};
-
-#define GPIO_DIMM 27
-#define SCALE 1
-#define CMP1MODE 0x2 /* compare on GE; output high on compare
- * greater than or equal */
-#define PWM_SETUP (SCALE | CMP1MODE << 6 | MFGPT_SETUP_CNTEN)
-#define MAX_COMP2 163
-
-static int ot200_backlight_update_status(struct backlight_device *bl)
-{
- struct ot200_backlight_data *data = bl_get_data(bl);
- int brightness = bl->props.brightness;
-
- if (bl->props.state & BL_CORE_FBBLANK)
- brightness = 0;
-
- /* enable or disable PWM timer */
- if (brightness == 0)
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, 0);
- else if (data->current_brightness == 0) {
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_COUNTER, 0);
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP,
- MFGPT_SETUP_CNTEN);
- }
-
- /* apply new brightness value */
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP1,
- MAX_COMP2 - dim_table[brightness]);
- data->current_brightness = brightness;
-
- return 0;
-}
-
-static int ot200_backlight_get_brightness(struct backlight_device *bl)
-{
- struct ot200_backlight_data *data = bl_get_data(bl);
- return data->current_brightness;
-}
-
-static const struct backlight_ops ot200_backlight_ops = {
- .update_status = ot200_backlight_update_status,
- .get_brightness = ot200_backlight_get_brightness,
-};
-
-static int ot200_backlight_probe(struct platform_device *pdev)
-{
- struct backlight_device *bl;
- struct ot200_backlight_data *data;
- struct backlight_properties props;
- int retval = 0;
-
- /* request gpio */
- if (devm_gpio_request(&pdev->dev, GPIO_DIMM,
- "ot200 backlight dimmer") < 0) {
- dev_err(&pdev->dev, "failed to request GPIO %d\n", GPIO_DIMM);
- return -ENODEV;
- }
-
- /* request timer */
- pwm_timer = cs5535_mfgpt_alloc_timer(7, MFGPT_DOMAIN_ANY);
- if (!pwm_timer) {
- dev_err(&pdev->dev, "MFGPT 7 not available\n");
- return -ENODEV;
- }
-
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- retval = -ENOMEM;
- goto error_devm_kzalloc;
- }
-
- /* setup gpio */
- cs5535_gpio_set(GPIO_DIMM, GPIO_OUTPUT_ENABLE);
- cs5535_gpio_set(GPIO_DIMM, GPIO_OUTPUT_AUX1);
-
- /* setup timer */
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP1, 0);
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP2, MAX_COMP2);
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, PWM_SETUP);
-
- data->current_brightness = 100;
- props.max_brightness = 100;
- props.brightness = 100;
- props.type = BACKLIGHT_RAW;
-
- bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev),
- &pdev->dev, data, &ot200_backlight_ops,
- &props);
- if (IS_ERR(bl)) {
- dev_err(&pdev->dev, "failed to register backlight\n");
- retval = PTR_ERR(bl);
- goto error_devm_kzalloc;
- }
-
- platform_set_drvdata(pdev, bl);
-
- return 0;
-
-error_devm_kzalloc:
- cs5535_mfgpt_free_timer(pwm_timer);
- return retval;
-}
-
-static int ot200_backlight_remove(struct platform_device *pdev)
-{
- /* on module unload set brightness to 100% */
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_COUNTER, 0);
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
- cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP1,
- MAX_COMP2 - dim_table[100]);
-
- cs5535_mfgpt_free_timer(pwm_timer);
-
- return 0;
-}
-
-static struct platform_driver ot200_backlight_driver = {
- .driver = {
- .name = "ot200-backlight",
- },
- .probe = ot200_backlight_probe,
- .remove = ot200_backlight_remove,
-};
-
-module_platform_driver(ot200_backlight_driver);
-
-MODULE_DESCRIPTION("backlight driver for ot200 visualisation device");
-MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ot200-backlight");
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 82b8d7594701..eff64db2e02e 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -108,14 +108,9 @@ static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
static int pwm_backlight_update_status(struct backlight_device *bl)
{
struct pwm_bl_data *pb = bl_get_data(bl);
- int brightness = bl->props.brightness;
+ int brightness = backlight_get_brightness(bl);
struct pwm_state state;
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK ||
- bl->props.state & BL_CORE_FBBLANK)
- brightness = 0;
-
if (pb->notify)
brightness = pb->notify(pb->dev, brightness);
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index 4c8c34b99441..3bc7800eb0a9 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -433,14 +433,9 @@ static int wled5_ovp_delay(struct wled *wled)
static int wled_update_status(struct backlight_device *bl)
{
struct wled *wled = bl_get_data(bl);
- u16 brightness = bl->props.brightness;
+ u16 brightness = backlight_get_brightness(bl);
int rc = 0;
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK ||
- bl->props.state & BL_CORE_FBBLANK)
- brightness = 0;
-
mutex_lock(&wled->lock);
if (brightness) {
rc = wled->wled_set_brightness(wled, brightness);
@@ -1287,14 +1282,6 @@ static const struct wled_var_cfg wled4_string_i_limit_cfg = {
.size = ARRAY_SIZE(wled4_string_i_limit_values),
};
-static const struct wled_var_cfg wled3_string_cfg = {
- .size = 8,
-};
-
-static const struct wled_var_cfg wled4_string_cfg = {
- .size = 16,
-};
-
static const struct wled_var_cfg wled5_mod_sel_cfg = {
.size = 2,
};
diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
index 2355f00f5773..0ce181585008 100644
--- a/drivers/video/backlight/sky81452-backlight.c
+++ b/drivers/video/backlight/sky81452-backlight.c
@@ -8,15 +8,13 @@
#include <linux/backlight.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/platform_data/sky81452-backlight.h>
#include <linux/slab.h>
/* registers */
@@ -42,6 +40,29 @@
#define SKY81452_DEFAULT_NAME "lcd-backlight"
#define SKY81452_MAX_BRIGHTNESS (SKY81452_CS + 1)
+/**
+ * struct sky81452_bl_platform_data - backlight platform data
+ * @name: backlight driver name.
+ * If it is not defined, default name is lcd-backlight.
+ * @gpiod_enable: GPIO descriptor which controls the EN pin
+ * @enable: Enable mask for current sink channels 1, 2, 3, 4, 5 and 6.
+ * @ignore_pwm: true if DPWMI should be ignored.
+ * @dpwm_mode: true if DPWM dimming mode is used, otherwise analog dimming mode.
+ * @phase_shift: true if phase shift mode is used.
+ * @short_detection_threshold: short detection threshold; one of 4, 5, 6 or 7 V.
+ * @boost_current_limit: boost current limit; either 2300 or 2750 mA.
+ */
+struct sky81452_bl_platform_data {
+ const char *name;
+ struct gpio_desc *gpiod_enable;
+ unsigned int enable;
+ bool ignore_pwm;
+ bool dpwm_mode;
+ bool phase_shift;
+ unsigned int short_detection_threshold;
+ unsigned int boost_current_limit;
+};
+
#define CTZ(b) __builtin_ctz(b)
static int sky81452_bl_update_status(struct backlight_device *bd)
@@ -182,7 +203,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
pdata->ignore_pwm = of_property_read_bool(np, "skyworks,ignore-pwm");
pdata->dpwm_mode = of_property_read_bool(np, "skyworks,dpwm-mode");
pdata->phase_shift = of_property_read_bool(np, "skyworks,phase-shift");
- pdata->gpio_enable = of_get_gpio(np, 0);
+ pdata->gpiod_enable = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
ret = of_property_count_u32_elems(np, "led-sources");
if (ret < 0) {
@@ -252,26 +273,15 @@ static int sky81452_bl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct regmap *regmap = dev_get_drvdata(dev->parent);
- struct sky81452_bl_platform_data *pdata = dev_get_platdata(dev);
+ struct sky81452_bl_platform_data *pdata;
struct backlight_device *bd;
struct backlight_properties props;
const char *name;
int ret;
- if (!pdata) {
- pdata = sky81452_bl_parse_dt(dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- }
-
- if (gpio_is_valid(pdata->gpio_enable)) {
- ret = devm_gpio_request_one(dev, pdata->gpio_enable,
- GPIOF_OUT_INIT_HIGH, "sky81452-en");
- if (ret < 0) {
- dev_err(dev, "failed to request GPIO. err=%d\n", ret);
- return ret;
- }
- }
+ pdata = sky81452_bl_parse_dt(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
ret = sky81452_bl_init_device(regmap, pdata);
if (ret < 0) {
@@ -312,8 +322,8 @@ static int sky81452_bl_remove(struct platform_device *pdev)
bd->props.brightness = 0;
backlight_update_status(bd);
- if (gpio_is_valid(pdata->gpio_enable))
- gpio_set_value_cansleep(pdata->gpio_enable, 0);
+ if (pdata->gpiod_enable)
+ gpiod_set_value_cansleep(pdata->gpiod_enable, 0);
return 0;
}
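
The descriptor lookup above returns NULL when no enable GPIO is described, which is why the remove path only needs a NULL check before driving the line low. A hedged sketch of the defensive error handling callers often add around such a lookup (not part of this hunk):

	pdata->gpiod_enable = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(pdata->gpiod_enable))
		return ERR_CAST(pdata->gpiod_enable);	/* e.g. -EPROBE_DEFER */
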
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
index 762e3feed097..8457166f357f 100644
--- a/drivers/video/backlight/tps65217_bl.c
+++ b/drivers/video/backlight/tps65217_bl.c
@@ -77,15 +77,7 @@ static int tps65217_bl_update_status(struct backlight_device *bl)
{
struct tps65217_bl *tps65217_bl = bl_get_data(bl);
int rc;
- int brightness = bl->props.brightness;
-
- if (bl->props.state & BL_CORE_SUSPENDED)
- brightness = 0;
-
- if ((bl->props.power != FB_BLANK_UNBLANK) ||
- (bl->props.fb_blank != FB_BLANK_UNBLANK))
- /* framebuffer in low power mode or blanking active */
- brightness = 0;
+ int brightness = backlight_get_brightness(bl);
if (brightness > 0) {
rc = tps65217_reg_write(tps65217_bl->tps,
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index e55977d54c15..c5aaee205bdf 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -91,18 +91,7 @@ err:
static int wm831x_backlight_update_status(struct backlight_device *bl)
{
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bl->props.state & BL_CORE_SUSPENDED)
- brightness = 0;
-
- return wm831x_backlight_set(bl, brightness);
+ return wm831x_backlight_set(bl, backlight_get_brightness(bl));
}
static int wm831x_backlight_get_brightness(struct backlight_device *bl)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 8be02f333b7a..31cc97f2f515 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -398,12 +398,9 @@ static inline s64 towards_target(struct virtio_balloon *vb)
s64 target;
u32 num_pages;
- virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages,
- &num_pages);
-
/* Legacy balloon config space is LE, unlike all other devices. */
- if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
- num_pages = le32_to_cpu((__force __le32)num_pages);
+ virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
+ &num_pages);
target = num_pages;
return target - vb->num_pages;
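
The virtio_cread_le()/virtio_cwrite_le() accessors used throughout this file read and write config fields as little-endian regardless of VIRTIO_F_VERSION_1, which is exactly what the removed open-coded sequence did by hand:

	/* The pattern being replaced, shown for comparison: */
	virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, &num_pages);
	if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
		num_pages = le32_to_cpu((__force __le32)num_pages);
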
@@ -462,11 +459,8 @@ static void update_balloon_size(struct virtio_balloon *vb)
u32 actual = vb->num_pages;
/* Legacy balloon config space is LE, unlike all other devices. */
- if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
- actual = (__force u32)cpu_to_le32(actual);
-
- virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
- &actual);
+ virtio_cwrite_le(vb->vdev, struct virtio_balloon_config, actual,
+ &actual);
}
static void update_balloon_stats_func(struct work_struct *work)
@@ -579,12 +573,10 @@ static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
{
if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
&vb->config_read_bitmap)) {
- virtio_cread(vb->vdev, struct virtio_balloon_config,
- free_page_hint_cmd_id,
- &vb->cmd_id_received_cache);
/* Legacy balloon config space is LE, unlike all other devices. */
- if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
- vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache);
+ virtio_cread_le(vb->vdev, struct virtio_balloon_config,
+ free_page_hint_cmd_id,
+ &vb->cmd_id_received_cache);
}
return vb->cmd_id_received_cache;
@@ -600,7 +592,7 @@ static int send_cmd_id_start(struct virtio_balloon *vb)
while (virtqueue_get_buf(vq, &unused))
;
- vb->cmd_id_active = virtio32_to_cpu(vb->vdev,
+ vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
virtio_balloon_cmd_id_received(vb));
sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
@@ -987,8 +979,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
if (!want_init_on_free())
memset(&poison_val, PAGE_POISON, sizeof(poison_val));
- virtio_cwrite(vb->vdev, struct virtio_balloon_config,
- poison_val, &poison_val);
+ virtio_cwrite_le(vb->vdev, struct virtio_balloon_config,
+ poison_val, &poison_val);
}
vb->pr_dev_info.report = virtballoon_free_page_report;
@@ -1129,7 +1121,7 @@ static int virtballoon_validate(struct virtio_device *vdev)
else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON))
__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING);
- __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
+ __virtio_clear_bit(vdev, VIRTIO_F_ACCESS_PLATFORM);
return 0;
}
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index efaf65b0f42d..877b2ea3ed05 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -113,9 +113,9 @@ static u8 virtinput_cfg_select(struct virtio_input *vi,
{
u8 size;
- virtio_cwrite(vi->vdev, struct virtio_input_config, select, &select);
- virtio_cwrite(vi->vdev, struct virtio_input_config, subsel, &subsel);
- virtio_cread(vi->vdev, struct virtio_input_config, size, &size);
+ virtio_cwrite_le(vi->vdev, struct virtio_input_config, select, &select);
+ virtio_cwrite_le(vi->vdev, struct virtio_input_config, subsel, &subsel);
+ virtio_cread_le(vi->vdev, struct virtio_input_config, size, &size);
return size;
}
@@ -158,11 +158,11 @@ static void virtinput_cfg_abs(struct virtio_input *vi, int abs)
u32 mi, ma, re, fu, fl;
virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ABS_INFO, abs);
- virtio_cread(vi->vdev, struct virtio_input_config, u.abs.min, &mi);
- virtio_cread(vi->vdev, struct virtio_input_config, u.abs.max, &ma);
- virtio_cread(vi->vdev, struct virtio_input_config, u.abs.res, &re);
- virtio_cread(vi->vdev, struct virtio_input_config, u.abs.fuzz, &fu);
- virtio_cread(vi->vdev, struct virtio_input_config, u.abs.flat, &fl);
+ virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.min, &mi);
+ virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.max, &ma);
+ virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.res, &re);
+ virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.fuzz, &fu);
+ virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.flat, &fl);
input_set_abs_params(vi->idev, abs, mi, ma, fu, fl);
input_abs_set_res(vi->idev, abs, re);
}
@@ -244,14 +244,14 @@ static int virtinput_probe(struct virtio_device *vdev)
size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_DEVIDS, 0);
if (size >= sizeof(struct virtio_input_devids)) {
- virtio_cread(vi->vdev, struct virtio_input_config,
- u.ids.bustype, &vi->idev->id.bustype);
- virtio_cread(vi->vdev, struct virtio_input_config,
- u.ids.vendor, &vi->idev->id.vendor);
- virtio_cread(vi->vdev, struct virtio_input_config,
- u.ids.product, &vi->idev->id.product);
- virtio_cread(vi->vdev, struct virtio_input_config,
- u.ids.version, &vi->idev->id.version);
+ virtio_cread_le(vi->vdev, struct virtio_input_config,
+ u.ids.bustype, &vi->idev->id.bustype);
+ virtio_cread_le(vi->vdev, struct virtio_input_config,
+ u.ids.vendor, &vi->idev->id.vendor);
+ virtio_cread_le(vi->vdev, struct virtio_input_config,
+ u.ids.product, &vi->idev->id.product);
+ virtio_cread_le(vi->vdev, struct virtio_input_config,
+ u.ids.version, &vi->idev->id.version);
} else {
vi->idev->id.bustype = BUS_VIRTUAL;
}
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index f26f5f64ae82..c08512fcea90 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -1530,21 +1530,21 @@ static void virtio_mem_refresh_config(struct virtio_mem *vm)
uint64_t new_plugged_size, usable_region_size, end_addr;
/* the plugged_size is just a reflection of what _we_ did previously */
- virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size,
- &new_plugged_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
+ &new_plugged_size);
if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
vm->plugged_size = new_plugged_size;
/* calculate the last usable memory block id */
- virtio_cread(vm->vdev, struct virtio_mem_config,
- usable_region_size, &usable_region_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config,
+ usable_region_size, &usable_region_size);
end_addr = vm->addr + usable_region_size;
end_addr = min(end_addr, phys_limit);
vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1;
/* see if there is a request to change the size */
- virtio_cread(vm->vdev, struct virtio_mem_config, requested_size,
- &vm->requested_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
+ &vm->requested_size);
dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
@@ -1677,16 +1677,16 @@ static int virtio_mem_init(struct virtio_mem *vm)
}
/* Fetch all properties that can't change. */
- virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size,
- &vm->plugged_size);
- virtio_cread(vm->vdev, struct virtio_mem_config, block_size,
- &vm->device_block_size);
- virtio_cread(vm->vdev, struct virtio_mem_config, node_id,
- &node_id);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
+ &vm->plugged_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
+ &vm->device_block_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
+ &node_id);
vm->nid = virtio_mem_translate_node_id(vm, node_id);
- virtio_cread(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
- virtio_cread(vm->vdev, struct virtio_mem_config, region_size,
- &vm->region_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
+ &vm->region_size);
/*
* We always hotplug memory in memory block granularity. This way,
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index db93cedd262f..9bdc6f68221f 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -481,6 +481,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
* @dev: the pci device
* @cfg_type: the VIRTIO_PCI_CAP_* value we seek
* @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
+ * @bars: the bitmask of BARs
*
* Returns offset of the capability, or 0.
*/
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index a2de775801af..becc77697960 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -240,7 +240,7 @@ static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
static bool vring_use_dma_api(struct virtio_device *vdev)
{
- if (!virtio_has_iommu_quirk(vdev))
+ if (!virtio_has_dma_quirk(vdev))
return true;
/* Otherwise, we are left to guess. */
@@ -1960,6 +1960,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ if (unlikely(vq->broken))
+ return false;
+
virtio_mb(vq->weak_barriers);
return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
virtqueue_poll_split(_vq, last_used_idx);
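
The new broken-queue check short-circuits the usual callback re-enable/poll dance. A hedged sketch of the caller pattern it protects (the names are the existing virtqueue API):

	unsigned int opaque = virtqueue_enable_cb_prepare(vq);

	/* more buffers may complete in the meantime */

	if (virtqueue_poll(vq, opaque)) {
		virtqueue_disable_cb(vq);
		/* drain completions with virtqueue_get_buf() */
	}
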
@@ -2225,7 +2228,7 @@ void vring_transport_features(struct virtio_device *vdev)
break;
case VIRTIO_F_VERSION_1:
break;
- case VIRTIO_F_IOMMU_PLATFORM:
+ case VIRTIO_F_ACCESS_PLATFORM:
break;
case VIRTIO_F_RING_PACKED:
break;
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index c30eb55030be..4a9ddb44b2a7 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -57,9 +57,8 @@ static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- const struct vdpa_config_ops *ops = vdpa->config;
- ops->get_config(vdpa, offset, buf, len);
+ vdpa_get_config(vdpa, offset, buf, len);
}
static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
@@ -101,9 +100,8 @@ static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
static void virtio_vdpa_reset(struct virtio_device *vdev)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- const struct vdpa_config_ops *ops = vdpa->config;
- return ops->set_status(vdpa, 0);
+ vdpa_reset(vdpa);
}
static bool virtio_vdpa_notify(struct virtqueue *vq)
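
vdpa_get_config(), vdpa_reset() and vdpa_set_features() are thin wrappers provided by include/linux/vdpa.h; switching to them lets the vdpa core track feature-negotiation state centrally instead of each transport open-coding the ops calls. A rough sketch of the reset wrapper, under the assumption that it simply clears that state and forwards to set_status (the authoritative definition lives in the header):

	static inline void vdpa_reset(struct vdpa_device *vdev)
	{
		const struct vdpa_config_ops *ops = vdev->config;

		vdev->features_valid = false;	/* assumed bookkeeping field */
		ops->set_status(vdev, 0);
	}
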
@@ -294,12 +292,11 @@ static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- const struct vdpa_config_ops *ops = vdpa->config;
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
- return ops->set_features(vdpa, vdev->features);
+ return vdpa_set_features(vdpa, vdev->features);
}
static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)