Diffstat (limited to 'drivers')
 drivers/acpi/acpica/acevents.h | 6
 drivers/acpi/acpica/evregion.c | 12
 drivers/acpi/acpica/evxfregn.c | 64
 drivers/acpi/ec.c | 14
 drivers/acpi/internal.h | 1
 drivers/acpi/scan.c | 2
 drivers/ata/libata-scsi.c | 15
 drivers/atm/idt77252.c | 9
 drivers/char/xillybus/xillyusb.c | 42
 drivers/clk/thead/clk-th1520-ap.c | 2
 drivers/gpio/gpio-mlxbf3.c | 14
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 8
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 26
 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 5
 drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 83
 drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 16
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 5
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 2
 drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 27
 drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 2
 drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 2
 drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 4
 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 63
 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h | 7
 drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c | 1
 drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 59
 drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 296
 drivers/gpu/drm/amd/amdgpu/soc15d.h | 6
 drivers/gpu/drm/amd/amdgpu/soc24.c | 2
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 33
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 3
 drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 4
 drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 3
 drivers/gpu/drm/amd/include/mes_v12_api_def.h | 7
 drivers/gpu/drm/drm_panel_orientation_quirks.c | 12
 drivers/gpu/drm/mediatek/mtk_drm_drv.c | 4
 drivers/gpu/drm/rockchip/inno_hdmi.c | 4
 drivers/gpu/drm/v3d/v3d_sched.c | 14
 drivers/gpu/drm/xe/xe_device.c | 59
 drivers/gpu/drm/xe/xe_device.h | 3
 drivers/gpu/drm/xe/xe_device_types.h | 3
 drivers/gpu/drm/xe/xe_drm_client.c | 5
 drivers/gpu/drm/xe/xe_exec_queue.c | 10
 drivers/gpu/drm/xe/xe_exec_queue_types.h | 7
 drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 11
 drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 2
 drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 201
 drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h | 12
 drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h | 4
 drivers/gpu/drm/xe/xe_guc_ct.c | 10
 drivers/gpu/drm/xe/xe_guc_submit.c | 4
 drivers/gpu/drm/xe/xe_pt.c | 26
 drivers/gpu/drm/xe/xe_sync.c | 12
 drivers/gpu/drm/xe/xe_vm.c | 38
 drivers/i2c/busses/i2c-qcom-geni.c | 4
 drivers/i2c/busses/i2c-tegra.c | 4
 drivers/iommu/io-pgfault.c | 1
 drivers/md/dm-ioctl.c | 22
 drivers/md/dm.c | 4
 drivers/md/persistent-data/dm-space-map-metadata.c | 4
 drivers/md/raid1.c | 14
 drivers/media/usb/dvb-usb/dvb-usb-init.c | 35
 drivers/misc/fastrpc.c | 22
 drivers/misc/lkdtm/refcount.c | 16
 drivers/net/dsa/vitesse-vsc73xx-core.c | 54
 drivers/net/ethernet/cadence/macb_main.c | 4
 drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 30
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 3
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 6
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 30
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 3
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 4
 drivers/net/ethernet/intel/igc/igc_defines.h | 6
 drivers/net/ethernet/intel/igc/igc_main.c | 8
 drivers/net/ethernet/intel/igc/igc_tsn.c | 76
 drivers/net/ethernet/intel/igc/igc_tsn.h | 1
 drivers/net/ethernet/jme.c | 10
 drivers/net/ethernet/mediatek/mtk_wed.c | 6
 drivers/net/ethernet/mellanox/mlx5/core/en.h | 2
 drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 16
 drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 1
 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 17
 drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c | 18
 drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h | 8
 drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 10
 drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h | 2
 drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c | 50
 drivers/net/ethernet/microsoft/mana/mana_en.c | 30
 drivers/net/ethernet/xilinx/xilinx_axienet.h | 16
 drivers/net/gtp.c | 3
 drivers/net/phy/vitesse.c | 14
 drivers/net/pse-pd/pse_core.c | 11
 drivers/net/usb/ipheth.c | 20
 drivers/net/wireless/ath/ath12k/dp_tx.c | 72
 drivers/net/wireless/ath/ath12k/hw.c | 6
 drivers/net/wireless/ath/ath12k/hw.h | 4
 drivers/net/wireless/ath/ath12k/mac.c | 1
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 13
 drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 3
 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 5
 drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 32
 drivers/net/wireless/mediatek/mt76/mt7921/main.c | 2
 drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c | 2
 drivers/nvdimm/pmem.c | 2
 drivers/of/irq.c | 15
 drivers/platform/x86/Kconfig | 1
 drivers/platform/x86/amd/pmf/spc.c | 32
 drivers/platform/x86/ideapad-laptop.c | 148
 drivers/platform/x86/ideapad-laptop.h | 9
 drivers/platform/x86/lenovo-ymc.c | 60
 drivers/s390/block/dasd.c | 36
 drivers/s390/block/dasd_3990_erp.c | 10
 drivers/s390/block/dasd_eckd.c | 55
 drivers/s390/block/dasd_genhd.c | 1
 drivers/s390/block/dasd_int.h | 2
 drivers/scsi/mpi3mr/mpi3mr_app.c | 11
 drivers/scsi/mpi3mr/mpi3mr_os.c | 1
 drivers/soc/fsl/qbman/qman.c | 5
 drivers/staging/media/atomisp/pci/ia_css_stream_public.h | 8
 drivers/staging/media/atomisp/pci/sh_css_internal.h | 19
 drivers/thermal/gov_bang_bang.c | 83
 drivers/thermal/thermal_core.c | 3
 drivers/thunderbolt/debugfs.c | 10
 drivers/thunderbolt/switch.c | 1
 drivers/tty/serial/8250/8250_omap.c | 33
 drivers/tty/serial/atmel_serial.c | 2
 drivers/tty/serial/fsl_lpuart.c | 1
 drivers/tty/vt/conmakehash.c | 12
 drivers/usb/host/xhci-mem.c | 2
 drivers/usb/host/xhci-ring.c | 1
 drivers/usb/host/xhci.c | 8
 drivers/usb/misc/usb-ljca.c | 1
 drivers/usb/typec/tcpm/tcpm.c | 1
 drivers/usb/typec/ucsi/ucsi.c | 2
 139 files changed, 1642 insertions(+), 956 deletions(-)
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 2133085deda7..1c5218b79fc2 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -188,13 +188,9 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
u8 acpi_ns_is_locked);
void
-acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth,
acpi_adr_space_type space_id, u32 function);
-void
-acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node,
- acpi_adr_space_type space_id);
-
acpi_status
acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index dc6004daf624..cf53b9535f18 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -20,6 +20,10 @@ extern u8 acpi_gbl_default_address_spaces[];
/* Local prototypes */
+static void
+acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
+ acpi_adr_space_type space_id);
+
static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
@@ -61,6 +65,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
acpi_gbl_default_address_spaces
[i])) {
acpi_ev_execute_reg_methods(acpi_gbl_root_node,
+ ACPI_UINT32_MAX,
acpi_gbl_default_address_spaces
[i], ACPI_REG_CONNECT);
}
@@ -668,6 +673,7 @@ cleanup1:
* FUNCTION: acpi_ev_execute_reg_methods
*
* PARAMETERS: node - Namespace node for the device
+ * max_depth - Depth to which search for _REG
* space_id - The address space ID
* function - Passed to _REG: On (1) or Off (0)
*
@@ -679,7 +685,7 @@ cleanup1:
******************************************************************************/
void
-acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth,
acpi_adr_space_type space_id, u32 function)
{
struct acpi_reg_walk_info info;
@@ -713,7 +719,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
* regions and _REG methods. (i.e. handlers must be installed for all
* regions of this Space ID before we can run any _REG methods)
*/
- (void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
+ (void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, max_depth,
ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, NULL,
&info, NULL);
@@ -814,7 +820,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
*
******************************************************************************/
-void
+static void
acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
acpi_adr_space_type space_id)
{
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 624361a5f34d..95f78383bbdb 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -85,7 +85,8 @@ acpi_install_address_space_handler_internal(acpi_handle device,
/* Run all _REG methods for this address space */
if (run_reg) {
- acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
+ acpi_ev_execute_reg_methods(node, ACPI_UINT32_MAX, space_id,
+ ACPI_REG_CONNECT);
}
unlock_and_exit:
@@ -263,6 +264,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
* FUNCTION: acpi_execute_reg_methods
*
* PARAMETERS: device - Handle for the device
+ * max_depth - Depth to which search for _REG
* space_id - The address space ID
*
* RETURN: Status
@@ -271,7 +273,8 @@ ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
*
******************************************************************************/
acpi_status
-acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
+acpi_execute_reg_methods(acpi_handle device, u32 max_depth,
+ acpi_adr_space_type space_id)
{
struct acpi_namespace_node *node;
acpi_status status;
@@ -296,7 +299,8 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
/* Run all _REG methods for this address space */
- acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
+ acpi_ev_execute_reg_methods(node, max_depth, space_id,
+ ACPI_REG_CONNECT);
} else {
status = AE_BAD_PARAMETER;
}
@@ -306,57 +310,3 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
}
ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_execute_orphan_reg_method
- *
- * PARAMETERS: device - Handle for the device
- * space_id - The address space ID
- *
- * RETURN: Status
- *
- * DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI
- * device. This is a _REG method that has no corresponding region
- * within the device's scope.
- *
- ******************************************************************************/
-acpi_status
-acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id)
-{
- struct acpi_namespace_node *node;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method);
-
- /* Parameter validation */
-
- if (!device) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Convert and validate the device handle */
-
- node = acpi_ns_validate_handle(device);
- if (node) {
-
- /*
- * If an "orphan" _REG method is present in the device's scope
- * for the given address space ID, run it.
- */
-
- acpi_ev_execute_orphan_reg_method(node, space_id);
- } else {
- status = AE_BAD_PARAMETER;
- }
-
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 299ec653388c..38d2f6e6b12b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1487,12 +1487,13 @@ static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
bool call_reg)
{
- acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
acpi_status status;
acpi_ec_start(ec, false);
if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+ acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
+
acpi_ec_enter_noirq(ec);
status = acpi_install_address_space_handler_no_reg(scope_handle,
ACPI_ADR_SPACE_EC,
@@ -1506,10 +1507,7 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
}
if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
- acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC);
- if (scope_handle != ec->handle)
- acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC);
-
+ acpi_execute_reg_methods(ec->handle, ACPI_UINT32_MAX, ACPI_ADR_SPACE_EC);
set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
}
@@ -1724,6 +1722,12 @@ static void acpi_ec_remove(struct acpi_device *device)
}
}
+void acpi_ec_register_opregions(struct acpi_device *adev)
+{
+ if (first_ec && first_ec->handle != adev->handle)
+ acpi_execute_reg_methods(adev->handle, 1, ACPI_ADR_SPACE_EC);
+}
+
static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{
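The max_depth parameter added above is what makes the EC rework possible: acpi_ec_register_opregions() passes a depth of 1 so that only _REG methods directly inside the device's scope run, while ec_install_handlers() keeps the old recurse-everything behaviour with ACPI_UINT32_MAX. A minimal sketch of the two call patterns, using a hypothetical helper around the patched API:

#include <linux/acpi.h>

/* Hypothetical illustration; only the acpi_execute_reg_methods()
 * signature is taken from the patch above. */
static void example_run_ec_reg_methods(acpi_handle handle)
{
	/* Depth 1: run only _REG methods declared directly under 'handle'. */
	acpi_execute_reg_methods(handle, 1, ACPI_ADR_SPACE_EC);

	/* ACPI_UINT32_MAX: walk the entire subtree, the pre-patch default. */
	acpi_execute_reg_methods(handle, ACPI_UINT32_MAX, ACPI_ADR_SPACE_EC);
}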
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 601b670356e5..aadd4c218b32 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -223,6 +223,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
acpi_handle handle, acpi_ec_query_func func,
void *data);
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
+void acpi_ec_register_opregions(struct acpi_device *adev);
#ifdef CONFIG_PM_SLEEP
void acpi_ec_flush_work(void);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 59771412686b..22ae7829a915 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2273,6 +2273,8 @@ static int acpi_bus_attach(struct acpi_device *device, void *first_pass)
if (device->handler)
goto ok;
+ acpi_ec_register_opregions(device);
+
if (!device->flags.initialized) {
device->flags.power_manageable =
device->power.states[ACPI_STATE_D0].flags.valid;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d6f5e25e1ed8..473e00a58a8b 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -951,8 +951,19 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
&sense_key, &asc, &ascq);
ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
} else {
- /* ATA PASS-THROUGH INFORMATION AVAILABLE */
- ata_scsi_set_sense(qc->dev, cmd, RECOVERED_ERROR, 0, 0x1D);
+ /*
+ * ATA PASS-THROUGH INFORMATION AVAILABLE
+ *
+ * Note: we are supposed to call ata_scsi_set_sense(), which
+ * respects the D_SENSE bit, instead of unconditionally
+ * generating the sense data in descriptor format. However,
+ * because hdparm, hddtemp, and udisks incorrectly assume sense
+ * data in descriptor format, without even looking at the
+ * RESPONSE CODE field in the returned sense data (to see which
+ * format the returned sense data is in), we are stuck with
+ * being bug compatible with older kernels.
+ */
+ scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
}
}
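scsi_build_sense() takes the format selector directly: a second argument of 1 emits descriptor-format sense data unconditionally, whereas ata_scsi_set_sense() would have honoured the device's D_SENSE setting. A minimal sketch of the call at a hypothetical call site:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_proto.h>

static void example_passthru_sense(struct scsi_cmnd *cmd)
{
	/* desc=1 forces descriptor format; RECOVERED ERROR with
	 * ASC/ASCQ 0x00/0x1D means "ATA PASS-THROUGH INFORMATION
	 * AVAILABLE". */
	scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
}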
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index e7f713cd70d3..a876024d8a05 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1118,8 +1118,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
rpp->len += skb->len;
if (stat & SAR_RSQE_EPDU) {
+ unsigned int len, truesize;
unsigned char *l1l2;
- unsigned int len;
l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6);
@@ -1189,14 +1189,15 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
ATM_SKB(skb)->vcc = vcc;
__net_timestamp(skb);
+ truesize = skb->truesize;
vcc->push(vcc, skb);
atomic_inc(&vcc->stats->rx);
- if (skb->truesize > SAR_FB_SIZE_3)
+ if (truesize > SAR_FB_SIZE_3)
add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
- else if (skb->truesize > SAR_FB_SIZE_2)
+ else if (truesize > SAR_FB_SIZE_2)
add_rx_skb(card, 2, SAR_FB_SIZE_2, 1);
- else if (skb->truesize > SAR_FB_SIZE_1)
+ else if (truesize > SAR_FB_SIZE_1)
add_rx_skb(card, 1, SAR_FB_SIZE_1, 1);
else
add_rx_skb(card, 0, SAR_FB_SIZE_0, 1);
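The idt77252 fix is an instance of a general ownership rule: vcc->push() hands the skb to the upper layer, which may free or resize it, so any field needed afterwards has to be read first. A minimal sketch of the pattern, with refill_pool_by_size() as a hypothetical stand-in for the driver's refill logic:

#include <linux/atmdev.h>
#include <linux/skbuff.h>

static void refill_pool_by_size(unsigned int truesize) { /* hypothetical */ }

static void example_deliver(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned int truesize = skb->truesize;	/* cache while we still own skb */

	vcc->push(vcc, skb);			/* ownership transfers here */

	/* skb may already be freed; only the cached copy is safe to use. */
	refill_pool_by_size(truesize);
}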
diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
index 5a5afa14ca8c..45771b1a3716 100644
--- a/drivers/char/xillybus/xillyusb.c
+++ b/drivers/char/xillybus/xillyusb.c
@@ -50,6 +50,7 @@ MODULE_LICENSE("GPL v2");
static const char xillyname[] = "xillyusb";
static unsigned int fifo_buf_order;
+static struct workqueue_struct *wakeup_wq;
#define USB_VENDOR_ID_XILINX 0x03fd
#define USB_VENDOR_ID_ALTERA 0x09fb
@@ -569,10 +570,6 @@ static void cleanup_dev(struct kref *kref)
* errors if executed. The mechanism relies on that xdev->error is assigned
* a non-zero value by report_io_error() prior to queueing wakeup_all(),
* which prevents bulk_in_work() from calling process_bulk_in().
- *
- * The fact that wakeup_all() and bulk_in_work() are queued on the same
- * workqueue makes their concurrent execution very unlikely, however the
- * kernel's API doesn't seem to ensure this strictly.
*/
static void wakeup_all(struct work_struct *work)
@@ -627,7 +624,7 @@ static void report_io_error(struct xillyusb_dev *xdev,
if (do_once) {
kref_get(&xdev->kref); /* xdev is used by work item */
- queue_work(xdev->workq, &xdev->wakeup_workitem);
+ queue_work(wakeup_wq, &xdev->wakeup_workitem);
}
}
@@ -1906,6 +1903,13 @@ static const struct file_operations xillyusb_fops = {
static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
{
+ struct usb_device *udev = xdev->udev;
+
+ /* Verify that device has the two fundamental bulk in/out endpoints */
+ if (usb_pipe_type_check(udev, usb_sndbulkpipe(udev, MSG_EP_NUM)) ||
+ usb_pipe_type_check(udev, usb_rcvbulkpipe(udev, IN_EP_NUM)))
+ return -ENODEV;
+
xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
bulk_out_work, 1, 2);
if (!xdev->msg_ep)
@@ -1935,14 +1939,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
__le16 *chandesc,
int num_channels)
{
- struct xillyusb_channel *chan;
+ struct usb_device *udev = xdev->udev;
+ struct xillyusb_channel *chan, *new_channels;
int i;
chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
if (!chan)
return -ENOMEM;
- xdev->channels = chan;
+ new_channels = chan;
for (i = 0; i < num_channels; i++, chan++) {
unsigned int in_desc = le16_to_cpu(*chandesc++);
@@ -1971,6 +1976,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
*/
if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
+ if (usb_pipe_type_check(udev,
+ usb_sndbulkpipe(udev, i + 2))) {
+ dev_err(xdev->dev,
+ "Missing BULK OUT endpoint %d\n",
+ i + 2);
+ kfree(new_channels);
+ return -ENODEV;
+ }
+
chan->writable = 1;
chan->out_synchronous = !!(out_desc & 0x40);
chan->out_seekable = !!(out_desc & 0x20);
@@ -1980,6 +1994,7 @@ static int setup_channels(struct xillyusb_dev *xdev,
}
}
+ xdev->channels = new_channels;
return 0;
}
@@ -2096,9 +2111,11 @@ static int xillyusb_discovery(struct usb_interface *interface)
* just after responding with the IDT, there is no reason for any
* work item to be running now. To be sure that xdev->channels
* is updated on anything that might run in parallel, flush the
- * workqueue, which rarely does anything.
+ * device's workqueue and the wakeup work item. This rarely
+ * does anything.
*/
flush_workqueue(xdev->workq);
+ flush_work(&xdev->wakeup_workitem);
xdev->num_channels = num_channels;
@@ -2258,6 +2275,10 @@ static int __init xillyusb_init(void)
{
int rc = 0;
+ wakeup_wq = alloc_workqueue(xillyname, 0, 0);
+ if (!wakeup_wq)
+ return -ENOMEM;
+
if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
else
@@ -2265,12 +2286,17 @@ static int __init xillyusb_init(void)
rc = usb_register(&xillyusb_driver);
+ if (rc)
+ destroy_workqueue(wakeup_wq);
+
return rc;
}
static void __exit xillyusb_exit(void)
{
usb_deregister(&xillyusb_driver);
+
+ destroy_workqueue(wakeup_wq);
}
module_init(xillyusb_init);
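The init/exit changes follow the standard pairing for a module-global workqueue: allocate it before usb_register(), destroy it if registration fails, and destroy it in the exit path only after usb_deregister() has stopped all producers. A condensed sketch of that shape (example_driver is hypothetical):

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wakeup_wq;
static struct usb_driver example_driver;	/* hypothetical */

static int __init example_init(void)
{
	int rc;

	wakeup_wq = alloc_workqueue("example", 0, 0);
	if (!wakeup_wq)
		return -ENOMEM;

	rc = usb_register(&example_driver);
	if (rc)
		destroy_workqueue(wakeup_wq);	/* undo on failure */

	return rc;
}

static void __exit example_exit(void)
{
	usb_deregister(&example_driver);	/* no new work after this */
	destroy_workqueue(wakeup_wq);
}

module_init(example_init);
module_exit(example_exit);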
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index cbc176b27c09..17e32ae08720 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -738,7 +738,7 @@ static struct ccu_div vp_axi_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("vp-axi",
video_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_IGNORE_UNUSED),
},
};
diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c
index d5906d419b0a..10ea71273c89 100644
--- a/drivers/gpio/gpio-mlxbf3.c
+++ b/drivers/gpio/gpio-mlxbf3.c
@@ -39,6 +39,8 @@
#define MLXBF_GPIO_CAUSE_OR_EVTEN0 0x14
#define MLXBF_GPIO_CAUSE_OR_CLRCAUSE 0x18
+#define MLXBF_GPIO_CLR_ALL_INTS GENMASK(31, 0)
+
struct mlxbf3_gpio_context {
struct gpio_chip gc;
@@ -82,6 +84,8 @@ static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
val &= ~BIT(offset);
writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
+
+ writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
gpiochip_disable_irq(gc, offset);
@@ -253,6 +257,15 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
return 0;
}
+static void mlxbf3_gpio_shutdown(struct platform_device *pdev)
+{
+ struct mlxbf3_gpio_context *gs = platform_get_drvdata(pdev);
+
+ /* Disable and clear all interrupts */
+ writel(0, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
+ writel(MLXBF_GPIO_CLR_ALL_INTS, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
+}
+
static const struct acpi_device_id mlxbf3_gpio_acpi_match[] = {
{ "MLNXBF33", 0 },
{}
@@ -265,6 +278,7 @@ static struct platform_driver mlxbf3_gpio_driver = {
.acpi_match_table = mlxbf3_gpio_acpi_match,
},
.probe = mlxbf3_gpio_probe,
+ .shutdown = mlxbf3_gpio_shutdown,
};
module_platform_driver(mlxbf3_gpio_driver);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9aa952f258cf..6dfdff58bffd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1057,6 +1057,9 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
r = amdgpu_ring_parse_cs(ring, p, job, ib);
if (r)
return r;
+
+ if (ib->sa_bo)
+ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
} else {
ib->ptr = (uint32_t *)kptr;
r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 5cb33ac99f70..c43d1b6e5d66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -685,16 +685,24 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
+ if (args->in.flags)
+ return -EINVAL;
r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
args->out.alloc.ctx_id = id;
break;
case AMDGPU_CTX_OP_FREE_CTX:
+ if (args->in.flags)
+ return -EINVAL;
r = amdgpu_ctx_free(fpriv, id);
break;
case AMDGPU_CTX_OP_QUERY_STATE:
+ if (args->in.flags)
+ return -EINVAL;
r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
break;
case AMDGPU_CTX_OP_QUERY_STATE2:
+ if (args->in.flags)
+ return -EINVAL;
r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
break;
case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
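Returning -EINVAL for any set flag bit is the usual way to keep an ioctl extensible: if userspace can pass garbage today without error, those bits can never be given a meaning later. A generic sketch of the idiom (names hypothetical):

#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_VALID_FLAGS	0	/* no flags defined yet */

static int example_check_flags(u32 flags)
{
	/* Reject unknown bits so they stay available for future uAPI. */
	if (flags & ~EXAMPLE_VALID_FLAGS)
		return -EINVAL;

	return 0;
}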
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 82452606ae6c..c770cb201e64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -509,6 +509,16 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
int i, r = 0;
int j;
+ if (adev->enable_mes) {
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ amdgpu_mes_unmap_legacy_queue(adev,
+ &adev->gfx.compute_ring[j],
+ RESET_QUEUES, 0, 0);
+ }
+ return 0;
+ }
+
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
@@ -551,6 +561,18 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
int i, r = 0;
int j;
+ if (adev->enable_mes) {
+ if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_gfx_rings;
+ amdgpu_mes_unmap_legacy_queue(adev,
+ &adev->gfx.gfx_ring[j],
+ PREEMPT_QUEUES, 0, 0);
+ }
+ }
+ return 0;
+ }
+
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
@@ -995,7 +1017,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id)
if (amdgpu_device_skip_hw_access(adev))
return 0;
- if (adev->mes.ring.sched.ready)
+ if (adev->mes.ring[0].sched.ready)
return amdgpu_mes_rreg(adev, reg);
BUG_ON(!ring->funcs->emit_rreg);
@@ -1065,7 +1087,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id)
if (amdgpu_device_skip_hw_access(adev))
return;
- if (adev->mes.ring.sched.ready) {
+ if (adev->mes.ring[0].sched.ready) {
amdgpu_mes_wreg(adev, reg, v);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index c02659025656..b49b3650fd62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -589,7 +589,8 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
ring = adev->rings[i];
vmhub = ring->vm_hub;
- if (ring == &adev->mes.ring ||
+ if (ring == &adev->mes.ring[0] ||
+ ring == &adev->mes.ring[1] ||
ring == &adev->umsch_mm.ring)
continue;
@@ -761,7 +762,7 @@ void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
unsigned long flags;
uint32_t seq;
- if (adev->mes.ring.sched.ready) {
+ if (adev->mes.ring[0].sched.ready) {
amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
ref, mask);
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index dac88d2dd70d..1cb1ec7beefe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -135,9 +135,11 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
idr_init(&adev->mes.queue_id_idr);
ida_init(&adev->mes.doorbell_ida);
spin_lock_init(&adev->mes.queue_id_lock);
- spin_lock_init(&adev->mes.ring_lock);
mutex_init(&adev->mes.mutex_hidden);
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
+ spin_lock_init(&adev->mes.ring_lock[i]);
+
adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
adev->mes.vmid_mask_mmhub = 0xffffff00;
adev->mes.vmid_mask_gfxhub = 0xffffff00;
@@ -163,36 +165,38 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
adev->mes.sdma_hqd_mask[i] = 0xfc;
}
- r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
- if (r) {
- dev_err(adev->dev,
- "(%d) ring trail_fence_offs wb alloc failed\n", r);
- goto error_ids;
- }
- adev->mes.sch_ctx_gpu_addr =
- adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
- adev->mes.sch_ctx_ptr =
- (uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+ r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
+ if (r) {
+ dev_err(adev->dev,
+ "(%d) ring trail_fence_offs wb alloc failed\n",
+ r);
+ goto error;
+ }
+ adev->mes.sch_ctx_gpu_addr[i] =
+ adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
+ adev->mes.sch_ctx_ptr[i] =
+ (uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];
- r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
- if (r) {
- amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
- dev_err(adev->dev,
- "(%d) query_status_fence_offs wb alloc failed\n", r);
- goto error_ids;
+ r = amdgpu_device_wb_get(adev,
+ &adev->mes.query_status_fence_offs[i]);
+ if (r) {
+ dev_err(adev->dev,
+ "(%d) query_status_fence_offs wb alloc failed\n",
+ r);
+ goto error;
+ }
+ adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
+ (adev->mes.query_status_fence_offs[i] * 4);
+ adev->mes.query_status_fence_ptr[i] =
+ (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
}
- adev->mes.query_status_fence_gpu_addr =
- adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
- adev->mes.query_status_fence_ptr =
- (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];
r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
if (r) {
- amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
- amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
dev_err(adev->dev,
"(%d) read_val_offs alloc failed\n", r);
- goto error_ids;
+ goto error;
}
adev->mes.read_val_gpu_addr =
adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
@@ -212,10 +216,16 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
error_doorbell:
amdgpu_mes_doorbell_free(adev);
error:
- amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
- amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
- amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
-error_ids:
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+ if (adev->mes.sch_ctx_ptr[i])
+ amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
+ if (adev->mes.query_status_fence_ptr[i])
+ amdgpu_device_wb_free(adev,
+ adev->mes.query_status_fence_offs[i]);
+ }
+ if (adev->mes.read_val_ptr)
+ amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
+
idr_destroy(&adev->mes.pasid_idr);
idr_destroy(&adev->mes.gang_id_idr);
idr_destroy(&adev->mes.queue_id_idr);
@@ -226,13 +236,22 @@ error_ids:
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
+ int i;
+
amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
&adev->mes.event_log_cpu_addr);
- amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
- amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
- amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+ if (adev->mes.sch_ctx_ptr[i])
+ amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
+ if (adev->mes.query_status_fence_ptr[i])
+ amdgpu_device_wb_free(adev,
+ adev->mes.query_status_fence_offs[i]);
+ }
+ if (adev->mes.read_val_ptr)
+ amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
+
amdgpu_mes_doorbell_free(adev);
idr_destroy(&adev->mes.pasid_idr);
@@ -1499,7 +1518,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
sizeof(ucode_prefix));
- if (adev->enable_uni_mes && pipe == AMDGPU_MES_SCHED_PIPE) {
+ if (adev->enable_uni_mes) {
snprintf(fw_name, sizeof(fw_name),
"amdgpu/%s_uni_mes.bin", ucode_prefix);
} else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
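The reworked unwind above relies on the wb pointers staying NULL until their allocation succeeds, so one error label can free exactly what was set up, no matter where the loop failed. A generic sketch of the idiom, with all names hypothetical and the context struct assumed zero-initialized:

#include <linux/types.h>

#define EXAMPLE_NUM_PIPES 2

struct example_ctx {
	u64 *ptr[EXAMPLE_NUM_PIPES];	/* NULL until allocated */
};

static int example_alloc_slot(struct example_ctx *ctx, int i);		/* hypothetical */
static void example_free_slot(struct example_ctx *ctx, int i);		/* hypothetical */

static int example_alloc_all(struct example_ctx *ctx)
{
	int i, r;

	for (i = 0; i < EXAMPLE_NUM_PIPES; i++) {
		r = example_alloc_slot(ctx, i);	/* sets ctx->ptr[i] on success */
		if (r)
			goto error;
	}
	return 0;

error:
	/* The pointer doubles as an "is allocated" flag, so a partial
	 * failure frees only what actually exists. */
	for (i = 0; i < EXAMPLE_NUM_PIPES; i++)
		if (ctx->ptr[i])
			example_free_slot(ctx, i);
	return r;
}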
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 2d659c612f03..0bc837dab578 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -82,8 +82,8 @@ struct amdgpu_mes {
uint64_t default_process_quantum;
uint64_t default_gang_quantum;
- struct amdgpu_ring ring;
- spinlock_t ring_lock;
+ struct amdgpu_ring ring[AMDGPU_MAX_MES_PIPES];
+ spinlock_t ring_lock[AMDGPU_MAX_MES_PIPES];
const struct firmware *fw[AMDGPU_MAX_MES_PIPES];
@@ -112,12 +112,12 @@ struct amdgpu_mes {
uint32_t gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
uint32_t sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
uint32_t aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
- uint32_t sch_ctx_offs;
- uint64_t sch_ctx_gpu_addr;
- uint64_t *sch_ctx_ptr;
- uint32_t query_status_fence_offs;
- uint64_t query_status_fence_gpu_addr;
- uint64_t *query_status_fence_ptr;
+ uint32_t sch_ctx_offs[AMDGPU_MAX_MES_PIPES];
+ uint64_t sch_ctx_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ uint64_t *sch_ctx_ptr[AMDGPU_MAX_MES_PIPES];
+ uint32_t query_status_fence_offs[AMDGPU_MAX_MES_PIPES];
+ uint64_t query_status_fence_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ uint64_t *query_status_fence_ptr[AMDGPU_MAX_MES_PIPES];
uint32_t read_val_offs;
uint64_t read_val_gpu_addr;
uint32_t *read_val_ptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index ad49cecb20b8..e6344a6b0a9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -212,6 +212,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
sched_hw_submission = max(sched_hw_submission, 256);
+ if (ring->funcs->type == AMDGPU_RING_TYPE_MES)
+ sched_hw_submission = 8;
else if (ring == &adev->sdma.instance[0].page)
sched_hw_submission = 256;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 1a5439abd1a0..c87d68d4be53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -461,8 +461,11 @@ struct amdgpu_vcn5_fw_shared {
struct amdgpu_fw_shared_unified_queue_struct sq;
uint8_t pad1[8];
struct amdgpu_fw_shared_fw_logging fw_log;
+ uint8_t pad2[20];
struct amdgpu_fw_shared_rb_setup rb_setup;
- uint8_t pad2[4];
+ struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
+ struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
+ uint8_t pad3[9];
};
#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
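The insertions above only work because every field in this struct must land at the offset the VCN firmware expects; the explicit pad arrays make the layout independent of compiler choices. A sketch of how such a shared layout can be pinned at build time — struct, offsets, and constraint are all hypothetical, purely to show the technique:

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct example_fw_shared {
	u32	present_flag;
	u8	pad1[12];
	u64	log_addr;
	u8	pad2[20];	/* space the firmware already assigns */
	u32	rb_setup;
};

static inline void example_fw_shared_layout_check(void)
{
	/* Build fails if the padding ever drifts (expected offset is
	 * hypothetical here: 4 + 12 + 8 + 20 = 44). */
	BUILD_BUG_ON(offsetof(struct example_fw_shared, rb_setup) != 44);
}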
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 111c380f929b..b287a82e6177 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -858,7 +858,7 @@ void amdgpu_virt_post_reset(struct amdgpu_device *adev)
adev->gfx.is_poweron = false;
}
- adev->mes.ring.sched.ready = false;
+ adev->mes.ring[0].sched.ready = false;
}
bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 506fa8003388..2c611b8577a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -3546,33 +3546,9 @@ static int gfx_v12_0_hw_init(void *handle)
return r;
}
-static int gfx_v12_0_kiq_disable_kgq(struct amdgpu_device *adev)
-{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- int i, r = 0;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
- adev->gfx.num_gfx_rings))
- return -ENOMEM;
-
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
- PREEMPT_QUEUES, 0, 0);
-
- if (adev->gfx.kiq[0].ring.sched.ready)
- r = amdgpu_ring_test_helper(kiq_ring);
-
- return r;
-}
-
static int gfx_v12_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int r;
uint32_t tmp;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
@@ -3580,8 +3556,7 @@ static int gfx_v12_0_hw_fini(void *handle)
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
- r = gfx_v12_0_kiq_disable_kgq(adev);
- if (r)
+ if (amdgpu_gfx_disable_kgq(adev, 0))
DRM_ERROR("KGQ disable failed\n");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index b88a6fa173b3..2797fd84432b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -231,7 +231,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
*/
- if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
+ if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
1 << vmid, GET_INST(GC, 0));
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index 26efce9aa410..edcb5351f8cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -299,7 +299,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
*/
- if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
+ if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
const unsigned eng = 17;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 99adf3625657..98aa3ccd0d20 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -538,11 +538,11 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index ad524ddc9760..6ae5a784e187 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
+#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
@@ -782,11 +783,15 @@ void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ if (ring->funcs->parse_cs)
+ amdgpu_ring_write(ring, 0);
+ else
+ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
@@ -1084,6 +1089,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
+ .parse_cs = jpeg_v4_0_3_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -1248,3 +1254,56 @@ static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
{
adev->jpeg.ras = &jpeg_v4_0_3_ras;
}
+
+/**
+ * jpeg_v4_0_3_dec_ring_parse_cs - command submission parser
+ *
+ * @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to parse
+ *
+ * Parse the command stream, return -EINVAL for invalid packet,
+ * 0 otherwise
+ */
+int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ uint32_t i, reg, res, cond, type;
+ struct amdgpu_device *adev = parser->adev;
+
+ for (i = 0; i < ib->length_dw ; i += 2) {
+ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
+ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
+ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+
+ if (res) /* only support 0 at the moment */
+ return -EINVAL;
+
+ switch (type) {
+ case PACKETJ_TYPE0:
+ if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE3:
+ if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE6:
+ if (ib->ptr[i] == CP_PACKETJ_NOP)
+ continue;
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ default:
+ dev_err(adev->dev, "Unknown packet type %d !\n", type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
index 747a3e5f6856..71c54b294e15 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
@@ -46,6 +46,9 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
+#define JPEG_REG_RANGE_START 0x4000
+#define JPEG_REG_RANGE_END 0x41c2
+
extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
@@ -62,5 +65,7 @@ void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask);
-
+int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
#endif /* __JPEG_V4_0_3_H__ */
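The parser above depends on CP_PACKETJ_GET_* helpers added to soc15d.h in the same series (that hunk is not shown here). The assumed word layout, sketched with illustrative names — treat the exact shifts and masks as assumptions and defer to soc15d.h for the authoritative values:

/* Assumed PACKETJ word layout (illustrative only):
 *   bits [17:0]  register offset      bits [23:18] reserved (must be 0)
 *   bits [27:24] condition            bits [31:28] packet type
 */
#define EX_PACKETJ_GET_REG(x)	((x) & 0x3FFFF)
#define EX_PACKETJ_GET_RES(x)	(((x) >> 18) & 0x3F)
#define EX_PACKETJ_GET_COND(x)	(((x) >> 24) & 0xF)
#define EX_PACKETJ_GET_TYPE(x)	(((x) >> 28) & 0xF)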
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index d694a276498a..f4daff90c770 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -646,6 +646,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
+ .parse_cs = jpeg_v4_0_3_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index f9343642ae7e..2ea8223eb969 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -162,13 +162,13 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
union MESAPI__QUERY_MES_STATUS mes_status_pkt;
signed long timeout = 3000000; /* 3000 ms */
struct amdgpu_device *adev = mes->adev;
- struct amdgpu_ring *ring = &mes->ring;
+ struct amdgpu_ring *ring = &mes->ring[0];
struct MES_API_STATUS *api_status;
union MESAPI__MISC *x_pkt = pkt;
const char *op_str, *misc_op_str;
unsigned long flags;
u64 status_gpu_addr;
- u32 status_offset;
+ u32 seq, status_offset;
u64 *status_ptr;
signed long r;
int ret;
@@ -191,11 +191,18 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
status_ptr = (u64 *)&adev->wb.wb[status_offset];
*status_ptr = 0;
- spin_lock_irqsave(&mes->ring_lock, flags);
+ spin_lock_irqsave(&mes->ring_lock[0], flags);
r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
if (r)
goto error_unlock_free;
+ seq = ++ring->fence_drv.sync_seq;
+ r = amdgpu_fence_wait_polling(ring,
+ seq - ring->fence_drv.num_fences_mask,
+ timeout);
+ if (r < 1)
+ goto error_undo;
+
api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
api_status->api_completion_fence_addr = status_gpu_addr;
api_status->api_completion_fence_value = 1;
@@ -208,14 +215,13 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_status_pkt.api_status.api_completion_fence_addr =
ring->fence_drv.gpu_addr;
- mes_status_pkt.api_status.api_completion_fence_value =
- ++ring->fence_drv.sync_seq;
+ mes_status_pkt.api_status.api_completion_fence_value = seq;
amdgpu_ring_write_multiple(ring, &mes_status_pkt,
sizeof(mes_status_pkt) / 4);
amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&mes->ring_lock, flags);
+ spin_unlock_irqrestore(&mes->ring_lock[0], flags);
op_str = mes_v11_0_get_op_string(x_pkt);
misc_op_str = mes_v11_0_get_misc_op_string(x_pkt);
@@ -229,7 +235,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
dev_dbg(adev->dev, "MES msg=%d was emitted\n",
x_pkt->header.opcode);
- r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, timeout);
if (r < 1 || !*status_ptr) {
if (misc_op_str)
@@ -252,8 +258,12 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
amdgpu_device_wb_free(adev, status_offset);
return 0;
+error_undo:
+ dev_err(adev->dev, "MES ring buffer is full.\n");
+ amdgpu_ring_undo(ring);
+
error_unlock_free:
- spin_unlock_irqrestore(&mes->ring_lock, flags);
+ spin_unlock_irqrestore(&mes->ring_lock[0], flags);
error_wb_free:
amdgpu_device_wb_free(adev, status_offset);
@@ -512,9 +522,9 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
mes_set_hw_res_pkt.paging_vmid = 0;
- mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr;
+ mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr[0];
mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
- mes->query_status_fence_gpu_addr;
+ mes->query_status_fence_gpu_addr[0];
for (i = 0; i < MAX_COMPUTE_PIPES; i++)
mes_set_hw_res_pkt.compute_hqd_mask[i] =
@@ -1015,7 +1025,7 @@ static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
return r;
}
- kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);
+ kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]);
return amdgpu_ring_test_helper(kiq_ring);
}
@@ -1029,7 +1039,7 @@ static int mes_v11_0_queue_init(struct amdgpu_device *adev,
if (pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
- ring = &adev->mes.ring;
+ ring = &adev->mes.ring[0];
else
BUG();
@@ -1071,7 +1081,7 @@ static int mes_v11_0_ring_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- ring = &adev->mes.ring;
+ ring = &adev->mes.ring[0];
ring->funcs = &mes_v11_0_ring_funcs;
@@ -1124,7 +1134,7 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
if (pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
- ring = &adev->mes.ring;
+ ring = &adev->mes.ring[0];
else
BUG();
@@ -1200,9 +1210,6 @@ static int mes_v11_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe;
- amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
- amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
-
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
kfree(adev->mes.mqd_backup[pipe]);
@@ -1216,12 +1223,12 @@ static int mes_v11_0_sw_fini(void *handle)
&adev->gfx.kiq[0].ring.mqd_gpu_addr,
&adev->gfx.kiq[0].ring.mqd_ptr);
- amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
- &adev->mes.ring.mqd_gpu_addr,
- &adev->mes.ring.mqd_ptr);
+ amdgpu_bo_free_kernel(&adev->mes.ring[0].mqd_obj,
+ &adev->mes.ring[0].mqd_gpu_addr,
+ &adev->mes.ring[0].mqd_ptr);
amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
- amdgpu_ring_fini(&adev->mes.ring);
+ amdgpu_ring_fini(&adev->mes.ring[0]);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
mes_v11_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
@@ -1332,9 +1339,9 @@ failure:
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
{
- if (adev->mes.ring.sched.ready) {
- mes_v11_0_kiq_dequeue(&adev->mes.ring);
- adev->mes.ring.sched.ready = false;
+ if (adev->mes.ring[0].sched.ready) {
+ mes_v11_0_kiq_dequeue(&adev->mes.ring[0]);
+ adev->mes.ring[0].sched.ready = false;
}
if (amdgpu_sriov_vf(adev)) {
@@ -1352,7 +1359,7 @@ static int mes_v11_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->mes.ring.sched.ready)
+ if (adev->mes.ring[0].sched.ready)
goto out;
if (!adev->enable_mes_kiq) {
@@ -1397,7 +1404,7 @@ out:
* with MES enabled.
*/
adev->gfx.kiq[0].ring.sched.ready = false;
- adev->mes.ring.sched.ready = true;
+ adev->mes.ring[0].sched.ready = true;
return 0;
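The new seq handling in this patch doubles as a ring-full guard: fence_drv.num_fences_mask is the fence window size minus one, so a packet may only be emitted once the fence num_fences_mask slots back has signalled. A short worked sketch of the arithmetic (values illustrative, taken out of the locked section above):

/* Suppose num_fences_mask = 7 (an 8-entry fence window) and the next
 * sequence number is 42. Before emitting, poll for fence 42 - 7 = 35:
 * once 35 has signalled, at most fences 36..41 remain in flight, so
 * slot 42 is guaranteed free and the write cannot overrun the ring. */
seq = ++ring->fence_drv.sync_seq;
r = amdgpu_fence_wait_polling(ring, seq - ring->fence_drv.num_fences_mask,
			      timeout);
if (r < 1)
	goto error_undo;	/* ring still full: amdgpu_ring_undo() and bail */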
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 0713bc3eb263..e39a58d262c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -142,19 +142,20 @@ static const char *mes_v12_0_get_misc_op_string(union MESAPI__MISC *x_pkt)
}
static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
- void *pkt, int size,
- int api_status_off)
+ int pipe, void *pkt, int size,
+ int api_status_off)
{
union MESAPI__QUERY_MES_STATUS mes_status_pkt;
signed long timeout = 3000000; /* 3000 ms */
struct amdgpu_device *adev = mes->adev;
- struct amdgpu_ring *ring = &mes->ring;
+ struct amdgpu_ring *ring = &mes->ring[pipe];
+ spinlock_t *ring_lock = &mes->ring_lock[pipe];
struct MES_API_STATUS *api_status;
union MESAPI__MISC *x_pkt = pkt;
const char *op_str, *misc_op_str;
unsigned long flags;
u64 status_gpu_addr;
- u32 status_offset;
+ u32 seq, status_offset;
u64 *status_ptr;
signed long r;
int ret;
@@ -177,11 +178,18 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
status_ptr = (u64 *)&adev->wb.wb[status_offset];
*status_ptr = 0;
- spin_lock_irqsave(&mes->ring_lock, flags);
+ spin_lock_irqsave(ring_lock, flags);
r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
if (r)
goto error_unlock_free;
+ seq = ++ring->fence_drv.sync_seq;
+ r = amdgpu_fence_wait_polling(ring,
+ seq - ring->fence_drv.num_fences_mask,
+ timeout);
+ if (r < 1)
+ goto error_undo;
+
api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
api_status->api_completion_fence_addr = status_gpu_addr;
api_status->api_completion_fence_value = 1;
@@ -194,39 +202,39 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_status_pkt.api_status.api_completion_fence_addr =
ring->fence_drv.gpu_addr;
- mes_status_pkt.api_status.api_completion_fence_value =
- ++ring->fence_drv.sync_seq;
+ mes_status_pkt.api_status.api_completion_fence_value = seq;
amdgpu_ring_write_multiple(ring, &mes_status_pkt,
sizeof(mes_status_pkt) / 4);
amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&mes->ring_lock, flags);
+ spin_unlock_irqrestore(ring_lock, flags);
op_str = mes_v12_0_get_op_string(x_pkt);
misc_op_str = mes_v12_0_get_misc_op_string(x_pkt);
if (misc_op_str)
- dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str,
- misc_op_str);
+ dev_dbg(adev->dev, "MES(%d) msg=%s (%s) was emitted\n",
+ pipe, op_str, misc_op_str);
else if (op_str)
- dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
+ dev_dbg(adev->dev, "MES(%d) msg=%s was emitted\n",
+ pipe, op_str);
else
- dev_dbg(adev->dev, "MES msg=%d was emitted\n",
- x_pkt->header.opcode);
+ dev_dbg(adev->dev, "MES(%d) msg=%d was emitted\n",
+ pipe, x_pkt->header.opcode);
- r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, timeout);
if (r < 1 || !*status_ptr) {
if (misc_op_str)
- dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
- op_str, misc_op_str);
+ dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n",
+ pipe, op_str, misc_op_str);
else if (op_str)
- dev_err(adev->dev, "MES failed to respond to msg=%s\n",
- op_str);
+ dev_err(adev->dev, "MES(%d) failed to respond to msg=%s\n",
+ pipe, op_str);
else
- dev_err(adev->dev, "MES failed to respond to msg=%d\n",
- x_pkt->header.opcode);
+ dev_err(adev->dev, "MES(%d) failed to respond to msg=%d\n",
+ pipe, x_pkt->header.opcode);
while (halt_if_hws_hang)
schedule();
@@ -238,8 +246,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
amdgpu_device_wb_free(adev, status_offset);
return 0;
+error_undo:
+ dev_err(adev->dev, "MES ring buffer is full.\n");
+ amdgpu_ring_undo(ring);
+
error_unlock_free:
- spin_unlock_irqrestore(&mes->ring_lock, flags);
+ spin_unlock_irqrestore(ring_lock, flags);
error_wb_free:
amdgpu_device_wb_free(adev, status_offset);
@@ -254,6 +266,8 @@ static int convert_to_mes_queue_type(int queue_type)
return MES_QUEUE_TYPE_COMPUTE;
else if (queue_type == AMDGPU_RING_TYPE_SDMA)
return MES_QUEUE_TYPE_SDMA;
+ else if (queue_type == AMDGPU_RING_TYPE_MES)
+ return MES_QUEUE_TYPE_SCHQ;
else
BUG();
return -1;
@@ -311,6 +325,7 @@ static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.gds_size = input->queue_size;
return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ AMDGPU_MES_SCHED_PIPE,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
offsetof(union MESAPI__ADD_QUEUE, api_status));
}
@@ -330,6 +345,7 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ AMDGPU_MES_SCHED_PIPE,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
@@ -338,6 +354,7 @@ static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
union MESAPI__ADD_QUEUE mes_add_queue_pkt;
+ int pipe;
memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));
@@ -354,7 +371,12 @@ static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
convert_to_mes_queue_type(input->queue_type);
mes_add_queue_pkt.map_legacy_kq = 1;
- return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ if (mes->adev->enable_uni_mes)
+ pipe = AMDGPU_MES_KIQ_PIPE;
+ else
+ pipe = AMDGPU_MES_SCHED_PIPE;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
offsetof(union MESAPI__ADD_QUEUE, api_status));
}
@@ -363,6 +385,7 @@ static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes,
struct mes_unmap_legacy_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
+ int pipe;
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
@@ -387,7 +410,12 @@ static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes,
convert_to_mes_queue_type(input->queue_type);
}
- return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ if (mes->adev->enable_uni_mes)
+ pipe = AMDGPU_MES_KIQ_PIPE;
+ else
+ pipe = AMDGPU_MES_SCHED_PIPE;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
@@ -404,7 +432,7 @@ static int mes_v12_0_resume_gang(struct amdgpu_mes *mes,
return 0;
}
-static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes)
+static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes, int pipe)
{
union MESAPI__QUERY_MES_STATUS mes_status_pkt;
@@ -414,7 +442,7 @@ static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes)
mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
- return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_status_pkt, sizeof(mes_status_pkt),
offsetof(union MESAPI__QUERY_MES_STATUS, api_status));
}
@@ -423,6 +451,7 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
struct mes_misc_op_input *input)
{
union MESAPI__MISC misc_pkt;
+ int pipe;
memset(&misc_pkt, 0, sizeof(misc_pkt));
@@ -475,12 +504,17 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
return -EINVAL;
}
- return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ if (mes->adev->enable_uni_mes)
+ pipe = AMDGPU_MES_KIQ_PIPE;
+ else
+ pipe = AMDGPU_MES_SCHED_PIPE;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&misc_pkt, sizeof(misc_pkt),
offsetof(union MESAPI__MISC, api_status));
}
-static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes)
+static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes, int pipe)
{
union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_1_pkt;
@@ -491,12 +525,12 @@ static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes)
mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 100;
- return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}
-static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes)
+static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
{
int i;
struct amdgpu_device *adev = mes->adev;
@@ -508,27 +542,33 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC;
mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
- mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
- mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
- mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
- mes_set_hw_res_pkt.paging_vmid = 0;
- mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr;
- mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
- mes->query_status_fence_gpu_addr;
-
- for (i = 0; i < MAX_COMPUTE_PIPES; i++)
- mes_set_hw_res_pkt.compute_hqd_mask[i] =
- mes->compute_hqd_mask[i];
-
- for (i = 0; i < MAX_GFX_PIPES; i++)
- mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];
-
- for (i = 0; i < MAX_SDMA_PIPES; i++)
- mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];
+ if (pipe == AMDGPU_MES_SCHED_PIPE) {
+ mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
+ mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
+ mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
+ mes_set_hw_res_pkt.paging_vmid = 0;
+
+ for (i = 0; i < MAX_COMPUTE_PIPES; i++)
+ mes_set_hw_res_pkt.compute_hqd_mask[i] =
+ mes->compute_hqd_mask[i];
+
+ for (i = 0; i < MAX_GFX_PIPES; i++)
+ mes_set_hw_res_pkt.gfx_hqd_mask[i] =
+ mes->gfx_hqd_mask[i];
+
+ for (i = 0; i < MAX_SDMA_PIPES; i++)
+ mes_set_hw_res_pkt.sdma_hqd_mask[i] =
+ mes->sdma_hqd_mask[i];
+
+ for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
+ mes_set_hw_res_pkt.aggregated_doorbells[i] =
+ mes->aggregated_doorbells[i];
+ }
- for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
- mes_set_hw_res_pkt.aggregated_doorbells[i] =
- mes->aggregated_doorbells[i];
+ mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr =
+ mes->sch_ctx_gpu_addr[pipe];
+ mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
+ mes->query_status_fence_gpu_addr[pipe];
for (i = 0; i < 5; i++) {
mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i];
@@ -556,7 +596,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
}
- return mes_v12_0_submit_pkt_and_poll_completion(mes,
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
}
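The per-pipe indexing introduced above (mes->sch_ctx_gpu_addr[pipe], mes->query_status_fence_gpu_addr[pipe], and adev->mes.ring[pipe] later in this patch) implies these amdgpu_mes fields became arrays sized by AMDGPU_MAX_MES_PIPES. A sketch of the assumed layout; the actual header hunk is not shown in this excerpt:

struct amdgpu_mes {
	/* ... other members elided ... */
	struct amdgpu_ring	ring[AMDGPU_MAX_MES_PIPES];
	uint64_t		sch_ctx_gpu_addr[AMDGPU_MAX_MES_PIPES];
	uint64_t		query_status_fence_gpu_addr[AMDGPU_MAX_MES_PIPES];
};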
@@ -734,16 +774,11 @@ static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)
if (enable) {
data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET,
- (!adev->enable_uni_mes && adev->enable_mes_kiq) ? 1 : 0);
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
mutex_lock(&adev->srbm_mutex);
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
- if ((!adev->enable_mes_kiq || adev->enable_uni_mes) &&
- pipe == AMDGPU_MES_KIQ_PIPE)
- continue;
-
soc21_grbm_select(adev, 3, pipe, 0, 0);
ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
@@ -757,8 +792,7 @@ static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)
/* unhalt MES and activate pipe0 */
data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE,
- (!adev->enable_uni_mes && adev->enable_mes_kiq) ? 1 : 0);
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 1);
WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
if (amdgpu_emu_mode)
@@ -774,8 +808,7 @@ static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)
data = REG_SET_FIELD(data, CP_MES_CNTL,
MES_INVALIDATE_ICACHE, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET,
- (!adev->enable_uni_mes && adev->enable_mes_kiq) ? 1 : 0);
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_HALT, 1);
WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
}
@@ -790,10 +823,6 @@ static void mes_v12_0_set_ucode_start_addr(struct amdgpu_device *adev)
mutex_lock(&adev->srbm_mutex);
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
- if ((!adev->enable_mes_kiq || adev->enable_uni_mes) &&
- pipe == AMDGPU_MES_KIQ_PIPE)
- continue;
-
/* me=3, queue=0 */
soc21_grbm_select(adev, 3, pipe, 0, 0);
@@ -1085,7 +1114,7 @@ static int mes_v12_0_kiq_enable_queue(struct amdgpu_device *adev)
return r;
}
- kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);
+ kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]);
r = amdgpu_ring_test_ring(kiq_ring);
if (r) {
@@ -1101,14 +1130,12 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
int r;
- if (pipe == AMDGPU_MES_KIQ_PIPE)
+ if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq[0].ring;
- else if (pipe == AMDGPU_MES_SCHED_PIPE)
- ring = &adev->mes.ring;
else
- BUG();
+ ring = &adev->mes.ring[pipe];
- if ((pipe == AMDGPU_MES_SCHED_PIPE) &&
+ if ((adev->enable_uni_mes || pipe == AMDGPU_MES_SCHED_PIPE) &&
(amdgpu_in_reset(adev) || adev->in_suspend)) {
*(ring->wptr_cpu_addr) = 0;
*(ring->rptr_cpu_addr) = 0;
@@ -1120,13 +1147,12 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
return r;
if (pipe == AMDGPU_MES_SCHED_PIPE) {
- if (adev->enable_uni_mes) {
- mes_v12_0_queue_init_register(ring);
- } else {
+ if (adev->enable_uni_mes)
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ else
r = mes_v12_0_kiq_enable_queue(adev);
- if (r)
- return r;
- }
+ if (r)
+ return r;
} else {
mes_v12_0_queue_init_register(ring);
}
@@ -1146,25 +1172,29 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
return 0;
}
-static int mes_v12_0_ring_init(struct amdgpu_device *adev)
+static int mes_v12_0_ring_init(struct amdgpu_device *adev, int pipe)
{
struct amdgpu_ring *ring;
- ring = &adev->mes.ring;
+ ring = &adev->mes.ring[pipe];
ring->funcs = &mes_v12_0_ring_funcs;
ring->me = 3;
- ring->pipe = 0;
+ ring->pipe = pipe;
ring->queue = 0;
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1;
- ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_SCHED_PIPE];
+ ring->eop_gpu_addr = adev->mes.eop_gpu_addr[pipe];
ring->no_scheduler = true;
sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);
+ if (pipe == AMDGPU_MES_SCHED_PIPE)
+ ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1;
+ else
+ ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1;
+
return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
}
@@ -1178,7 +1208,7 @@ static int mes_v12_0_kiq_ring_init(struct amdgpu_device *adev)
ring = &adev->gfx.kiq[0].ring;
ring->me = 3;
- ring->pipe = adev->enable_uni_mes ? 0 : 1;
+ ring->pipe = 1;
ring->queue = 0;
ring->adev = NULL;
@@ -1200,12 +1230,10 @@ static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev,
int r, mqd_size = sizeof(struct v12_compute_mqd);
struct amdgpu_ring *ring;
- if (pipe == AMDGPU_MES_KIQ_PIPE)
+ if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq[0].ring;
- else if (pipe == AMDGPU_MES_SCHED_PIPE)
- ring = &adev->mes.ring;
else
- BUG();
+ ring = &adev->mes.ring[pipe];
if (ring->mqd_obj)
return 0;
@@ -1246,9 +1274,6 @@ static int mes_v12_0_sw_init(void *handle)
return r;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
- if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
- continue;
-
r = mes_v12_0_allocate_eop_buf(adev, pipe);
if (r)
return r;
@@ -1256,18 +1281,15 @@ static int mes_v12_0_sw_init(void *handle)
r = mes_v12_0_mqd_sw_init(adev, pipe);
if (r)
return r;
- }
- if (adev->enable_mes_kiq) {
- r = mes_v12_0_kiq_ring_init(adev);
+ if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE)
+ r = mes_v12_0_kiq_ring_init(adev);
+ else
+ r = mes_v12_0_ring_init(adev, pipe);
if (r)
return r;
}
- r = mes_v12_0_ring_init(adev);
- if (r)
- return r;
-
return 0;
}
@@ -1276,9 +1298,6 @@ static int mes_v12_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe;
- amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
- amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
-
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
kfree(adev->mes.mqd_backup[pipe]);
@@ -1286,18 +1305,21 @@ static int mes_v12_0_sw_fini(void *handle)
&adev->mes.eop_gpu_addr[pipe],
NULL);
amdgpu_ucode_release(&adev->mes.fw[pipe]);
- }
-
- amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
- &adev->gfx.kiq[0].ring.mqd_gpu_addr,
- &adev->gfx.kiq[0].ring.mqd_ptr);
- amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
- &adev->mes.ring.mqd_gpu_addr,
- &adev->mes.ring.mqd_ptr);
+ if (adev->enable_uni_mes || pipe == AMDGPU_MES_SCHED_PIPE) {
+ amdgpu_bo_free_kernel(&adev->mes.ring[pipe].mqd_obj,
+ &adev->mes.ring[pipe].mqd_gpu_addr,
+ &adev->mes.ring[pipe].mqd_ptr);
+ amdgpu_ring_fini(&adev->mes.ring[pipe]);
+ }
+ }
- amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
- amdgpu_ring_fini(&adev->mes.ring);
+ if (!adev->enable_uni_mes) {
+ amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
+ &adev->gfx.kiq[0].ring.mqd_gpu_addr,
+ &adev->gfx.kiq[0].ring.mqd_ptr);
+ amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
+ }
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
mes_v12_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
@@ -1341,7 +1363,7 @@ static void mes_v12_0_kiq_dequeue_sched(struct amdgpu_device *adev)
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- adev->mes.ring.sched.ready = false;
+ adev->mes.ring[0].sched.ready = false;
}
static void mes_v12_0_kiq_setting(struct amdgpu_ring *ring)
@@ -1362,10 +1384,10 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
{
int r = 0;
- mes_v12_0_kiq_setting(&adev->gfx.kiq[0].ring);
-
if (adev->enable_uni_mes)
- return mes_v12_0_hw_init(adev);
+ mes_v12_0_kiq_setting(&adev->mes.ring[AMDGPU_MES_KIQ_PIPE]);
+ else
+ mes_v12_0_kiq_setting(&adev->gfx.kiq[0].ring);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
@@ -1392,6 +1414,14 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
if (r)
goto failure;
+ if (adev->enable_uni_mes) {
+ r = mes_v12_0_set_hw_resources(&adev->mes, AMDGPU_MES_KIQ_PIPE);
+ if (r)
+ goto failure;
+
+ mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_KIQ_PIPE);
+ }
+
r = mes_v12_0_hw_init(adev);
if (r)
goto failure;
@@ -1405,9 +1435,15 @@ failure:
static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev)
{
- if (adev->mes.ring.sched.ready) {
- mes_v12_0_kiq_dequeue_sched(adev);
- adev->mes.ring.sched.ready = false;
+ if (adev->mes.ring[0].sched.ready) {
+ if (adev->enable_uni_mes)
+ amdgpu_mes_unmap_legacy_queue(adev,
+ &adev->mes.ring[AMDGPU_MES_SCHED_PIPE],
+ RESET_QUEUES, 0, 0);
+ else
+ mes_v12_0_kiq_dequeue_sched(adev);
+
+ adev->mes.ring[0].sched.ready = false;
}
mes_v12_0_enable(adev, false);
@@ -1420,10 +1456,10 @@ static int mes_v12_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->mes.ring.sched.ready)
+ if (adev->mes.ring[0].sched.ready)
goto out;
- if (!adev->enable_mes_kiq || adev->enable_uni_mes) {
+ if (!adev->enable_mes_kiq) {
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
r = mes_v12_0_load_microcode(adev,
AMDGPU_MES_SCHED_PIPE, true);
@@ -1443,23 +1479,23 @@ static int mes_v12_0_hw_init(void *handle)
mes_v12_0_enable(adev, true);
}
+ /* Enable the MES to handle doorbell rings on unmapped queues */
+ mes_v12_0_enable_unmapped_doorbell_handling(&adev->mes, true);
+
r = mes_v12_0_queue_init(adev, AMDGPU_MES_SCHED_PIPE);
if (r)
goto failure;
- r = mes_v12_0_set_hw_resources(&adev->mes);
+ r = mes_v12_0_set_hw_resources(&adev->mes, AMDGPU_MES_SCHED_PIPE);
if (r)
goto failure;
if (adev->enable_uni_mes)
- mes_v12_0_set_hw_resources_1(&adev->mes);
+ mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE);
mes_v12_0_init_aggregated_doorbell(&adev->mes);
- /* Enable the MES to handle doorbell ring on unmapped queue */
- mes_v12_0_enable_unmapped_doorbell_handling(&adev->mes, true);
-
- r = mes_v12_0_query_sched_status(&adev->mes);
+ r = mes_v12_0_query_sched_status(&adev->mes, AMDGPU_MES_SCHED_PIPE);
if (r) {
DRM_ERROR("MES is busy\n");
goto failure;
@@ -1472,7 +1508,7 @@ out:
* with MES enabled.
*/
adev->gfx.kiq[0].ring.sched.ready = false;
- adev->mes.ring.sched.ready = true;
+ adev->mes.ring[0].sched.ready = true;
return 0;
@@ -1515,17 +1551,7 @@ static int mes_v12_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe, r;
- if (adev->enable_uni_mes) {
- r = amdgpu_mes_init_microcode(adev, AMDGPU_MES_SCHED_PIPE);
- if (!r)
- return 0;
-
- adev->enable_uni_mes = false;
- }
-
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
- if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
- continue;
r = amdgpu_mes_init_microcode(adev, pipe);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index 2357ff39323f..e74e1983da53 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -76,6 +76,12 @@
((cond & 0xF) << 24) | \
((type & 0xF) << 28))
+#define CP_PACKETJ_NOP 0x60000000
+#define CP_PACKETJ_GET_REG(x) ((x) & 0x3FFFF)
+#define CP_PACKETJ_GET_RES(x) (((x) >> 18) & 0x3F)
+#define CP_PACKETJ_GET_COND(x) (((x) >> 24) & 0xF)
+#define CP_PACKETJ_GET_TYPE(x) (((x) >> 28) & 0xF)
+
/* Packet 3 types */
#define PACKET3_NOP 0x10
#define PACKET3_SET_BASE 0x11
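The new CP_PACKETJ_GET_* accessors invert the CP_PACKETJ() encoder whose tail is visible above: reg occupies bits 0-17, res bits 18-23, cond bits 24-27, and type bits 28-31. A minimal round-trip check (cp_packetj_selftest is hypothetical, assuming the CP_PACKETJ(reg, res, cond, type) encoder from this header):

static void cp_packetj_selftest(void)
{
	/* encode reg=0x1234, res=0, cond=2, type=0, then unpack it again */
	u32 pkt = CP_PACKETJ(0x1234, 0, 2, 0);

	WARN_ON(CP_PACKETJ_GET_REG(pkt) != 0x1234);
	WARN_ON(CP_PACKETJ_GET_RES(pkt) != 0);
	WARN_ON(CP_PACKETJ_GET_COND(pkt) != 2);
	WARN_ON(CP_PACKETJ_GET_TYPE(pkt) != 0);
}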
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index 7d641d0dadba..b0c3678cfb31 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -406,6 +406,7 @@ static int soc24_common_early_init(void *handle)
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_HDP_SD |
AMD_CG_SUPPORT_MC_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
@@ -424,6 +425,7 @@ static int soc24_common_early_init(void *handle)
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_HDP_SD |
AMD_CG_SUPPORT_MC_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7e7929f24ae4..983a977632ff 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2893,6 +2893,9 @@ static int dm_suspend(void *handle)
hpd_rx_irq_work_suspend(dm);
+ if (adev->dm.dc->caps.ips_support)
+ dc_allow_idle_optimizations(adev->dm.dc, true);
+
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 915eb2c08ece..2e9f6da1acdc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -804,12 +804,25 @@ struct dsc_mst_fairness_params {
};
#if defined(CONFIG_DRM_AMD_DC_FP)
-static int kbps_to_peak_pbn(int kbps)
+static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+{
+ u8 link_coding_cap;
+ uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
+ if (link_coding_cap == DP_128b_132b_ENCODING)
+ fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+
+ return fec_overhead_multiplier_x1000;
+}
+
+static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
{
u64 peak_kbps = kbps;
peak_kbps *= 1006;
- peak_kbps = div_u64(peak_kbps, 1000);
+ peak_kbps *= fec_overhead_multiplier_x1000;
+ peak_kbps = div_u64(peak_kbps, 1000 * 1000);
return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}
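The conversion now folds the DP link overhead (1006/1000) and the coding-dependent FEC overhead into one 64-bit product before the usual 64 / (54 * 8 * 1000) PBN scaling. A worked example for a 1 Gbit/s stream on an 8b/10b link (multiplier 1031); on a 128b/132b link the multiplier is 1000, leaving only the 1.006 factor:

	peak_kbps = 1,000,000 * 1006 * 1031 / (1000 * 1000) = 1,037,186
	PBN       = DIV64_U64_ROUND_UP(1,037,186 * 64, 54 * 8 * 1000)
	          = ceil(66,379,904 / 432,000) = 154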
@@ -910,11 +923,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
- kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
+ kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -1010,6 +1024,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@@ -1039,7 +1054,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
if (next_index == -1)
break;
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1052,8 +1067,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
- vars[next_index].pbn = kbps_to_peak_pbn(
- params[next_index].bw_range.max_kbps);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1082,6 +1096,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
memset(params, 0, sizeof(params));
@@ -1146,7 +1161,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try no compression */
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1165,7 +1180,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try max compression */
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1173,7 +1188,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index fa84d34b7373..600d6e221011 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -46,6 +46,9 @@
#define SYNAPTICS_CASCADED_HUB_ID 0x5A
#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
+#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
+#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
+
enum mst_msg_ready_type {
NONE_MSG_RDY_EVENT = 0,
DOWN_REP_MSG_RDY_EVENT = 1,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index ff03b1d98aa7..14a902ff3b8a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -3589,7 +3589,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
(int)hubp->curs_attr.width || pos_cpy.x
<= (int)hubp->curs_attr.width +
pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
+ pos_cpy.x = 2 * viewport_width - temp_x;
}
}
} else {
@@ -3682,7 +3682,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
(int)hubp->curs_attr.width || pos_cpy.x
<= (int)hubp->curs_attr.width +
pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = 2 * viewport_width - temp_x;
+ pos_cpy.x = temp_x + viewport_width;
}
}
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 9a3cc0514a36..8e0588b1cf30 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1778,6 +1778,9 @@ static bool dcn321_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* Use pipe context based otg sync logic */
+ dc->config.use_pipe_ctx_sync_logic = true;
+
dc->config.dc_mode_clk_limit_support = true;
dc->config.enable_windowed_mpo_odm = true;
/* read VBIOS LTTPR caps */
diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
index 4cf2c9f30b3d..101e2fe962c6 100644
--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
@@ -97,6 +97,7 @@ enum MES_QUEUE_TYPE {
MES_QUEUE_TYPE_SDMA,
MES_QUEUE_TYPE_MAX,
+ MES_QUEUE_TYPE_SCHQ = MES_QUEUE_TYPE_MAX,
};
struct MES_API_STATUS {
@@ -242,8 +243,12 @@ union MESAPI_SET_HW_RESOURCES {
uint32_t send_write_data : 1;
uint32_t os_tdr_timeout_override : 1;
uint32_t use_rs64mem_for_proc_gang_ctx : 1;
+ uint32_t halt_on_misaligned_access : 1;
+ uint32_t use_add_queue_unmap_flag_addr : 1;
+ uint32_t enable_mes_sch_stb_log : 1;
+ uint32_t limit_single_process : 1;
uint32_t unmapped_doorbell_handling: 2;
- uint32_t reserved : 15;
+ uint32_t reserved : 11;
};
uint32_t uint32_all;
};
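A quick bit-budget check on the new flags: the four added single-bit fields come straight out of reserved (15 - 4 = 11), and the 2-bit unmapped_doorbell_handling field is untouched, so the flags bitfield keeps its total width and the union with uint32_all stays a single dword.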
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index c16c7678237e..0830cae9a4d0 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -208,6 +208,18 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_BOARD_NAME, "KUN"),
},
.driver_data = (void *)&lcd1600x2560_rightside_up,
+ }, { /* AYN Loki Max */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Loki Max"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
+ }, { /* AYN Loki Zero */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Loki Zero"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* Chuwi HiBook (CWI514) */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index ae5c6ec24a1e..77b50c56c124 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -539,8 +539,8 @@ static int mtk_drm_kms_init(struct drm_device *drm)
}
/* IGT will check if the cursor size is configured */
- drm->mode_config.cursor_width = drm->mode_config.max_width;
- drm->mode_config.cursor_height = drm->mode_config.max_height;
+ drm->mode_config.cursor_width = 512;
+ drm->mode_config.cursor_height = 512;
/* Use OVL device for all DMA memory allocations */
crtc = drm_crtc_from_index(drm, 0);
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 2241e53a2946..dec6913cec5b 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -279,7 +279,6 @@ static int inno_hdmi_upload_frame(struct drm_connector *connector,
const u8 *buffer, size_t len)
{
struct inno_hdmi *hdmi = connector_to_inno_hdmi(connector);
- u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
ssize_t i;
if (type != HDMI_INFOFRAME_TYPE_AVI) {
@@ -291,8 +290,7 @@ static int inno_hdmi_upload_frame(struct drm_connector *connector,
inno_hdmi_disable_frame(connector, type);
for (i = 0; i < len; i++)
- hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_ADDR + i,
- packed_frame[i]);
+ hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_ADDR + i, buffer[i]);
return 0;
}
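The fix here is that the infoframe bytes written to the hardware now come from the caller-supplied buffer; the removed packed_frame[] stack array was never filled in, so the loop previously uploaded uninitialized stack data.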
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 9bd7453b25ad..b8682818bafa 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -315,7 +315,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
struct v3d_dev *v3d = job->base.v3d;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
- int i, csd_cfg0_reg, csd_cfg_reg_count;
+ int i, csd_cfg0_reg;
v3d->csd_job = job;
@@ -335,9 +335,17 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
v3d_switch_perfmon(v3d, &job->base);
csd_cfg0_reg = V3D_CSD_QUEUED_CFG0(v3d->ver);
- csd_cfg_reg_count = v3d->ver < 71 ? 6 : 7;
- for (i = 1; i <= csd_cfg_reg_count; i++)
+ for (i = 1; i <= 6; i++)
V3D_CORE_WRITE(0, csd_cfg0_reg + 4 * i, job->args.cfg[i]);
+
+ /* Although V3D 7.1 has an eighth configuration register, we are not
+ * using it. Therefore, make sure it remains unused.
+ *
+ * XXX: Set the CFG7 register
+ */
+ if (v3d->ver >= 71)
+ V3D_CORE_WRITE(0, V3D_V7_CSD_QUEUED_CFG7, 0);
+
/* CFG0 write kicks off the job. */
V3D_CORE_WRITE(0, csd_cfg0_reg, job->args.cfg[0]);
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 76109415eba6..f2f1d8ddb221 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -87,9 +87,55 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
spin_unlock(&xe->clients.lock);
file->driver_priv = xef;
+ kref_init(&xef->refcount);
+
return 0;
}
+static void xe_file_destroy(struct kref *ref)
+{
+ struct xe_file *xef = container_of(ref, struct xe_file, refcount);
+ struct xe_device *xe = xef->xe;
+
+ xa_destroy(&xef->exec_queue.xa);
+ mutex_destroy(&xef->exec_queue.lock);
+ xa_destroy(&xef->vm.xa);
+ mutex_destroy(&xef->vm.lock);
+
+ spin_lock(&xe->clients.lock);
+ xe->clients.count--;
+ spin_unlock(&xe->clients.lock);
+
+ xe_drm_client_put(xef->client);
+ kfree(xef);
+}
+
+/**
+ * xe_file_get() - Take a reference to the xe file object
+ * @xef: Pointer to the xe file
+ *
+ * Anyone with a pointer to xef must take a reference to the xe file
+ * object using this call.
+ *
+ * Return: xe file pointer
+ */
+struct xe_file *xe_file_get(struct xe_file *xef)
+{
+ kref_get(&xef->refcount);
+ return xef;
+}
+
+/**
+ * xe_file_put() - Drop a reference to the xe file object
+ * @xef: Pointer to the xe file
+ *
+ * Used to drop a reference to the xef object.
+ */
+void xe_file_put(struct xe_file *xef)
+{
+ kref_put(&xef->refcount, xe_file_destroy);
+}
+
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
@@ -98,6 +144,8 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
struct xe_exec_queue *q;
unsigned long idx;
+ xe_pm_runtime_get(xe);
+
/*
* No need for exec_queue.lock here as there is no contention for it
* when FD is closing as IOCTLs presumably can't be modifying the
@@ -108,21 +156,14 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
xe_exec_queue_kill(q);
xe_exec_queue_put(q);
}
- xa_destroy(&xef->exec_queue.xa);
- mutex_destroy(&xef->exec_queue.lock);
mutex_lock(&xef->vm.lock);
xa_for_each(&xef->vm.xa, idx, vm)
xe_vm_close_and_put(vm);
mutex_unlock(&xef->vm.lock);
- xa_destroy(&xef->vm.xa);
- mutex_destroy(&xef->vm.lock);
- spin_lock(&xe->clients.lock);
- xe->clients.count--;
- spin_unlock(&xe->clients.lock);
+ xe_file_put(xef);
- xe_drm_client_put(xef->client);
- kfree(xef);
+ xe_pm_runtime_put(xe);
}
static const struct drm_ioctl_desc xe_ioctls[] = {
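The new xe_file_get()/xe_file_put() pair is a standard kref pattern: any long-lived holder of an xef pointer takes a reference when it publishes the pointer and drops it on teardown, with the final put running xe_file_destroy(). A minimal sketch with a hypothetical holder type (the xe_vm and xe_exec_queue hunks below play this role in the real patch):

struct holder {
	struct xe_file *xef;
};

static void holder_bind(struct holder *h, struct xe_file *xef)
{
	h->xef = xe_file_get(xef);	/* ref held while the pointer is live */
}

static void holder_free(struct holder *h)
{
	if (h->xef)
		xe_file_put(h->xef);	/* last put runs xe_file_destroy() */
}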
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index bb07f5669dbb..b3952718b3c1 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -170,4 +170,7 @@ static inline bool xe_device_wedged(struct xe_device *xe)
void xe_device_declare_wedged(struct xe_device *xe);
+struct xe_file *xe_file_get(struct xe_file *xef);
+void xe_file_put(struct xe_file *xef);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 3bca6d344744..cbc582bcc90a 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -566,6 +566,9 @@ struct xe_file {
/** @client: drm client */
struct xe_drm_client *client;
+
+ /** @refcount: ref count of this xe file */
+ struct kref refcount;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 6a26923fa10e..7ddd59908334 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -251,11 +251,8 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
/* Accumulate all the exec queues from this client */
mutex_lock(&xef->exec_queue.lock);
- xa_for_each(&xef->exec_queue.xa, i, q) {
+ xa_for_each(&xef->exec_queue.xa, i, q)
xe_exec_queue_update_run_ticks(q);
- xef->run_ticks[q->class] += q->run_ticks - q->old_run_ticks;
- q->old_run_ticks = q->run_ticks;
- }
mutex_unlock(&xef->exec_queue.lock);
/* Get the total GPU cycles */
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 0ba37835849b..a39384bb9553 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -37,6 +37,10 @@ static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
if (q->vm)
xe_vm_put(q->vm);
+
+ if (q->xef)
+ xe_file_put(q->xef);
+
kfree(q);
}
@@ -649,6 +653,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
goto kill_exec_queue;
args->exec_queue_id = id;
+ q->xef = xe_file_get(xef);
return 0;
@@ -762,6 +767,7 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
*/
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
+ struct xe_file *xef;
struct xe_lrc *lrc;
u32 old_ts, new_ts;
@@ -773,6 +779,8 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
if (!q->vm || !q->vm->xef)
return;
+ xef = q->vm->xef;
+
/*
* Only sample the first LRC. For parallel submission, all of them are
* scheduled together and we compensate that below by multiplying by
@@ -783,7 +791,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
*/
lrc = q->lrc[0];
new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
- q->run_ticks += (new_ts - old_ts) * q->width;
+ xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
}
void xe_exec_queue_kill(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 201588ec33c3..a35ce24c9798 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -38,6 +38,9 @@ enum xe_exec_queue_priority {
* a kernel object.
*/
struct xe_exec_queue {
+ /** @xef: Back pointer to xe file if this is user created exec queue */
+ struct xe_file *xef;
+
/** @gt: graphics tile this exec queue can submit to */
struct xe_gt *gt;
/**
@@ -139,10 +142,6 @@ struct xe_exec_queue {
* Protected by @vm's resv. Unused if @vm == NULL.
*/
u64 tlb_flush_seqno;
- /** @old_run_ticks: prior hw engine class run time in ticks for this exec queue */
- u64 old_run_ticks;
- /** @run_ticks: hw engine class run time in ticks for this exec queue */
- u64 run_ticks;
/** @lrc: logical ring context for this exec queue */
struct xe_lrc *lrc[];
};
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 4699b7836001..b6f0a7299c03 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1927,6 +1927,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
+ bool is_primary = !xe_gt_is_media_type(gt);
bool valid_ggtt, valid_ctxs, valid_dbs;
bool valid_any, valid_all;
@@ -1935,13 +1936,17 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
valid_dbs = pf_get_vf_config_dbs(gt, vfid);
/* note that GuC doorbells are optional */
- valid_any = valid_ggtt || valid_ctxs || valid_dbs;
- valid_all = valid_ggtt && valid_ctxs;
+ valid_any = valid_ctxs || valid_dbs;
+ valid_all = valid_ctxs;
+
+ /* and GGTT/LMEM is configured on primary GT only */
+ valid_all = valid_all && valid_ggtt;
+ valid_any = valid_any || (valid_ggtt && is_primary);
if (IS_DGFX(xe)) {
bool valid_lmem = pf_get_vf_config_ggtt(primary_gt, vfid);
- valid_any = valid_any || valid_lmem;
+ valid_any = valid_any || (valid_lmem && is_primary);
valid_all = valid_all && valid_lmem;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 41e46a00c01e..8892d6c2291e 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -850,7 +850,7 @@ static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr)
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- return bsearch(&key, runtime->regs, runtime->regs_size, sizeof(key),
+ return bsearch(&key, runtime->regs, runtime->num_regs, sizeof(key),
vf_runtime_reg_cmp);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index d9359976ab8b..481d83d07367 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -13,10 +13,13 @@
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_trace.h"
#include "regs/xe_guc_regs.h"
+#define FENCE_STACK_BIT DMA_FENCE_FLAG_USER_BITS
+
/*
* TLB inval depends on pending commands in the CT queue and then the real
* invalidation time. Double up the time to process full CT queue
@@ -33,6 +36,24 @@ static long tlb_timeout_jiffies(struct xe_gt *gt)
return hw_tlb_timeout + 2 * delay;
}
+static void
+__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
+{
+ bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
+
+ trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
+ xe_gt_tlb_invalidation_fence_fini(fence);
+ dma_fence_signal(&fence->base);
+ if (!stack)
+ dma_fence_put(&fence->base);
+}
+
+static void
+invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
+{
+ list_del(&fence->link);
+ __invalidation_fence_signal(xe, fence);
+}
static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
@@ -54,10 +75,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
fence->seqno, gt->tlb_invalidation.seqno_recv);
- list_del(&fence->link);
fence->base.error = -ETIME;
- dma_fence_signal(&fence->base);
- dma_fence_put(&fence->base);
+ invalidation_fence_signal(xe, fence);
}
if (!list_empty(&gt->tlb_invalidation.pending_fences))
queue_delayed_work(system_wq,
@@ -87,21 +106,6 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
return 0;
}
-static void
-__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
- trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
- dma_fence_signal(&fence->base);
- dma_fence_put(&fence->base);
-}
-
-static void
-invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
- list_del(&fence->link);
- __invalidation_fence_signal(xe, fence);
-}
-
/**
* xe_gt_tlb_invalidation_reset - Initialize GT TLB invalidation reset
* @gt: graphics tile
@@ -111,7 +115,6 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
struct xe_gt_tlb_invalidation_fence *fence, *next;
- struct xe_guc *guc = &gt->uc.guc;
int pending_seqno;
/*
@@ -134,7 +137,6 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
else
pending_seqno = gt->tlb_invalidation.seqno - 1;
WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);
- wake_up_all(&guc->ct.wq);
list_for_each_entry_safe(fence, next,
&gt->tlb_invalidation.pending_fences, link)
@@ -165,6 +167,8 @@ static int send_tlb_invalidation(struct xe_guc *guc,
int seqno;
int ret;
+ xe_gt_assert(gt, fence);
+
/*
* XXX: The seqno algorithm relies on TLB invalidation being processed
* in order which they currently are, if that changes the algorithm will
@@ -173,10 +177,8 @@ static int send_tlb_invalidation(struct xe_guc *guc,
mutex_lock(&guc->ct.lock);
seqno = gt->tlb_invalidation.seqno;
- if (fence) {
- fence->seqno = seqno;
- trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
- }
+ fence->seqno = seqno;
+ trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
action[1] = seqno;
ret = xe_guc_ct_send_locked(&guc->ct, action, len,
G2H_LEN_DW_TLB_INVALIDATE, 1);
@@ -209,7 +211,6 @@ static int send_tlb_invalidation(struct xe_guc *guc,
TLB_INVALIDATION_SEQNO_MAX;
if (!gt->tlb_invalidation.seqno)
gt->tlb_invalidation.seqno = 1;
- ret = seqno;
}
mutex_unlock(&guc->ct.lock);
@@ -223,14 +224,16 @@ static int send_tlb_invalidation(struct xe_guc *guc,
/**
* xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
* @gt: graphics tile
+ * @fence: invalidation fence which will be signaled on TLB invalidation
+ * completion
*
* Issue a TLB invalidation for the GuC. Completion of TLB is asynchronous and
- * caller can use seqno + xe_gt_tlb_invalidation_wait to wait for completion.
+ * caller can use the invalidation fence to wait for completion.
*
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: 0 on success, negative error code on error
*/
-static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
+static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence)
{
u32 action[] = {
XE_GUC_ACTION_TLB_INVALIDATION,
@@ -238,7 +241,7 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
};
- return send_tlb_invalidation(&gt->uc.guc, NULL, action,
+ return send_tlb_invalidation(&gt->uc.guc, fence, action,
ARRAY_SIZE(action));
}
@@ -257,13 +260,17 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
gt->uc.guc.submission_state.enabled) {
- int seqno;
-
- seqno = xe_gt_tlb_invalidation_guc(gt);
- if (seqno <= 0)
- return seqno;
+ struct xe_gt_tlb_invalidation_fence fence;
+ int ret;
+
+ xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
+ ret = xe_gt_tlb_invalidation_guc(gt, &fence);
+ if (ret < 0) {
+ xe_gt_tlb_invalidation_fence_fini(&fence);
+ return ret;
+ }
- xe_gt_tlb_invalidation_wait(gt, seqno);
+ xe_gt_tlb_invalidation_fence_wait(&fence);
} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
if (IS_SRIOV_VF(xe))
return 0;
@@ -290,18 +297,16 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
*
* @gt: graphics tile
* @fence: invalidation fence which will be signal on TLB invalidation
- * completion, can be NULL
+ * completion
* @start: start address
* @end: end address
* @asid: address space id
*
* Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can either use
- * the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait for
- * completion.
+ * TLB invalidation. Completion of TLB is asynchronous and caller can use
+ * the invalidation fence to wait for completion.
*
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: Negative error code on error, 0 on success
*/
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
@@ -312,11 +317,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
u32 action[MAX_TLB_INVALIDATION_LEN];
int len = 0;
+ xe_gt_assert(gt, fence);
+
/* Execlists not supported */
if (gt_to_xe(gt)->info.force_execlist) {
- if (fence)
- __invalidation_fence_signal(xe, fence);
-
+ __invalidation_fence_signal(xe, fence);
return 0;
}
@@ -382,12 +387,10 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
* @vma: VMA to invalidate
*
* Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can either use
- * the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait for
- * completion.
+ * TLB invalidation. Completion of TLB is asynchronous and caller can use
+ * the invalidation fence to wait for completion.
*
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: Negative error code on error, 0 on success
*/
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
@@ -401,43 +404,6 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
}
/**
- * xe_gt_tlb_invalidation_wait - Wait for TLB to complete
- * @gt: graphics tile
- * @seqno: seqno to wait which was returned from xe_gt_tlb_invalidation
- *
- * Wait for tlb_timeout_jiffies() for a TLB invalidation to complete.
- *
- * Return: 0 on success, -ETIME on TLB invalidation timeout
- */
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
-{
- struct xe_guc *guc = &gt->uc.guc;
- int ret;
-
- /* Execlists not supported */
- if (gt_to_xe(gt)->info.force_execlist)
- return 0;
-
- /*
- * XXX: See above, this algorithm only works if seqno are always in
- * order
- */
- ret = wait_event_timeout(guc->ct.wq,
- tlb_invalidation_seqno_past(gt, seqno),
- tlb_timeout_jiffies(gt));
- if (!ret) {
- struct drm_printer p = xe_gt_err_printer(gt);
-
- xe_gt_err(gt, "TLB invalidation time'd out, seqno=%d, recv=%d\n",
- seqno, gt->tlb_invalidation.seqno_recv);
- xe_guc_ct_print(&guc->ct, &p, true);
- return -ETIME;
- }
-
- return 0;
-}
-
-/**
* xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
* @guc: guc
* @msg: message indicating TLB invalidation done
@@ -480,12 +446,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
return 0;
}
- /*
- * wake_up_all() and wait_event_timeout() already have the correct
- * barriers.
- */
WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);
- wake_up_all(&guc->ct.wq);
list_for_each_entry_safe(fence, next,
&gt->tlb_invalidation.pending_fences, link) {
@@ -508,3 +469,59 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
return 0;
}
+
+static const char *
+invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
+{
+ return "xe";
+}
+
+static const char *
+invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
+{
+ return "invalidation_fence";
+}
+
+static const struct dma_fence_ops invalidation_fence_ops = {
+ .get_driver_name = invalidation_fence_get_driver_name,
+ .get_timeline_name = invalidation_fence_get_timeline_name,
+};
+
+/**
+ * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
+ * @gt: GT
+ * @fence: TLB invalidation fence to initialize
+ * @stack: fence is stack variable
+ *
+ * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
+ * must be called if the fence is not signaled.
+ */
+void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ bool stack)
+{
+ xe_pm_runtime_get_noresume(gt_to_xe(gt));
+
+ spin_lock_irq(&gt->tlb_invalidation.lock);
+ dma_fence_init(&fence->base, &invalidation_fence_ops,
+ &gt->tlb_invalidation.lock,
+ dma_fence_context_alloc(1), 1);
+ spin_unlock_irq(&gt->tlb_invalidation.lock);
+ INIT_LIST_HEAD(&fence->link);
+ if (stack)
+ set_bit(FENCE_STACK_BIT, &fence->base.flags);
+ else
+ dma_fence_get(&fence->base);
+ fence->gt = gt;
+}
+
+/**
+ * xe_gt_tlb_invalidation_fence_fini - Finalize TLB invalidation fence
+ * @fence: TLB invalidation fence to finalize
+ *
+ * Drop the PM reference the fence took during init.
+ */
+void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
+{
+ xe_pm_runtime_put(gt_to_xe(fence->gt));
+}
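The stack flag passed to xe_gt_tlb_invalidation_fence_init() decides ownership: a heap fence takes an extra dma_fence_get() at init which the signal path drops, while a stack fence skips both so a caller can wait synchronously on a local variable. A minimal sketch of the synchronous pattern, following the xe_gt_tlb_invalidation_ggtt() hunk above (sketch_sync_invalidation is hypothetical):

static int sketch_sync_invalidation(struct xe_gt *gt)
{
	struct xe_gt_tlb_invalidation_fence fence;
	int ret;

	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);	/* stack fence */
	ret = xe_gt_tlb_invalidation_guc(gt, &fence);
	if (ret < 0) {
		/* never sent, so drop the PM reference ourselves */
		xe_gt_tlb_invalidation_fence_fini(&fence);
		return ret;
	}

	/* the signal path calls fence_fini() before signaling */
	xe_gt_tlb_invalidation_fence_wait(&fence);
	return 0;
}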
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index bf3bebd9f985..a84065fa324c 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -23,7 +23,17 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
u64 start, u64 end, u32 asid);
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
+void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ bool stack);
+void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence);
+
+static inline void
+xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
+{
+ dma_fence_wait(&fence->base, false);
+}
+
#endif /* _XE_GT_TLB_INVALIDATION_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
index 934c828efe31..de6e825e0851 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
@@ -8,6 +8,8 @@
#include <linux/dma-fence.h>
+struct xe_gt;
+
/**
* struct xe_gt_tlb_invalidation_fence - XE GT TLB invalidation fence
*
@@ -17,6 +19,8 @@
struct xe_gt_tlb_invalidation_fence {
/** @base: dma fence base */
struct dma_fence base;
+ /** @gt: GT which fence belongs to */
+ struct xe_gt *gt;
/** @link: link into list of pending tlb fences */
struct list_head link;
/** @seqno: seqno of TLB invalidation to signal fence one */
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 7d2e937da1d8..64afc90ad2c5 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -327,6 +327,8 @@ static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
state == XE_GUC_CT_STATE_STOPPED);
+ if (ct->g2h_outstanding)
+ xe_pm_runtime_put(ct_to_xe(ct));
ct->g2h_outstanding = 0;
ct->state = state;
@@ -495,10 +497,15 @@ static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
{
xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
+ xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
+ (g2h_len && num_g2h));
if (g2h_len) {
lockdep_assert_held(&ct->fast_lock);
+ if (!ct->g2h_outstanding)
+ xe_pm_runtime_get_noresume(ct_to_xe(ct));
+
ct->ctbs.g2h.info.space -= g2h_len;
ct->g2h_outstanding += num_g2h;
}
@@ -511,7 +518,8 @@ static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
ct->ctbs.g2h.info.space += g2h_len;
- --ct->g2h_outstanding;
+ if (!--ct->g2h_outstanding)
+ xe_pm_runtime_put(ct_to_xe(ct));
}
static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
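Net effect of the CT changes: the device cannot runtime-suspend while any G2H response is outstanding. __g2h_reserve_space() takes a no-resume PM reference on the 0 -> nonzero transition of g2h_outstanding, __g2h_release_space() drops it when the counter returns to zero, and xe_guc_ct_set_state() drops the reference it would otherwise leak when it force-clears the counter during reset.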
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 8d7e7f4bbff7..6398629e6b4e 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1393,6 +1393,8 @@ static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
default:
XE_WARN_ON("Unknown message type");
}
+
+ xe_pm_runtime_put(guc_to_xe(exec_queue_to_guc(msg->private_data)));
}
static const struct drm_sched_backend_ops drm_sched_ops = {
@@ -1482,6 +1484,8 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
u32 opcode)
{
+ xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q)));
+
INIT_LIST_HEAD(&msg->link);
msg->opcode = opcode;
msg->private_data = q;
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index ade9e7a3a0ad..31a751a5de3f 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -1115,23 +1115,6 @@ struct invalidation_fence {
u32 asid;
};
-static const char *
-invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
-{
- return "xe";
-}
-
-static const char *
-invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
-{
- return "invalidation_fence";
-}
-
-static const struct dma_fence_ops invalidation_fence_ops = {
- .get_driver_name = invalidation_fence_get_driver_name,
- .get_timeline_name = invalidation_fence_get_timeline_name,
-};
-
static void invalidation_fence_cb(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
@@ -1170,15 +1153,8 @@ static int invalidation_fence_init(struct xe_gt *gt,
trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base);
- spin_lock_irq(&gt->tlb_invalidation.lock);
- dma_fence_init(&ifence->base.base, &invalidation_fence_ops,
- &gt->tlb_invalidation.lock,
- dma_fence_context_alloc(1), 1);
- spin_unlock_irq(&gt->tlb_invalidation.lock);
-
- INIT_LIST_HEAD(&ifence->base.link);
+ xe_gt_tlb_invalidation_fence_init(gt, &ifence->base, false);
- dma_fence_get(&ifence->base.base); /* Ref for caller */
ifence->fence = fence;
ifence->gt = gt;
ifence->start = start;
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index c4e018aa2982..e8d31e010860 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -53,14 +53,18 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
u64 value)
{
struct xe_user_fence *ufence;
+ u64 __user *ptr = u64_to_user_ptr(addr);
+
+ if (!access_ok(ptr, sizeof(ptr)))
+ return ERR_PTR(-EFAULT);
ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
if (!ufence)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ufence->xe = xe;
kref_init(&ufence->refcount);
- ufence->addr = u64_to_user_ptr(addr);
+ ufence->addr = ptr;
ufence->value = value;
ufence->mm = current->mm;
mmgrab(ufence->mm);
@@ -183,8 +187,8 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
} else {
sync->ufence = user_fence_create(xe, sync_in.addr,
sync_in.timeline_value);
- if (XE_IOCTL_DBG(xe, !sync->ufence))
- return -ENOMEM;
+ if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence)))
+ return PTR_ERR(sync->ufence);
}
break;
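user_fence_create() now distinguishes its failure modes: -EFAULT for a user address that fails access_ok() and -ENOMEM for allocation failure, returned as ERR_PTR values that the parse path propagates via PTR_ERR() instead of collapsing everything to -ENOMEM.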
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 5b166fa03684..c7561a56abaf 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1601,6 +1601,10 @@ static void vm_destroy_work_func(struct work_struct *w)
XE_WARN_ON(vm->pt_root[id]);
trace_xe_vm_free(vm);
+
+ if (vm->xef)
+ xe_file_put(vm->xef);
+
kfree(vm);
}
@@ -1916,7 +1920,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
}
args->vm_id = id;
- vm->xef = xef;
+ vm->xef = xe_file_get(xef);
/* Record BO memory for VM pagetable created against client */
for_each_tile(tile, xe, id)
@@ -3337,10 +3341,10 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
{
struct xe_device *xe = xe_vma_vm(vma)->xe;
struct xe_tile *tile;
+ struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE];
u32 tile_needs_invalidate = 0;
- int seqno[XE_MAX_TILES_PER_DEVICE];
u8 id;
- int ret;
+ int ret = 0;
xe_assert(xe, !xe_vma_is_null(vma));
trace_xe_vma_invalidate(vma);
@@ -3365,29 +3369,33 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
for_each_tile(tile, xe, id) {
if (xe_pt_zap_ptes(tile, vma)) {
- tile_needs_invalidate |= BIT(id);
xe_device_wmb(xe);
+ xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
+ &fence[id], true);
+
/*
* FIXME: We potentially need to invalidate multiple
* GTs within the tile
*/
- seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
- if (seqno[id] < 0)
- return seqno[id];
- }
- }
+ ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
+ &fence[id], vma);
+ if (ret < 0) {
+ xe_gt_tlb_invalidation_fence_fini(&fence[id]);
+ goto wait;
+ }
- for_each_tile(tile, xe, id) {
- if (tile_needs_invalidate & BIT(id)) {
- ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
- if (ret < 0)
- return ret;
+ tile_needs_invalidate |= BIT(id);
}
}
+wait:
+ for_each_tile(tile, xe, id)
+ if (tile_needs_invalidate & BIT(id))
+ xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+
vma->tile_invalidated = vma->tile_mask;
- return 0;
+ return ret;
}
struct xe_vm_snapshot {
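Note the ordering in the rewritten xe_vm_invalidate_vma(): each stack fence is initialized before its invalidation is issued, so the PM reference is held across the send; a failed send is fini'd immediately and the function falls through to the wait label so fences already issued are still waited on; and all waits happen in a second pass so the tiles invalidate in parallel rather than serially.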
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 365e37bba0f3..06e836e3e877 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -986,8 +986,10 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
return ret;
ret = clk_prepare_enable(gi2c->core_clk);
- if (ret)
+ if (ret) {
+ geni_icc_disable(&gi2c->se);
return ret;
+ }
ret = geni_se_resources_on(&gi2c->se);
if (ret) {
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 85b31edc558d..1df5b4204142 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1802,9 +1802,9 @@ static int tegra_i2c_probe(struct platform_device *pdev)
* domain.
*
* VI I2C device shouldn't be marked as IRQ-safe because VI I2C won't
- * be used for atomic transfers.
+ * be used for atomic transfers. An ACPI device is also not IRQ-safe.
*/
- if (!IS_VI(i2c_dev))
+ if (!IS_VI(i2c_dev) && !has_acpi_companion(i2c_dev->dev))
pm_runtime_irq_safe(i2c_dev->dev);
pm_runtime_enable(i2c_dev->dev);
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index cd679c13752e..81e9cc6e3164 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -170,6 +170,7 @@ void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
report_partial_fault(iopf_param, fault);
iopf_put_dev_fault_param(iopf_param);
/* A request that is not the last does not need to be ack'd */
+ return;
}
/*
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index c2c07bfa6471..f299ff393a6a 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1181,8 +1181,26 @@ static int do_resume(struct dm_ioctl *param)
suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
if (param->flags & DM_NOFLUSH_FLAG)
suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
- if (!dm_suspended_md(md))
- dm_suspend(md, suspend_flags);
+ if (!dm_suspended_md(md)) {
+ r = dm_suspend(md, suspend_flags);
+ if (r) {
+ down_write(&_hash_lock);
+ hc = dm_get_mdptr(md);
+ if (hc && !hc->new_map) {
+ hc->new_map = new_map;
+ new_map = NULL;
+ } else {
+ r = -ENXIO;
+ }
+ up_write(&_hash_lock);
+ if (new_map) {
+ dm_sync_table(md);
+ dm_table_destroy(new_map);
+ }
+ dm_put(md);
+ return r;
+ }
+ }
old_size = dm_get_size(md);
old_map = dm_swap_table(md, new_map);
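The do_resume() change stops ignoring dm_suspend() failures: on error the freshly loaded table is handed back to the hash cell so a later resume can still pick it up; if the cell is gone or already holds another new map, the ioctl returns -ENXIO and destroys the table after dm_sync_table().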
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 97fab2087df8..87bb90303435 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2737,7 +2737,7 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int ta
break;
if (signal_pending_state(task_state, current)) {
- r = -EINTR;
+ r = -ERESTARTSYS;
break;
}
@@ -2762,7 +2762,7 @@ static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_st
break;
if (signal_pending_state(task_state, current)) {
- r = -EINTR;
+ r = -ERESTARTSYS;
break;
}
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 04698fd03e60..d48c4fafc779 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -277,7 +277,7 @@ static void sm_metadata_destroy(struct dm_space_map *sm)
{
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
- kfree(smm);
+ kvfree(smm);
}
static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
@@ -772,7 +772,7 @@ struct dm_space_map *dm_sm_metadata_init(void)
{
struct sm_metadata *smm;
- smm = kmalloc(sizeof(*smm), GFP_KERNEL);
+ smm = kvmalloc(sizeof(*smm), GFP_KERNEL);
if (!smm)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7acfe7c9dc8d..761989d67906 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -617,6 +617,12 @@ static int choose_first_rdev(struct r1conf *conf, struct r1bio *r1_bio,
return -1;
}
+static bool rdev_in_recovery(struct md_rdev *rdev, struct r1bio *r1_bio)
+{
+ return !test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < r1_bio->sector + r1_bio->sectors;
+}
+
static int choose_bb_rdev(struct r1conf *conf, struct r1bio *r1_bio,
int *max_sectors)
{
@@ -635,6 +641,7 @@ static int choose_bb_rdev(struct r1conf *conf, struct r1bio *r1_bio,
rdev = conf->mirrors[disk].rdev;
if (!rdev || test_bit(Faulty, &rdev->flags) ||
+ rdev_in_recovery(rdev, r1_bio) ||
test_bit(WriteMostly, &rdev->flags))
continue;
@@ -673,7 +680,8 @@ static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio,
rdev = conf->mirrors[disk].rdev;
if (!rdev || test_bit(Faulty, &rdev->flags) ||
- !test_bit(WriteMostly, &rdev->flags))
+ !test_bit(WriteMostly, &rdev->flags) ||
+ rdev_in_recovery(rdev, r1_bio))
continue;
/* there are no bad blocks, we can use this disk */
@@ -733,9 +741,7 @@ static bool rdev_readable(struct md_rdev *rdev, struct r1bio *r1_bio)
if (!rdev || test_bit(Faulty, &rdev->flags))
return false;
- /* still in recovery */
- if (!test_bit(In_sync, &rdev->flags) &&
- rdev->recovery_offset < r1_bio->sector + r1_bio->sectors)
+ if (rdev_in_recovery(rdev, r1_bio))
return false;
/* don't read from slow disk unless have to */
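
The raid1 change above hinges on one predicate: a device still recovering is unusable for a read whose range extends past its recovery_offset. A minimal userspace sketch of that check (illustrative types, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

struct rdev_model { bool in_sync; sector_t recovery_offset; };
struct bio_model  { sector_t sector, sectors; };

/* Mirrors the new helper: still recovering, and the recovery point
 * has not yet covered the whole requested range. */
static bool rdev_in_recovery(const struct rdev_model *rdev,
			     const struct bio_model *bio)
{
	return !rdev->in_sync &&
	       rdev->recovery_offset < bio->sector + bio->sectors;
}

int main(void)
{
	struct rdev_model rdev = { .in_sync = false, .recovery_offset = 1000 };
	struct bio_model bio = { .sector = 900, .sectors = 200 };

	/* 900 + 200 = 1100 > 1000: the tail of the read is unrecovered */
	printf("in recovery: %d\n", rdev_in_recovery(&rdev, &bio)); /* 1 */
	return 0;
}
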
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index 22d83ac18eb7..fbf58012becd 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -23,40 +23,11 @@ static int dvb_usb_force_pid_filter_usage;
module_param_named(force_pid_filter_usage, dvb_usb_force_pid_filter_usage, int, 0444);
MODULE_PARM_DESC(force_pid_filter_usage, "force all dvb-usb-devices to use a PID filter, if any (default: 0).");
-static int dvb_usb_check_bulk_endpoint(struct dvb_usb_device *d, u8 endpoint)
-{
- if (endpoint) {
- int ret;
-
- ret = usb_pipe_type_check(d->udev, usb_sndbulkpipe(d->udev, endpoint));
- if (ret)
- return ret;
- ret = usb_pipe_type_check(d->udev, usb_rcvbulkpipe(d->udev, endpoint));
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static void dvb_usb_clear_halt(struct dvb_usb_device *d, u8 endpoint)
-{
- if (endpoint) {
- usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, endpoint));
- usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, endpoint));
- }
-}
-
static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
{
struct dvb_usb_adapter *adap;
int ret, n, o;
- ret = dvb_usb_check_bulk_endpoint(d, d->props.generic_bulk_ctrl_endpoint);
- if (ret)
- return ret;
- ret = dvb_usb_check_bulk_endpoint(d, d->props.generic_bulk_ctrl_endpoint_response);
- if (ret)
- return ret;
for (n = 0; n < d->props.num_adapters; n++) {
adap = &d->adapter[n];
adap->dev = d;
@@ -132,8 +103,10 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
* when reloading the driver w/o replugging the device
* sometimes a timeout occurs, this helps
*/
- dvb_usb_clear_halt(d, d->props.generic_bulk_ctrl_endpoint);
- dvb_usb_clear_halt(d, d->props.generic_bulk_ctrl_endpoint_response);
+ if (d->props.generic_bulk_ctrl_endpoint != 0) {
+ usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
+ usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
+ }
return 0;
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 5204fda51da3..339d126414d4 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -2085,16 +2085,6 @@ err_invoke:
return err;
}
-static int is_attach_rejected(struct fastrpc_user *fl)
-{
- /* Check if the device node is non-secure */
- if (!fl->is_secure_dev) {
- dev_dbg(&fl->cctx->rpdev->dev, "untrusted app trying to attach to privileged DSP PD\n");
- return -EACCES;
- }
- return 0;
-}
-
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -2107,19 +2097,13 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
err = fastrpc_invoke(fl, argp);
break;
case FASTRPC_IOCTL_INIT_ATTACH:
- err = is_attach_rejected(fl);
- if (!err)
- err = fastrpc_init_attach(fl, ROOT_PD);
+ err = fastrpc_init_attach(fl, ROOT_PD);
break;
case FASTRPC_IOCTL_INIT_ATTACH_SNS:
- err = is_attach_rejected(fl);
- if (!err)
- err = fastrpc_init_attach(fl, SENSORS_PD);
+ err = fastrpc_init_attach(fl, SENSORS_PD);
break;
case FASTRPC_IOCTL_INIT_CREATE_STATIC:
- err = is_attach_rejected(fl);
- if (!err)
- err = fastrpc_init_create_static_process(fl, argp);
+ err = fastrpc_init_create_static_process(fl, argp);
break;
case FASTRPC_IOCTL_INIT_CREATE:
err = fastrpc_init_create_process(fl, argp);
diff --git a/drivers/misc/lkdtm/refcount.c b/drivers/misc/lkdtm/refcount.c
index 5cd488f54cfa..8f744bee6fbd 100644
--- a/drivers/misc/lkdtm/refcount.c
+++ b/drivers/misc/lkdtm/refcount.c
@@ -182,6 +182,21 @@ static void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
check_negative(&neg, 3);
}
+/*
+ * A refcount_sub_and_test() of zero when the counter is already at zero
+ * should act like the refcount_sub_and_test() cases above that go negative.
+ */
+static void lkdtm_REFCOUNT_SUB_AND_TEST_ZERO(void)
+{
+ refcount_t neg = REFCOUNT_INIT(0);
+
+ pr_info("attempting bad refcount_sub_and_test() at zero\n");
+ if (refcount_sub_and_test(0, &neg))
+ pr_warn("Weird: refcount_sub_and_test() reported zero\n");
+
+ check_negative(&neg, 0);
+}
+
static void check_from_zero(refcount_t *ref)
{
switch (refcount_read(ref)) {
@@ -400,6 +415,7 @@ static struct crashtype crashtypes[] = {
CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_ZERO),
CRASHTYPE(REFCOUNT_INC_ZERO),
CRASHTYPE(REFCOUNT_ADD_ZERO),
CRASHTYPE(REFCOUNT_INC_SATURATED),
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index d9d3e30fd47a..e3f95d2cc2c1 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -40,6 +40,10 @@
#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */
#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */
+/* MII Block subblock */
+#define VSC73XX_BLOCK_MII_INTERNAL 0x0 /* Internal MDIO subblock */
+#define VSC73XX_BLOCK_MII_EXTERNAL 0x1 /* External MDIO subblock */
+
#define CPU_PORT 6 /* CPU port */
/* MAC Block registers */
@@ -225,6 +229,8 @@
#define VSC73XX_MII_CMD 0x1
#define VSC73XX_MII_DATA 0x2
+#define VSC73XX_MII_STAT_BUSY BIT(3)
+
/* Arbiter block 5 registers */
#define VSC73XX_ARBEMPTY 0x0c
#define VSC73XX_ARBDISC 0x0e
@@ -299,6 +305,7 @@
#define IS_739X(a) (IS_7395(a) || IS_7398(a))
#define VSC73XX_POLL_SLEEP_US 1000
+#define VSC73XX_MDIO_POLL_SLEEP_US 5
#define VSC73XX_POLL_TIMEOUT_US 10000
struct vsc73xx_counter {
@@ -527,6 +534,22 @@ static int vsc73xx_detect(struct vsc73xx *vsc)
return 0;
}
+static int vsc73xx_mdio_busy_check(struct vsc73xx *vsc)
+{
+ int ret, err;
+ u32 val;
+
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 || !(val & VSC73XX_MII_STAT_BUSY),
+ VSC73XX_MDIO_POLL_SLEEP_US,
+ VSC73XX_POLL_TIMEOUT_US, false, vsc,
+ VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_STAT, &val);
+ if (ret)
+ return ret;
+ return err;
+}
+
static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
struct vsc73xx *vsc = ds->priv;
@@ -534,12 +557,20 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
u32 val;
int ret;
+ ret = vsc73xx_mdio_busy_check(vsc);
+ if (ret)
+ return ret;
+
/* Setting bit 26 means "read" */
cmd = BIT(26) | (phy << 21) | (regnum << 16);
ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
if (ret)
return ret;
- msleep(2);
+
+ ret = vsc73xx_mdio_busy_check(vsc);
+ if (ret)
+ return ret;
+
ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
if (ret)
return ret;
@@ -563,18 +594,11 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
u32 cmd;
int ret;
- /* It was found through tedious experiments that this router
- * chip really hates to have it's PHYs reset. They
- * never recover if that happens: autonegotiation stops
- * working after a reset. Just filter out this command.
- * (Resetting the whole chip is OK.)
- */
- if (regnum == 0 && (val & BIT(15))) {
- dev_info(vsc->dev, "reset PHY - disallowed\n");
- return 0;
- }
+ ret = vsc73xx_mdio_busy_check(vsc);
+ if (ret)
+ return ret;
- cmd = (phy << 21) | (regnum << 16);
+ cmd = (phy << 21) | (regnum << 16) | val;
ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
if (ret)
return ret;
@@ -957,6 +981,11 @@ static void vsc73xx_mac_link_up(struct phylink_config *config,
if (duplex == DUPLEX_FULL)
val |= VSC73XX_MAC_CFG_FDX;
+ else
+		/* Per the datasheet ("Port Mode Procedure" in 5.6.2),
+		 * this bit is configured only for half duplex.
+		 */
+ val |= VSC73XX_MAC_CFG_WEXC_DIS;
/* This routine is described in the datasheet (below ARBDISC register
* description)
@@ -967,7 +996,6 @@ static void vsc73xx_mac_link_up(struct phylink_config *config,
get_random_bytes(&seed, 1);
val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
val |= VSC73XX_MAC_CFG_SEED_LOAD;
- val |= VSC73XX_MAC_CFG_WEXC_DIS;
/* Those bits are responsible for MTU only. Kernel takes care about MTU,
* let's enable +8 bytes frame length unconditionally.
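
The vsc73xx change replaces a fixed msleep(2) with a bounded busy-bit poll. A self-contained sketch of the same pattern, with a simulated status read standing in for vsc73xx_read():

#include <stdio.h>
#include <unistd.h>

#define MII_STAT_BUSY   (1u << 3)
#define POLL_SLEEP_US   5
#define POLL_TIMEOUT_US 10000

/* Stand-in for the MDIO status read; clears BUSY after a few polls. */
static int read_mii_stat(unsigned int *val)
{
	static int polls;
	*val = (++polls < 4) ? MII_STAT_BUSY : 0;
	return 0; /* a real register read could fail and return an error */
}

/* Poll the BUSY bit with a bounded total wait, like read_poll_timeout(). */
static int mdio_busy_check(void)
{
	unsigned int val, waited = 0;
	int err;

	for (;;) {
		err = read_mii_stat(&val);
		if (err < 0)
			return err;		/* propagate read errors */
		if (!(val & MII_STAT_BUSY))
			return 0;		/* controller is idle */
		if (waited >= POLL_TIMEOUT_US)
			return -1;		/* -ETIMEDOUT in the kernel */
		usleep(POLL_SLEEP_US);
		waited += POLL_SLEEP_US;
	}
}

int main(void)
{
	printf("busy check: %d\n", mdio_busy_check()); /* 0 */
	return 0;
}
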
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 11665be3a22c..dcd3f54ed0cf 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -5250,8 +5250,8 @@ static int __maybe_unused macb_suspend(struct device *dev)
if (bp->wol & MACB_WOL_ENABLED) {
/* Check for IP address in WOL ARP mode */
idev = __in_dev_get_rcu(bp->dev);
- if (idev && idev->ifa_list)
- ifa = rcu_access_pointer(idev->ifa_list);
+ if (idev)
+ ifa = rcu_dereference(idev->ifa_list);
if ((bp->wolopts & WAKE_ARP) && !ifa) {
netdev_err(netdev, "IP address not assigned as required by WoL walk ARP\n");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index a40c266c37f2..608cc6af5af1 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1054,18 +1054,12 @@ static int phy_interface_mode(u8 lmac_type)
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
- struct lmac *lmac, **priv;
+ struct lmac *lmac;
u64 cfg;
lmac = &bgx->lmac[lmacid];
lmac->bgx = bgx;
- lmac->netdev = alloc_netdev_dummy(sizeof(struct lmac *));
- if (!lmac->netdev)
- return -ENOMEM;
- priv = netdev_priv(lmac->netdev);
- *priv = lmac;
-
if ((lmac->lmac_type == BGX_MODE_SGMII) ||
(lmac->lmac_type == BGX_MODE_QSGMII) ||
(lmac->lmac_type == BGX_MODE_RGMII)) {
@@ -1191,7 +1185,6 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
(lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
phy_disconnect(lmac->phydev);
- free_netdev(lmac->netdev);
lmac->phydev = NULL;
}
@@ -1653,6 +1646,23 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bgx_get_qlm_mode(bgx);
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ struct lmac *lmacp, **priv;
+
+ lmacp = &bgx->lmac[lmac];
+ lmacp->netdev = alloc_netdev_dummy(sizeof(struct lmac *));
+
+ if (!lmacp->netdev) {
+ for (int i = 0; i < lmac; i++)
+ free_netdev(bgx->lmac[i].netdev);
+ err = -ENOMEM;
+ goto err_enable;
+ }
+
+ priv = netdev_priv(lmacp->netdev);
+ *priv = lmacp;
+ }
+
err = bgx_init_phy(bgx);
if (err)
goto err_enable;
@@ -1692,8 +1702,10 @@ static void bgx_remove(struct pci_dev *pdev)
u8 lmac;
/* Disable all LMACs */
- for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
bgx_lmac_disable(bgx, lmac);
+ free_netdev(bgx->lmac[lmac].netdev);
+ }
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a5fc0209d628..4cbc4d069a1f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -5724,6 +5724,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ hns3_nic_net_stop(netdev);
+
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index e132c2f09560..cc7f46c0b35f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1598,8 +1598,7 @@ static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev)
{
u32 loop_para[HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE] = {0};
struct hclge_mod_reg_common_msg msg;
- u8 i, j, num;
- u32 loop_time;
+ u8 i, j, num, loop_time;
num = ARRAY_SIZE(hclge_ssu_reg_common_msg);
for (i = 0; i < num; i++) {
@@ -1609,7 +1608,8 @@ static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev)
loop_time = 1;
loop_para[0] = 0;
if (msg.need_para) {
- loop_time = hdev->ae_dev->dev_specs.tnl_num;
+ loop_time = min(hdev->ae_dev->dev_specs.tnl_num,
+ HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE);
for (j = 0; j < loop_time; j++)
loop_para[j] = j + 1;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 82574ce0194f..6c33195a1168 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2653,8 +2653,17 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
+
+ if (ret)
+ return ret;
- return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
+ hdev->hw.mac.req_speed = speed;
+ hdev->hw.mac.req_duplex = duplex;
+
+ return 0;
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2956,17 +2965,20 @@ static int hclge_mac_init(struct hclge_dev *hdev)
if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
- ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
- hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
- if (ret)
- return ret;
-
if (hdev->hw.mac.support_autoneg) {
ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
if (ret)
return ret;
}
+ if (!hdev->hw.mac.autoneg) {
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed,
+ hdev->hw.mac.req_duplex,
+ hdev->hw.mac.lane_num);
+ if (ret)
+ return ret;
+ }
+
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
@@ -11444,7 +11456,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_free_irq_vectors(pdev);
- pci_release_mem_regions(pdev);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
}
@@ -11516,8 +11528,8 @@ static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
hdev->reset_type = HNAE3_NONE_RESET;
- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
+ if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 85fb11de43a1..80079657afeb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -191,6 +191,9 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
+ hdev->hw.mac.req_speed = (u32)speed;
+ hdev->hw.mac.req_duplex = (u8)duplex;
+
ret = hclge_cfg_flowctrl(hdev);
if (ret)
netdev_err(netdev, "failed to configure flow control.\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 3735d2fed11f..094a7c7b5592 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1747,8 +1747,8 @@ static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
ret);
hdev->reset_type = HNAE3_NONE_RESET;
- clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
+ if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ up(&hdev->reset_sem);
}
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 5f92b3c7c3d4..511384f3ec5c 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -404,6 +404,12 @@
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
+/* Retry Buffer Control */
+#define IGC_RETX_CTL 0x041C
+#define IGC_RETX_CTL_WATERMARK_MASK 0xF
+#define IGC_RETX_CTL_QBVFULLTH_SHIFT 8 /* QBV Retry Buffer Full Threshold */
+#define IGC_RETX_CTL_QBVFULLEN 0x1000 /* Enable QBV Retry Buffer Full Threshold */
+
/* Transmit Scheduling Latency */
/* Latency between transmission scheduling (LaunchTime) and the time
* the packet is transmitted to the network in nanosecond.
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 8daf938afc36..dfd6c00b4205 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6315,12 +6315,16 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!validate_schedule(adapter, qopt))
return -EINVAL;
+ igc_ptp_read(adapter, &now);
+
+ if (igc_tsn_is_taprio_activated_by_user(adapter) &&
+ is_base_time_past(qopt->base_time, &now))
+ adapter->qbv_config_change_errors++;
+
adapter->cycle_time = qopt->cycle_time;
adapter->base_time = qopt->base_time;
adapter->taprio_offload_enable = true;
- igc_ptp_read(adapter, &now);
-
for (n = 0; n < qopt->num_entries; n++) {
struct tc_taprio_sched_entry *e = &qopt->entries[n];
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index 22cefb1eeedf..d68fa7f3d5f0 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -49,12 +49,19 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
return new_flags;
}
+static bool igc_tsn_is_tx_mode_in_tsn(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ return !!(rd32(IGC_TQAVCTRL) & IGC_TQAVCTRL_TRANSMIT_MODE_TSN);
+}
+
void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u16 txoffset;
- if (!is_any_launchtime(adapter))
+ if (!igc_tsn_is_tx_mode_in_tsn(adapter))
return;
switch (adapter->link_speed) {
@@ -78,6 +85,23 @@ void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
wr32(IGC_GTXOFFSET, txoffset);
}
+static void igc_tsn_restore_retx_default(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 retxctl;
+
+ retxctl = rd32(IGC_RETX_CTL) & IGC_RETX_CTL_WATERMARK_MASK;
+ wr32(IGC_RETX_CTL, retxctl);
+}
+
+bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ return (rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
+ adapter->taprio_offload_enable;
+}
+
/* Returns the TSN specific registers to their default values after
* the adapter is reset.
*/
@@ -91,6 +115,9 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
+ if (igc_is_device_id_i226(hw))
+ igc_tsn_restore_retx_default(adapter);
+
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
@@ -111,6 +138,25 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
return 0;
}
+/* To partially work around an i226 HW erratum, reduce MAC internal buffering
+ * from 192 bytes to 88 bytes by setting the RETX_CTL register, per the
+ * recommendation from:
+ * a) Ethernet Controller I225/I226 Specification Update Rev 2.1
+ * Item 9: TSN: Packet Transmission Might Cross the Qbv Window
+ * b) I225/6 SW User Manual Rev 1.2.4: Section 8.11.5 Retry Buffer Control
+ */
+static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 retxctl, watermark;
+
+ retxctl = rd32(IGC_RETX_CTL);
+ watermark = retxctl & IGC_RETX_CTL_WATERMARK_MASK;
+ /* Set QBVFULLTH value using watermark and set QBVFULLEN */
+ retxctl |= (watermark << IGC_RETX_CTL_QBVFULLTH_SHIFT) |
+ IGC_RETX_CTL_QBVFULLEN;
+ wr32(IGC_RETX_CTL, retxctl);
+}
+
static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
@@ -123,6 +169,9 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
+ if (igc_is_device_id_i226(hw))
+ igc_tsn_set_retx_qbvfullthreshold(adapter);
+
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
u32 txqctl = 0;
@@ -262,14 +311,6 @@ skip_cbs:
s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
base_time = ktime_add_ns(base_time, (n + 1) * cycle);
-
- /* Increase the counter if scheduling into the past while
- * Gate Control List (GCL) is running.
- */
- if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
- (adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) &&
- (adapter->qbv_count > 1))
- adapter->qbv_config_change_errors++;
} else {
if (igc_is_device_id_i226(hw)) {
ktime_t adjust_time, expires_time;
@@ -331,15 +372,22 @@ int igc_tsn_reset(struct igc_adapter *adapter)
return err;
}
-int igc_tsn_offload_apply(struct igc_adapter *adapter)
+static bool igc_tsn_will_tx_mode_change(struct igc_adapter *adapter)
{
- struct igc_hw *hw = &adapter->hw;
+ bool any_tsn_enabled = !!(igc_tsn_new_flags(adapter) &
+ IGC_FLAG_TSN_ANY_ENABLED);
- /* Per I225/6 HW Design Section 7.5.2.1, transmit mode
- * cannot be changed dynamically. Require reset the adapter.
+ return (any_tsn_enabled && !igc_tsn_is_tx_mode_in_tsn(adapter)) ||
+ (!any_tsn_enabled && igc_tsn_is_tx_mode_in_tsn(adapter));
+}
+
+int igc_tsn_offload_apply(struct igc_adapter *adapter)
+{
+	/* Per the I225/6 HW Design Section 7.5.2.1 guideline, if the tx mode
+	 * changes from legacy->tsn or tsn->legacy, the adapter needs a reset.
*/
if (netif_running(adapter->netdev) &&
- (igc_is_device_id_i225(hw) || !adapter->qbv_count)) {
+ igc_tsn_will_tx_mode_change(adapter)) {
schedule_work(&adapter->reset_task);
return 0;
}
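
The RETX_CTL manipulation above packs the existing watermark into the QBVFULLTH field and sets the enable bit. A standalone sketch of the bit arithmetic (the watermark value is hypothetical):

#include <stdio.h>

#define IGC_RETX_CTL_WATERMARK_MASK   0xF
#define IGC_RETX_CTL_QBVFULLTH_SHIFT  8
#define IGC_RETX_CTL_QBVFULLEN        0x1000

/* Copy the watermark field into QBVFULLTH and set the enable bit,
 * as igc_tsn_set_retx_qbvfullthreshold() does on the real register. */
static unsigned int set_qbv_full_threshold(unsigned int retxctl)
{
	unsigned int watermark = retxctl & IGC_RETX_CTL_WATERMARK_MASK;

	return retxctl | (watermark << IGC_RETX_CTL_QBVFULLTH_SHIFT) |
	       IGC_RETX_CTL_QBVFULLEN;
}

int main(void)
{
	unsigned int retxctl = 0x6; /* hypothetical watermark of 6 */

	/* 0x6 | (0x6 << 8) | 0x1000 = 0x1606 */
	printf("RETX_CTL: 0x%x\n", set_qbv_full_threshold(retxctl));
	return 0;
}

Restoring the default, as igc_tsn_restore_retx_default() does, is the inverse: mask the register back down to the watermark field alone.
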
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
index b53e6af560b7..98ec845a86bf 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -7,5 +7,6 @@
int igc_tsn_offload_apply(struct igc_adapter *adapter);
int igc_tsn_reset(struct igc_adapter *adapter);
void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
+bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter);
#endif /* _IGC_BASE_H */
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b06e24562973..d8be0e4dcb07 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -946,15 +946,13 @@ jme_udpsum(struct sk_buff *skb)
if (skb->protocol != htons(ETH_P_IP))
return csum;
skb_set_network_header(skb, ETH_HLEN);
- if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
- (skb->len < (ETH_HLEN +
- (ip_hdr(skb)->ihl << 2) +
- sizeof(struct udphdr)))) {
+
+ if (ip_hdr(skb)->protocol != IPPROTO_UDP ||
+ skb->len < (ETH_HLEN + ip_hdrlen(skb) + sizeof(struct udphdr))) {
skb_reset_network_header(skb);
return csum;
}
- skb_set_transport_header(skb,
- ETH_HLEN + (ip_hdr(skb)->ihl << 2));
+ skb_set_transport_header(skb, ETH_HLEN + ip_hdrlen(skb));
csum = udp_hdr(skb)->check;
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 61334a71058c..e212a4ba9275 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -2666,14 +2666,15 @@ mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_pri
{
struct mtk_wed_flow_block_priv *priv = cb_priv;
struct flow_cls_offload *cls = type_data;
- struct mtk_wed_hw *hw = priv->hw;
+ struct mtk_wed_hw *hw = NULL;
- if (!tc_can_offload(priv->dev))
+ if (!priv || !tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (type != TC_SETUP_CLSFLOWER)
return -EOPNOTSUPP;
+ hw = priv->hw;
return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
}
@@ -2729,6 +2730,7 @@ mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
kfree(block_cb->cb_priv);
+ block_cb->cb_priv = NULL;
}
return 0;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 5fd82c67b6ab..bb5da42edc23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -130,7 +130,7 @@ struct page_pool;
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
-#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+#define MLX5E_DEFAULT_SHAMPO_TIMEOUT 1024
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 6c9ccccca81e..64b62ed17b07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -928,7 +928,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, log_headers_entry_size,
mlx5e_shampo_get_log_hd_entry_size(mdev, params));
MLX5_SET(rqc, rqc, reservation_timeout,
- params->packet_merge.timeout);
+ mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_SHAMPO_TIMEOUT));
MLX5_SET(rqc, rqc, shampo_match_criteria_type,
params->packet_merge.shampo.match_criteria_type);
MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
@@ -1087,6 +1087,20 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
return wqebbs;
}
+#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+{
+ int i;
+
+ /* The supported periods are organized in ascending order */
+ for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
+ if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
+ break;
+
+ return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
+}
+
static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
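
The relocated mlx5e_choose_lro_timeout() picks the smallest supported period that is at least the wanted timeout, falling back to the largest. A sketch with made-up period values in place of the device capability array:

#include <stdio.h>

#define LRO_TIMEOUT_ARR_SIZE 4

/* Scan the ascending period list and stop at the first one that is
 * large enough; if none is, the loop ends on the last (largest) entry. */
static unsigned int choose_lro_timeout(const unsigned int *periods,
				       unsigned int wanted)
{
	int i;

	for (i = 0; i < LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (periods[i] >= wanted)
			break;
	return periods[i];
}

int main(void)
{
	/* illustrative ascending values, not real device capabilities */
	unsigned int periods[LRO_TIMEOUT_ARR_SIZE] = { 8, 64, 512, 2048 };

	printf("%u\n", choose_lro_timeout(periods, 1024)); /* 2048 */
	printf("%u\n", choose_lro_timeout(periods, 4096)); /* 2048 (capped) */
	return 0;
}
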
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 749b2ec0436e..3f8986f9d862 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -108,6 +108,7 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param);
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 22918b2ef7f1..09433b91be17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -146,7 +146,9 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
return err;
}
+ mutex_lock(&priv->state_lock);
err = mlx5e_safe_reopen_channels(priv);
+ mutex_unlock(&priv->state_lock);
if (!err) {
to_ctx->status = 1; /* all channels recovered */
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 3eccdadc0357..773624bb2c5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -734,7 +734,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
if (num_tuples <= 0) {
netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
__func__, num_tuples);
- return num_tuples;
+ return num_tuples < 0 ? num_tuples : -EINVAL;
}
eth_ft = get_flow_table(priv, fs, num_tuples);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6f686fabed44..5df904639b0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5167,18 +5167,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
#endif
};
-static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
-{
- int i;
-
- /* The supported periods are organized in ascending order */
- for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
- if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
- break;
-
- return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
-}
-
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
{
struct mlx5e_params *params = &priv->channels.params;
@@ -5308,7 +5296,7 @@ static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i,
struct mlx5e_rq_stats *rq_stats;
ASSERT_RTNL();
- if (mlx5e_is_uplink_rep(priv))
+ if (mlx5e_is_uplink_rep(priv) || !priv->stats_nch)
return;
channel_stats = priv->channel_stats[i];
@@ -5328,6 +5316,9 @@ static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i,
struct mlx5e_sq_stats *sq_stats;
ASSERT_RTNL();
+ if (!priv->stats_nch)
+ return;
+
/* no special case needed for ptp htb etc since txq2sq_stats is kept up
* to date for active sq_stats, otherwise get_base_stats takes care of
* inactive sqs.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index f6deb5a3f820..eeb0b7ea05f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -126,7 +126,7 @@ static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
}
static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm,
- u8 *host_buses, u8 *sd_group)
+ u8 *host_buses)
{
u32 out[MLX5_ST_SZ_DW(mpir_reg)];
int err;
@@ -135,10 +135,6 @@ static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm,
if (err)
return err;
- err = mlx5_query_nic_vport_sd_group(dev, sd_group);
- if (err)
- return err;
-
*sdm = MLX5_GET(mpir_reg, out, sdm);
*host_buses = MLX5_GET(mpir_reg, out, host_buses);
@@ -166,19 +162,23 @@ static int sd_init(struct mlx5_core_dev *dev)
if (mlx5_core_is_ecpf(dev))
return 0;
+ err = mlx5_query_nic_vport_sd_group(dev, &sd_group);
+ if (err)
+ return err;
+
+ if (!sd_group)
+ return 0;
+
if (!MLX5_CAP_MCAM_REG(dev, mpir))
return 0;
- err = mlx5_query_sd(dev, &sdm, &host_buses, &sd_group);
+ err = mlx5_query_sd(dev, &sdm, &host_buses);
if (err)
return err;
if (!sdm)
return 0;
- if (!sd_group)
- return 0;
-
group_id = mlx5_sd_group_id(dev, sd_group);
if (!mlx5_sd_is_supported(dev, host_buses)) {
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
index bc94e75a7aeb..e7777700ee18 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -40,6 +40,7 @@
*/
#define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0
#define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1
+#define MLXBF_GIGE_MAX_FILTER_IDX 3
/* Define for broadcast MAC literal */
#define BCAST_MAC_ADDR 0xFFFFFFFFFFFF
@@ -175,6 +176,13 @@ enum mlxbf_gige_res {
int mlxbf_gige_mdio_probe(struct platform_device *pdev,
struct mlxbf_gige *priv);
void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);
+
+void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv);
+void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv);
+void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index);
+void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index);
void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 dmac);
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index b157f0f1c5a8..385a56ac7348 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -168,6 +168,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
if (err)
goto napi_deinit;
+ mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX);
+ mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX);
+ mlxbf_gige_enable_multicast_rx(priv);
+
/* Set bits in INT_EN that we care about */
int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
@@ -379,6 +383,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
void __iomem *plu_base;
void __iomem *base;
int addr, phy_irq;
+ unsigned int i;
int err;
base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
@@ -423,6 +428,11 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;
+ for (i = 0; i <= MLXBF_GIGE_MAX_FILTER_IDX; i++)
+ mlxbf_gige_disable_mac_rx_filter(priv, i);
+ mlxbf_gige_disable_multicast_rx(priv);
+ mlxbf_gige_disable_promisc(priv);
+
/* Write initial MAC address to hardware */
mlxbf_gige_initial_mac(priv);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index 98a8681c21b9..4d14cb13fd64 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -62,6 +62,8 @@
#define MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL BIT(1)
#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START 0x0520
#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END 0x0528
+#define MLXBF_GIGE_RX_MAC_FILTER_GENERAL 0x0530
+#define MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST BIT(1)
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC 0x0540
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN BIT(0)
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS 0x0548
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
index 699984358493..eb62620b63c7 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
@@ -11,15 +11,31 @@
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
-void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
- unsigned int index, u64 dmac)
+void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv)
{
void __iomem *base = priv->base;
- u64 control;
+ u64 data;
- /* Write destination MAC to specified MAC RX filter */
- writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
- (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
+ data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+ data |= MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
+ writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+}
+
+void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv)
+{
+ void __iomem *base = priv->base;
+ u64 data;
+
+ data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+ data &= ~MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
+ writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+}
+
+void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index)
+{
+ void __iomem *base = priv->base;
+ u64 control;
/* Enable MAC receive filter mask for specified index */
control = readq(base + MLXBF_GIGE_CONTROL);
@@ -27,6 +43,28 @@ void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
writeq(control, base + MLXBF_GIGE_CONTROL);
}
+void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index)
+{
+ void __iomem *base = priv->base;
+ u64 control;
+
+ /* Disable MAC receive filter mask for specified index */
+ control = readq(base + MLXBF_GIGE_CONTROL);
+ control &= ~(MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
+ writeq(control, base + MLXBF_GIGE_CONTROL);
+}
+
+void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index, u64 dmac)
+{
+ void __iomem *base = priv->base;
+
+ /* Write destination MAC to specified MAC RX filter */
+ writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
+ (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
+}
+
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 *dmac)
{
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index d2f07e179e86..39f56973746d 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -599,7 +599,11 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
else
*headroom = XDP_PACKET_HEADROOM;
- *alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
+ *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
+
+	/* A page pool backs the RX buffers here, so alloc_size is at least PAGE_SIZE */
+ if (*alloc_size < PAGE_SIZE)
+ *alloc_size = PAGE_SIZE;
*datasize = mtu + ETH_HLEN;
}
@@ -1788,7 +1792,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
struct mana_cq *cq = context;
- u8 arm_bit;
int w;
WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
@@ -1799,16 +1802,23 @@ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
mana_poll_tx_cq(cq);
w = cq->work_done;
-
- if (w < cq->budget &&
- napi_complete_done(&cq->napi, w)) {
- arm_bit = SET_ARM_BIT;
- } else {
- arm_bit = 0;
+ cq->work_done_since_doorbell += w;
+
+ if (w < cq->budget) {
+ mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
+ cq->work_done_since_doorbell = 0;
+ napi_complete_done(&cq->napi, w);
+ } else if (cq->work_done_since_doorbell >
+ cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) {
+		/* MANA hardware requires at least one doorbell ring every 8
+		 * wraparounds of the CQ even if there is no need to arm the CQ.
+		 * This driver rings the doorbell as soon as it has exceeded
+		 * 4 wraparounds.
+		 */
+ mana_gd_ring_cq(gdma_queue, 0);
+ cq->work_done_since_doorbell = 0;
}
- mana_gd_ring_cq(gdma_queue, arm_bit);
-
return w;
}
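
The doorbell logic above accumulates completions and forces an unarmed doorbell after more than four CQ wraparounds, staying well within the hardware's eight-wraparound requirement. A sketch of the threshold arithmetic (COMP_ENTRY_SIZE assumed to be 64 here):

#include <stdbool.h>
#include <stdio.h>

#define COMP_ENTRY_SIZE 64 /* assumed CQE size for this example */

/* Ring the doorbell once accumulated completions exceed four CQ
 * wraparounds, even when the CQ does not need to be re-armed. */
static bool need_unarmed_doorbell(unsigned int done_since_doorbell,
				  unsigned int queue_size)
{
	return done_since_doorbell > queue_size / COMP_ENTRY_SIZE * 4;
}

int main(void)
{
	unsigned int queue_size = 4096;	/* 64 CQEs per wraparound */
	unsigned int threshold = queue_size / COMP_ENTRY_SIZE * 4;

	printf("threshold: %u completions\n", threshold); /* 256 */
	printf("at 256: %d, at 257: %d\n",
	       need_unarmed_doorbell(256, queue_size),
	       need_unarmed_doorbell(257, queue_size)); /* 0, 1 */
	return 0;
}
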
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index fa5500decc96..c7d9221fafdc 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -160,16 +160,16 @@
#define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */
#define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */
#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
-#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
-#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
+#define XAE_EMMC_OFFSET 0x00000410 /* MAC speed configuration */
+#define XAE_PHYC_OFFSET 0x00000414 /* RX Max Frame Configuration */
#define XAE_ID_OFFSET 0x000004F8 /* Identification register */
-#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
-#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
-#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
-#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
+#define XAE_MDIO_MC_OFFSET 0x00000500 /* MDIO Setup */
+#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MDIO Control */
+#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MDIO Write Data */
+#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MDIO Read Data */
#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
-#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
+#define XAE_FMI_OFFSET 0x00000708 /* Frame Filter Control */
#define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */
#define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */
@@ -308,7 +308,7 @@
*/
#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF
-/* Bit masks for Axi Ethernet FMI register */
+/* Bit masks for Axi Ethernet FMC register */
#define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */
#define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 427b91aca50d..0696faf60013 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1269,6 +1269,9 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb_cow_head(skb, dev->needed_headroom))
goto tx_err;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
skb_reset_inner_headers(skb);
/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 897b979ec03c..3b5fcaf0dd36 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -237,16 +237,6 @@ static int vsc739x_config_init(struct phy_device *phydev)
return 0;
}
-static int vsc73xx_config_aneg(struct phy_device *phydev)
-{
- /* The VSC73xx switches does not like to be instructed to
- * do autonegotiation in any way, it prefers that you just go
- * with the power-on/reset defaults. Writing some registers will
- * just make autonegotiation permanently fail.
- */
- return 0;
-}
-
/* This adds a skew for both TX and RX clocks, so the skew should only be
* applied to "rgmii-id" interfaces. It may not work as expected
* on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces.
@@ -444,7 +434,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
- .config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
}, {
@@ -453,7 +442,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
- .config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
}, {
@@ -462,7 +450,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
- .config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
}, {
@@ -471,7 +458,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
- .config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
}, {
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
index ec20953e0f82..4f032b16a8a0 100644
--- a/drivers/net/pse-pd/pse_core.c
+++ b/drivers/net/pse-pd/pse_core.c
@@ -401,9 +401,14 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev,
rdesc->ops = &pse_pi_ops;
rdesc->owner = pcdev->owner;
- rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_CURRENT;
- rinit_data->constraints.max_uA = MAX_PI_CURRENT;
+ rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
+
+ if (pcdev->ops->pi_set_current_limit) {
+ rinit_data->constraints.valid_ops_mask |=
+ REGULATOR_CHANGE_CURRENT;
+ rinit_data->constraints.max_uA = MAX_PI_CURRENT;
+ }
+
rinit_data->supply_regulator = "vpwr";
rconfig.dev = pcdev->dev;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 687d70cfc556..46afb95ffabe 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -286,10 +286,11 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
return;
}
- if (urb->actual_length <= IPHETH_IP_ALIGN) {
- dev->net->stats.rx_length_errors++;
- return;
- }
+ /* iPhone may periodically send URBs with no payload
+ * on the "bulk in" endpoint. It is safe to ignore them.
+ */
+ if (urb->actual_length == 0)
+ goto rx_submit;
/* RX URBs starting with 0x00 0x01 do not encapsulate Ethernet frames,
* but rather are control frames. Their purpose is not documented, and
@@ -298,7 +299,8 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
* URB received from the bulk IN endpoint.
*/
if (unlikely
- (((char *)urb->transfer_buffer)[0] == 0 &&
+ (urb->actual_length == 4 &&
+ ((char *)urb->transfer_buffer)[0] == 0 &&
((char *)urb->transfer_buffer)[1] == 1))
goto rx_submit;
@@ -306,7 +308,6 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
if (retval != 0) {
dev_err(&dev->intf->dev, "%s: callback retval: %d\n",
__func__, retval);
- return;
}
rx_submit:
@@ -354,13 +355,14 @@ static int ipheth_carrier_set(struct ipheth_device *dev)
0x02, /* index */
dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE,
IPHETH_CTRL_TIMEOUT);
- if (retval < 0) {
+ if (retval <= 0) {
dev_err(&dev->intf->dev, "%s: usb_control_msg: %d\n",
__func__, retval);
return retval;
}
- if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) {
+ if ((retval == 1 && dev->ctrl_buf[0] == IPHETH_CARRIER_ON) ||
+ (retval >= 2 && dev->ctrl_buf[1] == IPHETH_CARRIER_ON)) {
netif_carrier_on(dev->net);
if (dev->tx_urb->status != -EINPROGRESS)
netif_wake_queue(dev->net);
@@ -475,8 +477,8 @@ static int ipheth_close(struct net_device *net)
{
struct ipheth_device *dev = netdev_priv(net);
- cancel_delayed_work_sync(&dev->carrier_work);
netif_stop_queue(net);
+ cancel_delayed_work_sync(&dev->carrier_work);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index d08c04343e90..44406e0b4a34 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -162,6 +162,60 @@ static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb)
return 0;
}
+static void ath12k_dp_tx_move_payload(struct sk_buff *skb,
+ unsigned long delta,
+ bool head)
+{
+ unsigned long len = skb->len;
+
+ if (head) {
+ skb_push(skb, delta);
+ memmove(skb->data, skb->data + delta, len);
+ skb_trim(skb, len);
+ } else {
+ skb_put(skb, delta);
+ memmove(skb->data + delta, skb->data, len);
+ skb_pull(skb, delta);
+ }
+}
+
+static int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
+ struct sk_buff **pskb)
+{
+ u32 iova_mask = ab->hw_params->iova_mask;
+ unsigned long offset, delta1, delta2;
+ struct sk_buff *skb2, *skb = *pskb;
+ unsigned int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+ int ret = 0;
+
+ offset = (unsigned long)skb->data & iova_mask;
+ delta1 = offset;
+ delta2 = iova_mask - offset + 1;
+
+ if (headroom >= delta1) {
+ ath12k_dp_tx_move_payload(skb, delta1, true);
+ } else if (tailroom >= delta2) {
+ ath12k_dp_tx_move_payload(skb, delta2, false);
+ } else {
+ skb2 = skb_realloc_headroom(skb, iova_mask);
+ if (!skb2) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ offset = (unsigned long)skb2->data & iova_mask;
+ if (offset)
+ ath12k_dp_tx_move_payload(skb2, offset, true);
+ *pskb = skb2;
+ }
+
+out:
+ return ret;
+}
+
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
struct sk_buff *skb)
{
@@ -184,6 +238,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
bool tcl_ring_retry;
bool msdu_ext_desc = false;
bool add_htt_metadata = false;
+ u32 iova_mask = ab->hw_params->iova_mask;
if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
@@ -279,6 +334,23 @@ tcl_ring_sel:
goto fail_remove_tx_buf;
}
+ if (iova_mask &&
+ (unsigned long)skb->data & iova_mask) {
+ ret = ath12k_dp_tx_align_payload(ab, &skb);
+ if (ret) {
+ ath12k_warn(ab, "failed to align TX buffer %d\n", ret);
+			/* don't bail out; give the original buffer
+			 * a chance even if unaligned.
+			 */
+ goto map;
+ }
+
+		/* hdr points to the wrong place after alignment,
+		 * so refresh it for later use.
+ */
+ hdr = (void *)skb->data;
+ }
+map:
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, ti.paddr)) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
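
ath12k_dp_tx_align_payload() computes two candidate moves: delta1 bytes toward the head or delta2 bytes toward the tail, whichever fits the available room. A sketch of that mask arithmetic, assuming a 128-byte PCIe max payload:

#include <stdio.h>

/* How far the payload must move to clear the low IOVA bits: either
 * delta1 bytes toward the head or delta2 bytes toward the tail. */
static void align_deltas(unsigned long addr, unsigned long iova_mask,
			 unsigned long *delta1, unsigned long *delta2)
{
	unsigned long offset = addr & iova_mask;

	*delta1 = offset;			/* pull data into headroom */
	*delta2 = iova_mask - offset + 1;	/* or push it toward tailroom */
}

int main(void)
{
	unsigned long mask = 128 - 1;	/* ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1 */
	unsigned long d1, d2;

	align_deltas(0x1000 + 40, mask, &d1, &d2);
	printf("head move: %lu, tail move: %lu\n", d1, d2); /* 40, 88 */
	return 0;
}
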
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index 2e11ea763574..7b0b6a7f4701 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -924,6 +924,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.acpi_guid = NULL,
.supports_dynamic_smps_6ghz = true,
+
+ .iova_mask = 0,
},
{
.name = "wcn7850 hw2.0",
@@ -1000,6 +1002,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.acpi_guid = &wcn7850_uuid,
.supports_dynamic_smps_6ghz = false,
+
+ .iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1,
},
{
.name = "qcn9274 hw2.0",
@@ -1072,6 +1076,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.acpi_guid = NULL,
.supports_dynamic_smps_6ghz = true,
+
+ .iova_mask = 0,
},
};
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index e792eb6b249b..b1d302c48326 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -96,6 +96,8 @@
#define ATH12K_M3_FILE "m3.bin"
#define ATH12K_REGDB_FILE_NAME "regdb.bin"
+#define ATH12K_PCIE_MAX_PAYLOAD_SIZE 128
+
enum ath12k_hw_rate_cck {
ATH12K_HW_RATE_CCK_LP_11M = 0,
ATH12K_HW_RATE_CCK_LP_5_5M,
@@ -215,6 +217,8 @@ struct ath12k_hw_params {
const guid_t *acpi_guid;
bool supports_dynamic_smps_6ghz;
+
+ u32 iova_mask;
};
struct ath12k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 8106297f0bc1..ce41c8153080 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -9193,6 +9193,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
hw->vif_data_size = sizeof(struct ath12k_vif);
hw->sta_data_size = sizeof(struct ath12k_sta);
+ hw->extra_tx_headroom = ab->hw_params->iova_mask;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 1585a5653ee4..d4cc5fa92341 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4320,9 +4320,16 @@ brcmf_pmksa_v3_op(struct brcmf_if *ifp, struct cfg80211_pmksa *pmksa,
/* Single PMK operation */
pmk_op->count = cpu_to_le16(1);
length += sizeof(struct brcmf_pmksa_v3);
- memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN);
- memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
- pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN;
+ if (pmksa->bssid)
+ memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN);
+ if (pmksa->pmkid) {
+ memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
+ pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN;
+ }
+ if (pmksa->ssid && pmksa->ssid_len) {
+ memcpy(pmk_op->pmk[0].ssid.SSID, pmksa->ssid, pmksa->ssid_len);
+ pmk_op->pmk[0].ssid.SSID_len = pmksa->ssid_len;
+ }
pmk_op->pmk[0].time_left = cpu_to_le32(alive ? BRCMF_PMKSA_NO_EXPIRY : 0);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index b59de4f80b4b..27a7e0b5b3d5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -639,7 +639,8 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue);
-dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr);
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+ unsigned int len);
struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta,
u8 **hdr, unsigned int hdr_room);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 2e780fb2da42..b1846abb99b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -168,6 +168,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int data_offset = 0;
dma_addr_t start_hdr_phys;
u16 length, amsdu_pad;
u8 *start_hdr;
@@ -260,7 +261,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
int ret;
tb_len = min_t(unsigned int, tso.size, data_left);
- tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data);
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset,
+ tb_len);
/* Not a real mapping error, use direct comparison */
if (unlikely(tb_phys == DMA_MAPPING_ERROR))
goto out_err;
@@ -272,6 +274,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
data_left -= tb_len;
+ data_offset += tb_len;
tso_build_data(skb, &tso, tb_len);
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 22d482ae53d9..9fe050f0ddc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1814,23 +1814,31 @@ out:
/**
* iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
* @sgt: scatter gather table
- * @addr: Virtual address
+ * @offset: Offset into the mapped memory (i.e. SKB payload data)
+ * @len: Length of the area
*
- * Find the entry that includes the address for the given address and return
- * correct physical address for the TB entry.
+ * Find the DMA address that corresponds to the SKB payload data at the
+ * position given by @offset.
*
* Returns: Address for TB entry
*/
-dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr)
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+ unsigned int len)
{
struct scatterlist *sg;
+ unsigned int sg_offset = 0;
int i;
+ /*
+ * Search the mapped DMA areas in the SG for the area that contains the
+ * data at offset with the given length.
+ */
for_each_sgtable_dma_sg(sgt, sg, i) {
- if (addr >= sg_virt(sg) &&
- (u8 *)addr < (u8 *)sg_virt(sg) + sg_dma_len(sg))
- return sg_dma_address(sg) +
- ((unsigned long)addr - (unsigned long)sg_virt(sg));
+ if (offset >= sg_offset &&
+ offset + len <= sg_offset + sg_dma_len(sg))
+ return sg_dma_address(sg) + offset - sg_offset;
+
+ sg_offset += sg_dma_len(sg);
}
WARN_ON_ONCE(1);
@@ -1875,7 +1883,9 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
- sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
+ /* Only map the data, not the header (it is copied to the TSO page) */
+ sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
+ skb->data_len);
if (WARN_ON_ONCE(sgt->orig_nents <= 0))
return NULL;
@@ -1900,6 +1910,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int data_offset = 0;
u16 length, iv_len, amsdu_pad;
dma_addr_t start_hdr_phys;
u8 *start_hdr, *pos_hdr;
@@ -2000,7 +2011,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
data_left);
dma_addr_t tb_phys;
- tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data);
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, size);
/* Not a real mapping error, use direct comparison */
if (unlikely(tb_phys == DMA_MAPPING_ERROR))
return -EINVAL;
@@ -2011,6 +2022,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
tb_phys, size);
data_left -= size;
+ data_offset += size;
tso_build_data(skb, &tso, size);
}
}
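
The reworked iwl_pcie_get_sgt_tb_phys() walks the scatterlist by cumulative offset rather than by virtual address, and rejects areas that straddle a segment boundary. A userspace model of the lookup (segment addresses and lengths are invented):

#include <stdio.h>

struct seg { unsigned long dma_addr; unsigned int len; };

/* Walk the mapped segments and return the DMA address of the area
 * [offset, offset + len), or 0 if it crosses a segment boundary. */
static unsigned long sgt_tb_phys(const struct seg *segs, int nsegs,
				 unsigned int offset, unsigned int len)
{
	unsigned int sg_offset = 0;
	int i;

	for (i = 0; i < nsegs; i++) {
		if (offset >= sg_offset &&
		    offset + len <= sg_offset + segs[i].len)
			return segs[i].dma_addr + offset - sg_offset;
		sg_offset += segs[i].len;
	}
	return 0; /* the kernel returns DMA_MAPPING_ERROR here */
}

int main(void)
{
	struct seg segs[] = { { 0x10000, 4096 }, { 0x30000, 4096 } };

	/* offset 5000 lands 904 bytes into the second segment */
	printf("0x%lx\n", sgt_tb_phys(segs, 2, 5000, 512)); /* 0x30388 */
	return 0;
}
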
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 1bab93d049df..23b228804289 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -1183,7 +1183,7 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw,
struct inet6_dev *idev)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_dev *dev = mvif->phy->dev;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct inet6_ifaddr *ifa;
struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
struct sk_buff *skb;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c
index 700c6e2bcad1..ff458fb8514d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c
@@ -181,11 +181,11 @@ static void _rtl92du_init_queue_reserved_page(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u32 txqpagenum, txqpageunit;
u32 txqremainingpage;
+ u32 value32 = 0;
u32 numhq = 0;
u32 numlq = 0;
u32 numnq = 0;
u32 numpubq;
- u32 value32;
if (rtlhal->macphymode != SINGLEMAC_SINGLEPHY) {
numpubq = NORMAL_PAGE_NUM_PUBQ_92D_DUAL_MAC;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 1ae8b2351654..210fb77f51ba 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -498,7 +498,7 @@ static int pmem_attach_disk(struct device *dev,
}
if (fua)
lim.features |= BLK_FEAT_FUA;
- if (is_nd_pfn(dev))
+ if (is_nd_pfn(dev) || pmem_should_map_pages(dev))
lim.features |= BLK_FEAT_DAX;
if (!devm_request_mem_region(dev, res->start, resource_size(res),
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index c94203ce65bb..8fd63100ba8f 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -344,7 +344,8 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
struct device_node *p;
const __be32 *addr;
u32 intsize;
- int i, res;
+ int i, res, addr_len;
+ __be32 addr_buf[3] = { 0 };
pr_debug("of_irq_parse_one: dev=%pOF, index=%d\n", device, index);
@@ -353,13 +354,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
return of_irq_parse_oldworld(device, index, out_irq);
/* Get the reg property (if any) */
- addr = of_get_property(device, "reg", NULL);
+ addr = of_get_property(device, "reg", &addr_len);
+
+	/* Prevent an out-of-bounds read when the interrupt parent uses a longer address size */
+ if (addr_len > (3 * sizeof(__be32)))
+ addr_len = 3 * sizeof(__be32);
+ if (addr)
+ memcpy(addr_buf, addr, addr_len);
/* Try the new-style interrupts-extended first */
res = of_parse_phandle_with_args(device, "interrupts-extended",
"#interrupt-cells", index, out_irq);
if (!res)
- return of_irq_parse_raw(addr, out_irq);
+ return of_irq_parse_raw(addr_buf, out_irq);
/* Look for the interrupt parent. */
p = of_irq_find_parent(device);
@@ -389,7 +396,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
/* Check if there are any interrupt-map translations to process */
- res = of_irq_parse_raw(addr, out_irq);
+ res = of_irq_parse_raw(addr_buf, out_irq);
out:
of_node_put(p);
return res;
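
The fix above bounds-copies the "reg" property into a local 3-cell buffer before handing it to of_irq_parse_raw(), so a parent bus with larger #address-cells can no longer cause an over-read of a short property. A compilable sketch of the clamp-and-copy pattern, under that assumption:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_CELLS 3

static void copy_reg_cells(uint32_t dst[MAX_CELLS],
                           const uint32_t *src, int src_len_bytes)
{
        memset(dst, 0, MAX_CELLS * sizeof(uint32_t));
        if (!src)
                return;
        if (src_len_bytes > (int)(MAX_CELLS * sizeof(uint32_t)))
                src_len_bytes = MAX_CELLS * sizeof(uint32_t);
        memcpy(dst, src, src_len_bytes);
}

int main(void)
{
        uint32_t reg[1] = { 0xdeadbeef };   /* one-cell "reg" property */
        uint32_t buf[MAX_CELLS];

        /* a consumer expecting 3 cells sees the short reg zero-padded */
        copy_reg_cells(buf, reg, sizeof(reg));
        printf("%08x %08x %08x\n", buf[0], buf[1], buf[2]);
        return 0;
}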
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 665fa9524986..ddfccc226751 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -477,6 +477,7 @@ config LENOVO_YMC
tristate "Lenovo Yoga Tablet Mode Control"
depends on ACPI_WMI
depends on INPUT
+ depends on IDEAPAD_LAPTOP
select INPUT_SPARSEKMAP
help
This driver maps the Tablet Mode Control switch to SW_TABLET_MODE input
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
index a3dec14c3004..3c153fb1425e 100644
--- a/drivers/platform/x86/amd/pmf/spc.c
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -150,36 +150,26 @@ static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_
return 0;
}
-static int amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+static void amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
{
struct amd_sfh_info sfh_info;
- int ret;
+
+ /* Get the latest information from SFH */
+ in->ev_info.user_present = false;
/* Get ALS data */
- ret = amd_get_sfh_info(&sfh_info, MT_ALS);
- if (!ret)
+ if (!amd_get_sfh_info(&sfh_info, MT_ALS))
in->ev_info.ambient_light = sfh_info.ambient_light;
else
- return ret;
+ dev_dbg(dev->dev, "ALS is not enabled/detected\n");
/* get HPD data */
- ret = amd_get_sfh_info(&sfh_info, MT_HPD);
- if (ret)
- return ret;
-
- switch (sfh_info.user_present) {
- case SFH_NOT_DETECTED:
- in->ev_info.user_present = 0xff; /* assume no sensors connected */
- break;
- case SFH_USER_PRESENT:
- in->ev_info.user_present = 1;
- break;
- case SFH_USER_AWAY:
- in->ev_info.user_present = 0;
- break;
+ if (!amd_get_sfh_info(&sfh_info, MT_HPD)) {
+ if (sfh_info.user_present == SFH_USER_PRESENT)
+ in->ev_info.user_present = true;
+ } else {
+ dev_dbg(dev->dev, "HPD is not enabled/detected\n");
}
-
- return 0;
}
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
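
The rework turns sensor collection into a best-effort fill: a missing ALS or HPD sensor now logs at debug level and leaves a sane default instead of aborting the whole table. A simplified standalone model, with stub getters standing in for amd_get_sfh_info():

#include <stdbool.h>
#include <stdio.h>

struct ev_info {
        int ambient_light;
        bool user_present;
};

/* stubs standing in for amd_get_sfh_info(); 0 means success */
static int get_als(int *lux) { (void)lux; return -1; }  /* ALS absent */
static int get_hpd(bool *present) { *present = true; return 0; }

static void fill_sensor_info(struct ev_info *in)
{
        int lux;
        bool present;

        in->user_present = false;        /* default when HPD is missing */

        if (!get_als(&lux))
                in->ambient_light = lux;
        else
                fprintf(stderr, "ALS is not enabled/detected\n");

        if (!get_hpd(&present))
                in->user_present = present;
        else
                fprintf(stderr, "HPD is not enabled/detected\n");
}

int main(void)
{
        struct ev_info in = { .ambient_light = 0 };

        fill_sensor_info(&in);
        printf("user_present=%d\n", in.user_present);
        return 0;
}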
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 1ace711f7442..98ec30fce9fd 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -126,6 +126,7 @@ struct ideapad_rfk_priv {
struct ideapad_private {
struct acpi_device *adev;
+ struct mutex vpc_mutex; /* protects the VPC calls */
struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM];
struct platform_device *platform_device;
@@ -146,6 +147,7 @@ struct ideapad_private {
bool touchpad_ctrl_via_ec : 1;
bool ctrl_ps2_aux_port : 1;
bool usb_charging : 1;
+ bool ymc_ec_trigger : 1;
} features;
struct {
bool initialized;
@@ -194,6 +196,12 @@ MODULE_PARM_DESC(touchpad_ctrl_via_ec,
"Enable registering a 'touchpad' sysfs-attribute which can be used to manually "
"tell the EC to enable/disable the touchpad. This may not work on all models.");
+static bool ymc_ec_trigger __read_mostly;
+module_param(ymc_ec_trigger, bool, 0444);
+MODULE_PARM_DESC(ymc_ec_trigger,
+ "Enable EC triggering work-around to force emitting tablet mode events. "
+ "If you need this please report this to: platform-driver-x86@vger.kernel.org");
+
/*
* shared data
*/
@@ -294,6 +302,8 @@ static int debugfs_status_show(struct seq_file *s, void *data)
struct ideapad_private *priv = s->private;
unsigned long value;
+ guard(mutex)(&priv->vpc_mutex);
+
if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value))
seq_printf(s, "Backlight max: %lu\n", value);
if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value))
@@ -412,7 +422,8 @@ static ssize_t camera_power_show(struct device *dev,
unsigned long result;
int err;
- err = read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result);
+ scoped_guard(mutex, &priv->vpc_mutex)
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result);
if (err)
return err;
@@ -431,7 +442,8 @@ static ssize_t camera_power_store(struct device *dev,
if (err)
return err;
- err = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state);
+ scoped_guard(mutex, &priv->vpc_mutex)
+ err = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state);
if (err)
return err;
@@ -484,7 +496,8 @@ static ssize_t fan_mode_show(struct device *dev,
unsigned long result;
int err;
- err = read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result);
+ scoped_guard(mutex, &priv->vpc_mutex)
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result);
if (err)
return err;
@@ -506,7 +519,8 @@ static ssize_t fan_mode_store(struct device *dev,
if (state > 4 || state == 3)
return -EINVAL;
- err = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state);
+ scoped_guard(mutex, &priv->vpc_mutex)
+ err = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state);
if (err)
return err;
@@ -591,7 +605,8 @@ static ssize_t touchpad_show(struct device *dev,
unsigned long result;
int err;
- err = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &result);
+ scoped_guard(mutex, &priv->vpc_mutex)
+ err = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &result);
if (err)
return err;
@@ -612,7 +627,8 @@ static ssize_t touchpad_store(struct device *dev,
if (err)
return err;
- err = write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, state);
+ scoped_guard(mutex, &priv->vpc_mutex)
+ err = write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, state);
if (err)
return err;
@@ -1005,6 +1021,8 @@ static int ideapad_rfk_set(void *data, bool blocked)
struct ideapad_rfk_priv *priv = data;
int opcode = ideapad_rfk_data[priv->dev].opcode;
+ guard(mutex)(&priv->priv->vpc_mutex);
+
return write_ec_cmd(priv->priv->adev->handle, opcode, !blocked);
}
@@ -1018,6 +1036,8 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv)
int i;
if (priv->features.hw_rfkill_switch) {
+ guard(mutex)(&priv->vpc_mutex);
+
if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, &hw_blocked))
return;
hw_blocked = !hw_blocked;
@@ -1191,8 +1211,9 @@ static void ideapad_input_novokey(struct ideapad_private *priv)
{
unsigned long long_pressed;
- if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed))
- return;
+ scoped_guard(mutex, &priv->vpc_mutex)
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed))
+ return;
if (long_pressed)
ideapad_input_report(priv, 17);
@@ -1204,8 +1225,9 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
{
unsigned long bit, value;
- if (read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value))
- return;
+ scoped_guard(mutex, &priv->vpc_mutex)
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value))
+ return;
for_each_set_bit (bit, &value, 16) {
switch (bit) {
@@ -1238,6 +1260,8 @@ static int ideapad_backlight_get_brightness(struct backlight_device *blightdev)
unsigned long now;
int err;
+ guard(mutex)(&priv->vpc_mutex);
+
err = read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
if (err)
return err;
@@ -1250,6 +1274,8 @@ static int ideapad_backlight_update_status(struct backlight_device *blightdev)
struct ideapad_private *priv = bl_get_data(blightdev);
int err;
+ guard(mutex)(&priv->vpc_mutex);
+
err = write_ec_cmd(priv->adev->handle, VPCCMD_W_BL,
blightdev->props.brightness);
if (err)
@@ -1327,6 +1353,8 @@ static void ideapad_backlight_notify_power(struct ideapad_private *priv)
if (!blightdev)
return;
+ guard(mutex)(&priv->vpc_mutex);
+
if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
return;
@@ -1339,7 +1367,8 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
/* if we control brightness via acpi video driver */
if (!priv->blightdev)
- read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
+ scoped_guard(mutex, &priv->vpc_mutex)
+ read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
else
backlight_force_update(priv->blightdev, BACKLIGHT_UPDATE_HOTKEY);
}
@@ -1564,7 +1593,8 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv, bool send_
int ret;
/* Without reading from EC touchpad LED doesn't switch state */
- ret = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value);
+ scoped_guard(mutex, &priv->vpc_mutex)
+ ret = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value);
if (ret)
return;
@@ -1592,16 +1622,92 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv, bool send_
priv->r_touchpad_val = value;
}
+static const struct dmi_system_id ymc_ec_trigger_quirk_dmi_table[] = {
+ {
+ /* Lenovo Yoga 7 14ARB7 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82QF"),
+ },
+ },
+ {
+ /* Lenovo Yoga 7 14ACN6 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82N7"),
+ },
+ },
+ { }
+};
+
+static void ideapad_laptop_trigger_ec(void)
+{
+ struct ideapad_private *priv;
+ int ret;
+
+ guard(mutex)(&ideapad_shared_mutex);
+
+ priv = ideapad_shared;
+ if (!priv)
+ return;
+
+ if (!priv->features.ymc_ec_trigger)
+ return;
+
+ scoped_guard(mutex, &priv->vpc_mutex)
+ ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_YMC, 1);
+ if (ret)
+ dev_warn(&priv->platform_device->dev, "Could not write YMC: %d\n", ret);
+}
+
+static int ideapad_laptop_nb_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ switch (action) {
+ case IDEAPAD_LAPTOP_YMC_EVENT:
+ ideapad_laptop_trigger_ec();
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block ideapad_laptop_notifier = {
+ .notifier_call = ideapad_laptop_nb_notify,
+};
+
+static BLOCKING_NOTIFIER_HEAD(ideapad_laptop_chain_head);
+
+int ideapad_laptop_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&ideapad_laptop_chain_head, nb);
+}
+EXPORT_SYMBOL_NS_GPL(ideapad_laptop_register_notifier, IDEAPAD_LAPTOP);
+
+int ideapad_laptop_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&ideapad_laptop_chain_head, nb);
+}
+EXPORT_SYMBOL_NS_GPL(ideapad_laptop_unregister_notifier, IDEAPAD_LAPTOP);
+
+void ideapad_laptop_call_notifier(unsigned long action, void *data)
+{
+ blocking_notifier_call_chain(&ideapad_laptop_chain_head, action, data);
+}
+EXPORT_SYMBOL_NS_GPL(ideapad_laptop_call_notifier, IDEAPAD_LAPTOP);
+
static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
{
struct ideapad_private *priv = data;
unsigned long vpc1, vpc2, bit;
- if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
- return;
+ scoped_guard(mutex, &priv->vpc_mutex) {
+ if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
+ return;
- if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
- return;
+ if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
+ return;
+ }
vpc1 = (vpc2 << 8) | vpc1;
@@ -1728,6 +1834,8 @@ static void ideapad_check_features(struct ideapad_private *priv)
priv->features.ctrl_ps2_aux_port =
ctrl_ps2_aux_port || dmi_check_system(ctrl_ps2_aux_port_list);
priv->features.touchpad_ctrl_via_ec = touchpad_ctrl_via_ec;
+ priv->features.ymc_ec_trigger =
+ ymc_ec_trigger || dmi_check_system(ymc_ec_trigger_quirk_dmi_table);
if (!read_ec_data(handle, VPCCMD_R_FAN, &val))
priv->features.fan_mode = true;
@@ -1906,6 +2014,10 @@ static int ideapad_acpi_add(struct platform_device *pdev)
priv->adev = adev;
priv->platform_device = pdev;
+ err = devm_mutex_init(&pdev->dev, &priv->vpc_mutex);
+ if (err)
+ return err;
+
ideapad_check_features(priv);
err = ideapad_sysfs_init(priv);
@@ -1974,6 +2086,8 @@ static int ideapad_acpi_add(struct platform_device *pdev)
if (err)
goto shared_init_failed;
+ ideapad_laptop_register_notifier(&ideapad_laptop_notifier);
+
return 0;
shared_init_failed:
@@ -2006,6 +2120,8 @@ static void ideapad_acpi_remove(struct platform_device *pdev)
struct ideapad_private *priv = dev_get_drvdata(&pdev->dev);
int i;
+ ideapad_laptop_unregister_notifier(&ideapad_laptop_notifier);
+
ideapad_shared_exit(priv);
acpi_remove_notify_handler(priv->adev->handle,
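
The new vpc_mutex relies on the kernel's scope-based lock guards from <linux/cleanup.h>. A short sketch of the two forms used throughout this patch, on a hypothetical private struct:

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct demo_priv {                       /* hypothetical, for illustration */
        struct mutex vpc_mutex;
        unsigned long cached;
};

static int demo_read(struct demo_priv *priv, unsigned long *val)
{
        guard(mutex)(&priv->vpc_mutex);  /* held until any return below */

        *val = priv->cached;
        return 0;
}

static void demo_update(struct demo_priv *priv, unsigned long val)
{
        /* lock only around the shared access, not the whole function */
        scoped_guard(mutex, &priv->vpc_mutex)
                priv->cached = val;
}

guard() suits functions whose every exit should drop the lock; scoped_guard() keeps the critical section down to the single statement or block that touches the EC.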
diff --git a/drivers/platform/x86/ideapad-laptop.h b/drivers/platform/x86/ideapad-laptop.h
index 4498a96de597..948cc61800a9 100644
--- a/drivers/platform/x86/ideapad-laptop.h
+++ b/drivers/platform/x86/ideapad-laptop.h
@@ -12,6 +12,15 @@
#include <linux/acpi.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
+#include <linux/notifier.h>
+
+enum ideapad_laptop_notifier_actions {
+ IDEAPAD_LAPTOP_YMC_EVENT,
+};
+
+int ideapad_laptop_register_notifier(struct notifier_block *nb);
+int ideapad_laptop_unregister_notifier(struct notifier_block *nb);
+void ideapad_laptop_call_notifier(unsigned long action, void *data);
enum {
VPCCMD_R_VPC1 = 0x10,
diff --git a/drivers/platform/x86/lenovo-ymc.c b/drivers/platform/x86/lenovo-ymc.c
index e1fbc35504d4..e0bbd6a14a89 100644
--- a/drivers/platform/x86/lenovo-ymc.c
+++ b/drivers/platform/x86/lenovo-ymc.c
@@ -20,32 +20,10 @@
#define LENOVO_YMC_QUERY_INSTANCE 0
#define LENOVO_YMC_QUERY_METHOD 0x01
-static bool ec_trigger __read_mostly;
-module_param(ec_trigger, bool, 0444);
-MODULE_PARM_DESC(ec_trigger, "Enable EC triggering work-around to force emitting tablet mode events");
-
static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force loading on boards without a convertible DMI chassis-type");
-static const struct dmi_system_id ec_trigger_quirk_dmi_table[] = {
- {
- /* Lenovo Yoga 7 14ARB7 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "82QF"),
- },
- },
- {
- /* Lenovo Yoga 7 14ACN6 */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "82N7"),
- },
- },
- { }
-};
-
static const struct dmi_system_id allowed_chasis_types_dmi_table[] = {
{
.matches = {
@@ -62,21 +40,8 @@ static const struct dmi_system_id allowed_chasis_types_dmi_table[] = {
struct lenovo_ymc_private {
struct input_dev *input_dev;
- struct acpi_device *ec_acpi_dev;
};
-static void lenovo_ymc_trigger_ec(struct wmi_device *wdev, struct lenovo_ymc_private *priv)
-{
- int err;
-
- if (!priv->ec_acpi_dev)
- return;
-
- err = write_ec_cmd(priv->ec_acpi_dev->handle, VPCCMD_W_YMC, 1);
- if (err)
- dev_warn(&wdev->dev, "Could not write YMC: %d\n", err);
-}
-
static const struct key_entry lenovo_ymc_keymap[] = {
/* Laptop */
{ KE_SW, 0x01, { .sw = { SW_TABLET_MODE, 0 } } },
@@ -125,11 +90,9 @@ static void lenovo_ymc_notify(struct wmi_device *wdev, union acpi_object *data)
free_obj:
kfree(obj);
- lenovo_ymc_trigger_ec(wdev, priv);
+ ideapad_laptop_call_notifier(IDEAPAD_LAPTOP_YMC_EVENT, &code);
}
-static void acpi_dev_put_helper(void *p) { acpi_dev_put(p); }
-
static int lenovo_ymc_probe(struct wmi_device *wdev, const void *ctx)
{
struct lenovo_ymc_private *priv;
@@ -143,29 +106,10 @@ static int lenovo_ymc_probe(struct wmi_device *wdev, const void *ctx)
return -ENODEV;
}
- ec_trigger |= dmi_check_system(ec_trigger_quirk_dmi_table);
-
priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- if (ec_trigger) {
- pr_debug("Lenovo YMC enable EC triggering.\n");
- priv->ec_acpi_dev = acpi_dev_get_first_match_dev("VPC2004", NULL, -1);
-
- if (!priv->ec_acpi_dev) {
- dev_err(&wdev->dev, "Could not find EC ACPI device.\n");
- return -ENODEV;
- }
- err = devm_add_action_or_reset(&wdev->dev,
- acpi_dev_put_helper, priv->ec_acpi_dev);
- if (err) {
- dev_err(&wdev->dev,
- "Could not clean up EC ACPI device: %d\n", err);
- return err;
- }
- }
-
input_dev = devm_input_allocate_device(&wdev->dev);
if (!input_dev)
return -ENOMEM;
@@ -192,7 +136,6 @@ static int lenovo_ymc_probe(struct wmi_device *wdev, const void *ctx)
dev_set_drvdata(&wdev->dev, priv);
/* Report the state for the first time on probe */
- lenovo_ymc_trigger_ec(wdev, priv);
lenovo_ymc_notify(wdev, NULL);
return 0;
}
@@ -217,3 +160,4 @@ module_wmi_driver(lenovo_ymc_driver);
MODULE_AUTHOR("Gergo Koteles <soyer@irl.hu>");
MODULE_DESCRIPTION("Lenovo Yoga Mode Control driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IDEAPAD_LAPTOP);
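
With the EC trigger moved into ideapad-laptop, lenovo-ymc now only publishes an event on a blocking notifier chain and the ideapad driver reacts to it. The distilled pattern, with illustrative names:

#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(demo_chain);

/* provider side: exported by the module that owns the hardware */
int demo_register_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&demo_chain, nb);
}

void demo_emit_event(unsigned long action, void *data)
{
        blocking_notifier_call_chain(&demo_chain, action, data);
}

/* consumer side: reacts to the event instead of poking the EC itself */
static int demo_nb_notify(struct notifier_block *nb,
                          unsigned long action, void *data)
{
        return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
        .notifier_call = demo_nb_notify,
};

This inverts the old dependency: the WMI driver no longer needs a handle on the EC ACPI device, and the module namespace import makes the coupling explicit.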
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 0a97cfedd706..42a4a996defb 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1601,9 +1601,15 @@ static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
if (!sense)
return 0;
- return !!(sense[1] & SNS1_NO_REC_FOUND) ||
- !!(sense[1] & SNS1_FILE_PROTECTED) ||
- scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
+ if (sense[1] & SNS1_NO_REC_FOUND)
+ return 1;
+
+ if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
+ scsw_is_tm(&irb->scsw) &&
+ !(sense[2] & SNS2_ENV_DATA_PRESENT))
+ return 1;
+
+ return 0;
}
static int dasd_ese_oos_cond(u8 *sense)
@@ -1624,7 +1630,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct dasd_device *device;
unsigned long now;
int nrf_suppressed = 0;
- int fp_suppressed = 0;
+ int it_suppressed = 0;
struct request *req;
u8 *sense = NULL;
int expires;
@@ -1679,8 +1685,9 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
*/
sense = dasd_get_sense(irb);
if (sense) {
- fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
- test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+ it_suppressed = (sense[1] & SNS1_INV_TRACK_FORMAT) &&
+ !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
+ test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
@@ -1695,7 +1702,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
return;
}
}
- if (!(fp_suppressed || nrf_suppressed))
+ if (!(it_suppressed || nrf_suppressed))
device->discipline->dump_sense_dbf(device, irb, "int");
if (device->features & DASD_FEATURE_ERPLOG)
@@ -2459,14 +2466,17 @@ retry:
rc = 0;
list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
/*
- * In some cases the 'File Protected' or 'Incorrect Length'
- * error might be expected and error recovery would be
- * unnecessary in these cases. Check if the according suppress
- * bit is set.
+	 * Certain errors might be expected, in which case error
+	 * recovery is unnecessary. Check if the corresponding
+	 * suppress bit is set.
*/
sense = dasd_get_sense(&cqr->irb);
- if (sense && sense[1] & SNS1_FILE_PROTECTED &&
- test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
+ if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) &&
+ !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
+ test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags))
+ continue;
+ if (sense && (sense[1] & SNS1_NO_REC_FOUND) &&
+ test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags))
continue;
if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index bbbacfc386f2..d0aa267462c5 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1386,14 +1386,8 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
struct dasd_device *device = erp->startdev;
- /*
- * In some cases the 'File Protected' error might be expected and
- * log messages shouldn't be written then.
- * Check if the according suppress bit is set.
- */
- if (!test_bit(DASD_CQR_SUPPRESS_FP, &erp->flags))
- dev_err(&device->cdev->dev,
- "Accessing the DASD failed because of a hardware error\n");
+ dev_err(&device->cdev->dev,
+ "Accessing the DASD failed because of a hardware error\n");
return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 9388b5c383ca..90b106408992 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2275,6 +2275,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
return cqr;
}
@@ -2556,7 +2557,6 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
- set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
return cqr;
@@ -4130,8 +4130,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
/* Set flags to suppress output for expected errors */
if (dasd_eckd_is_ese(basedev)) {
- set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
- set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
}
@@ -4633,9 +4631,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
/* Set flags to suppress output for expected errors */
if (dasd_eckd_is_ese(basedev)) {
- set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
- set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
}
return cqr;
@@ -5780,36 +5777,32 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
{
u8 *sense = dasd_get_sense(irb);
- if (scsw_is_tm(&irb->scsw)) {
- /*
- * In some cases the 'File Protected' or 'Incorrect Length'
- * error might be expected and log messages shouldn't be written
- * then. Check if the according suppress bit is set.
- */
- if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
- test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
- return;
- if (scsw_cstat(&irb->scsw) == 0x40 &&
- test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
- return;
+ /*
+	 * Certain errors might be expected, in which case no log
+	 * messages should be written. Check if the corresponding
+	 * suppress bit is set.
+ */
+ if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) &&
+ !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
+ test_bit(DASD_CQR_SUPPRESS_IT, &req->flags))
+ return;
- dasd_eckd_dump_sense_tcw(device, req, irb);
- } else {
- /*
- * In some cases the 'Command Reject' or 'No Record Found'
- * error might be expected and log messages shouldn't be
- * written then. Check if the according suppress bit is set.
- */
- if (sense && sense[0] & SNS0_CMD_REJECT &&
- test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
- return;
+ if (sense && sense[0] & SNS0_CMD_REJECT &&
+ test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
+ return;
- if (sense && sense[1] & SNS1_NO_REC_FOUND &&
- test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
- return;
+ if (sense && sense[1] & SNS1_NO_REC_FOUND &&
+ test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
+ return;
+ if (scsw_cstat(&irb->scsw) == 0x40 &&
+ test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
+ return;
+
+ if (scsw_is_tm(&irb->scsw))
+ dasd_eckd_dump_sense_tcw(device, req, irb);
+ else
dasd_eckd_dump_sense_ccw(device, req, irb);
- }
}
static int dasd_eckd_reload_device(struct dasd_device *device)
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 1aa426b1dedd..6da47a65af61 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -41,7 +41,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
*/
.max_segment_size = PAGE_SIZE,
.seg_boundary_mask = PAGE_SIZE - 1,
- .dma_alignment = PAGE_SIZE - 1,
.max_segments = USHRT_MAX,
};
struct gendisk *gdp;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index e5f40536b425..81cfb5c89681 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -196,7 +196,7 @@ struct dasd_ccw_req {
* The following flags are used to suppress output of certain errors.
*/
#define DASD_CQR_SUPPRESS_NRF 4 /* Suppress 'No Record Found' error */
-#define DASD_CQR_SUPPRESS_FP 5 /* Suppress 'File Protected' error*/
+#define DASD_CQR_SUPPRESS_IT 5 /* Suppress 'Invalid Track' error*/
#define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */
#define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */
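
The dasd changes swap the 'File Protected' suppression for an 'Invalid Track Format' check that additionally requires no environmental data in sense byte 2. A plain-C model of the resulting predicate; the bit values are made up for the demo, not the real sense layout:

#include <stdbool.h>
#include <stdio.h>

#define SNS1_INV_TRACK_FORMAT   0x40
#define SNS2_ENV_DATA_PRESENT   0x02
#define SUPPRESS_IT             (1u << 5)

static bool suppress_dump(const unsigned char *sense, unsigned int flags)
{
        return (sense[1] & SNS1_INV_TRACK_FORMAT) &&
               !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
               (flags & SUPPRESS_IT);
}

int main(void)
{
        unsigned char sense[3] = { 0, SNS1_INV_TRACK_FORMAT, 0 };

        /* expected error with the suppress flag set: stay quiet (1) */
        printf("%d\n", suppress_dump(sense, SUPPRESS_IT));
        return 0;
}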
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 8b0eded6ef36..01f035f9330e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -100,7 +100,8 @@ retry_trace:
dprint_init(mrioc,
"trying to allocate trace diag buffer of size = %dKB\n",
trace_size / 1024);
- if (mpi3mr_alloc_trace_buffer(mrioc, trace_size)) {
+ if (get_order(trace_size) > MAX_PAGE_ORDER ||
+ mpi3mr_alloc_trace_buffer(mrioc, trace_size)) {
retry = true;
trace_size -= trace_dec_size;
dprint_init(mrioc, "trace diag buffer allocation failed\n"
@@ -118,8 +119,12 @@ retry_fw:
diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_FW;
diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED;
if ((mrioc->facts.diag_fw_sz < fw_size) && (fw_size >= fw_min_size)) {
- diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
- fw_size, &diag_buffer->dma_addr, GFP_KERNEL);
+ if (get_order(fw_size) <= MAX_PAGE_ORDER) {
+ diag_buffer->addr
+ = dma_alloc_coherent(&mrioc->pdev->dev, fw_size,
+ &diag_buffer->dma_addr,
+ GFP_KERNEL);
+ }
if (!retry)
dprint_init(mrioc,
"%s:trying to allocate firmware diag buffer of size = %dKB\n",
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index ca8f132e03ae..616894571c6a 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -5234,6 +5234,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&mrioc->watchdog_lock);
spin_lock_init(&mrioc->chain_buf_lock);
spin_lock_init(&mrioc->sas_node_lock);
+ spin_lock_init(&mrioc->trigger_lock);
INIT_LIST_HEAD(&mrioc->fwevt_list);
INIT_LIST_HEAD(&mrioc->tgtdev_list);
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 7e9074519ad2..4dc8aba33d9b 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2546,11 +2546,6 @@ release_lock:
}
EXPORT_SYMBOL(qman_delete_cgr);
-struct cgr_comp {
- struct qman_cgr *cgr;
- struct completion completion;
-};
-
static void qman_delete_cgr_smp_call(void *p)
{
qman_delete_cgr((struct qman_cgr *)p);
diff --git a/drivers/staging/media/atomisp/pci/ia_css_stream_public.h b/drivers/staging/media/atomisp/pci/ia_css_stream_public.h
index 961c61288083..aad860e54d3a 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_stream_public.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_stream_public.h
@@ -27,12 +27,16 @@
#include "ia_css_prbs.h"
#include "ia_css_input_port.h"
-/* Input modes, these enumerate all supported input modes.
- * Note that not all ISP modes support all input modes.
+/*
+ * Input modes, these enumerate all supported input modes.
+ * This enum is part of the atomisp firmware ABI and must
+ * NOT be changed!
+ * Note that not all ISP modes support all input modes.
*/
enum ia_css_input_mode {
IA_CSS_INPUT_MODE_SENSOR, /** data from sensor */
IA_CSS_INPUT_MODE_FIFO, /** data from input-fifo */
+ IA_CSS_INPUT_MODE_TPG, /** data from test-pattern generator */
IA_CSS_INPUT_MODE_PRBS, /** data from pseudo-random bit stream */
IA_CSS_INPUT_MODE_MEMORY, /** data from a frame in memory */
IA_CSS_INPUT_MODE_BUFFERED_SENSOR /** data is sent through mipi buffer */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_internal.h b/drivers/staging/media/atomisp/pci/sh_css_internal.h
index a2d972ea3fa0..959e7f549641 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_internal.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_internal.h
@@ -344,7 +344,14 @@ struct sh_css_sp_input_formatter_set {
#define IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT (3)
-/* SP configuration information */
+/*
+ * SP configuration information
+ *
+ * This struct is part of the atomisp firmware ABI and is directly copied
+ * to ISP DRAM by sh_css_store_sp_group_to_ddr()
+ *
+ * Do NOT change this struct's layout or remove seemingly unused fields!
+ */
struct sh_css_sp_config {
u8 no_isp_sync; /* Signal host immediately after start */
u8 enable_raw_pool_locking; /** Enable Raw Buffer Locking for HALv3 Support */
@@ -354,6 +361,10 @@ struct sh_css_sp_config {
host (true) or when they are passed to the preview/video pipe
(false). */
+ /*
+	 * Note the fields below are only used on the ISP2400, not on the ISP2401;
+	 * sh_css_store_sp_group_to_ddr() skips copying these when run on the ISP2401.
+ */
struct {
u8 a_changed;
u8 b_changed;
@@ -363,11 +374,13 @@ struct sh_css_sp_config {
} input_formatter;
sync_generator_cfg_t sync_gen;
+ tpg_cfg_t tpg;
prbs_cfg_t prbs;
input_system_cfg_t input_circuit;
u8 input_circuit_cfg_changed;
- u32 mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT];
- u8 enable_isys_event_queue;
+ u32 mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT];
+ /* These last 2 fields are used on both the ISP2400 and the ISP2401 */
+ u8 enable_isys_event_queue;
u8 disable_cont_vf;
};
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index 4a2e869b9538..daed67d19efb 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -13,6 +13,28 @@
#include "thermal_core.h"
+static void bang_bang_set_instance_target(struct thermal_instance *instance,
+ unsigned int target)
+{
+ if (instance->target != 0 && instance->target != 1 &&
+ instance->target != THERMAL_NO_TARGET)
+ pr_debug("Unexpected state %ld of thermal instance %s in bang-bang\n",
+ instance->target, instance->name);
+
+ /*
+ * Enable the fan when the trip is crossed on the way up and disable it
+ * when the trip is crossed on the way down.
+ */
+ instance->target = target;
+ instance->initialized = true;
+
+ dev_dbg(&instance->cdev->device, "target=%ld\n", instance->target);
+
+ mutex_lock(&instance->cdev->lock);
+ __thermal_cdev_update(instance->cdev);
+ mutex_unlock(&instance->cdev->lock);
+}
+
/**
* bang_bang_control - controls devices associated with the given zone
* @tz: thermal_zone_device
@@ -54,33 +76,60 @@ static void bang_bang_control(struct thermal_zone_device *tz,
tz->temperature, trip->hysteresis);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip != trip)
- continue;
+ if (instance->trip == trip)
+ bang_bang_set_instance_target(instance, crossed_up);
+ }
+}
+
+static void bang_bang_manage(struct thermal_zone_device *tz)
+{
+ const struct thermal_trip_desc *td;
+ struct thermal_instance *instance;
- if (instance->target != 0 && instance->target != 1 &&
- instance->target != THERMAL_NO_TARGET)
- pr_debug("Unexpected state %ld of thermal instance %s in bang-bang\n",
- instance->target, instance->name);
+ /* If the code below has run already, nothing needs to be done. */
+ if (tz->governor_data)
+ return;
- /*
- * Enable the fan when the trip is crossed on the way up and
- * disable it when the trip is crossed on the way down.
- */
- instance->target = crossed_up;
+ for_each_trip_desc(tz, td) {
+ const struct thermal_trip *trip = &td->trip;
- dev_dbg(&instance->cdev->device, "target=%ld\n", instance->target);
+ if (tz->temperature >= td->threshold ||
+ trip->temperature == THERMAL_TEMP_INVALID ||
+ trip->type == THERMAL_TRIP_CRITICAL ||
+ trip->type == THERMAL_TRIP_HOT)
+ continue;
- mutex_lock(&instance->cdev->lock);
- instance->cdev->updated = false; /* cdev needs update */
- mutex_unlock(&instance->cdev->lock);
+ /*
+ * If the initial cooling device state is "on", but the zone
+ * temperature is not above the trip point, the core will not
+ * call bang_bang_control() until the zone temperature reaches
+ * the trip point temperature which may be never. In those
+ * cases, set the initial state of the cooling device to 0.
+ */
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (!instance->initialized && instance->trip == trip)
+ bang_bang_set_instance_target(instance, 0);
+ }
}
- list_for_each_entry(instance, &tz->thermal_instances, tz_node)
- thermal_cdev_update(instance->cdev);
+ tz->governor_data = (void *)true;
+}
+
+static void bang_bang_update_tz(struct thermal_zone_device *tz,
+ enum thermal_notify_event reason)
+{
+ /*
+ * Let bang_bang_manage() know that it needs to walk trips after binding
+ * a new cdev and after system resume.
+ */
+ if (reason == THERMAL_TZ_BIND_CDEV || reason == THERMAL_TZ_RESUME)
+ tz->governor_data = NULL;
}
static struct thermal_governor thermal_gov_bang_bang = {
.name = "bang_bang",
.trip_crossed = bang_bang_control,
+ .manage = bang_bang_manage,
+ .update_tz = bang_bang_update_tz,
};
THERMAL_GOVERNOR_DECLARE(thermal_gov_bang_bang);
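
For reference, the bang-bang policy the refactored governor implements: switch on when the trip is crossed upward, off when the temperature falls below trip minus hysteresis, and hold the previous target inside the band. A standalone model:

#include <stdio.h>

static int bang_bang(int temp, int trip, int hyst, int cur_target)
{
        if (temp >= trip)
                return 1;               /* crossed on the way up */
        if (temp <= trip - hyst)
                return 0;               /* crossed on the way down */
        return cur_target;              /* inside the hysteresis band */
}

int main(void)
{
        int target = 0;
        const int temps[] = { 40, 55, 48, 44, 55 };

        for (unsigned int i = 0; i < sizeof(temps) / sizeof(temps[0]); i++) {
                target = bang_bang(temps[i], 50, 5, target);
                printf("t=%d target=%d\n", temps[i], target);
        }
        return 0;
}

The new manage() callback exists precisely because this state machine never fires for an instance that starts inside the band: it forces uninitialized instances to 0 once so the fan is not left stuck on.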
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 95c399f94744..e6669aeda1ff 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1728,7 +1728,8 @@ static void thermal_zone_device_resume(struct work_struct *work)
thermal_debug_tz_resume(tz);
thermal_zone_device_init(tz);
- __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+ thermal_governor_update_tz(tz, THERMAL_TZ_RESUME);
+ __thermal_zone_device_update(tz, THERMAL_TZ_RESUME);
complete(&tz->resume);
tz->resuming = false;
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index 11185cc1db92..9ed4bb2e8d05 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -323,16 +323,17 @@ static ssize_t port_sb_regs_write(struct file *file, const char __user *user_buf
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
- goto out_rpm_put;
+ goto out;
}
ret = sb_regs_write(port, port_sb_regs, ARRAY_SIZE(port_sb_regs),
USB4_SB_TARGET_ROUTER, 0, buf, count, ppos);
mutex_unlock(&tb->lock);
-out_rpm_put:
+out:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
+ free_page((unsigned long)buf);
return ret < 0 ? ret : count;
}
@@ -355,16 +356,17 @@ static ssize_t retimer_sb_regs_write(struct file *file,
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
- goto out_rpm_put;
+ goto out;
}
ret = sb_regs_write(rt->port, retimer_sb_regs, ARRAY_SIZE(retimer_sb_regs),
USB4_SB_TARGET_RETIMER, rt->index, buf, count, ppos);
mutex_unlock(&tb->lock);
-out_rpm_put:
+out:
pm_runtime_mark_last_busy(&rt->dev);
pm_runtime_put_autosuspend(&rt->dev);
+ free_page((unsigned long)buf);
return ret < 0 ? ret : count;
}
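
The debugfs fix is the classic single-exit cleanup: once the page-sized buffer exists, even the interrupted-lock path must pass through the tail that frees it. Simplified userspace shape:

#include <stdlib.h>

static int demo_write(int lock_interrupted)
{
        int ret = 0;
        char *buf = malloc(4096);

        if (!buf)
                return -1;

        if (lock_interrupted) {
                ret = -2;               /* -ERESTARTSYS in the driver */
                goto out;
        }

        /* ... perform the register write using buf ... */

out:
        free(buf);                      /* reached on every path */
        return ret;
}

int main(void)
{
        return demo_write(0) || demo_write(1) != -2;
}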
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 326433df5880..6a2116cbb06f 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -3392,6 +3392,7 @@ void tb_switch_remove(struct tb_switch *sw)
tb_switch_remove(port->remote->sw);
port->remote = NULL;
} else if (port->xdomain) {
+ port->xdomain->is_unplugged = true;
tb_xdomain_remove(port->xdomain);
port->xdomain = NULL;
}
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index ccb83c05be20..88b58f44e4e9 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -27,7 +27,6 @@
#include <linux/pm_wakeirq.h>
#include <linux/dma-mapping.h>
#include <linux/sys_soc.h>
-#include <linux/pm_domain.h>
#include "8250.h"
@@ -119,12 +118,6 @@
#define UART_OMAP_TO_L 0x26
#define UART_OMAP_TO_H 0x27
-/*
- * Copy of the genpd flags for the console.
- * Only used if console suspend is disabled
- */
-static unsigned int genpd_flags_console;
-
struct omap8250_priv {
void __iomem *membase;
int line;
@@ -1657,7 +1650,6 @@ static int omap8250_suspend(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
- struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);
int err = 0;
serial8250_suspend_port(priv->line);
@@ -1668,19 +1660,8 @@ static int omap8250_suspend(struct device *dev)
if (!device_may_wakeup(dev))
priv->wer = 0;
serial_out(up, UART_OMAP_WER, priv->wer);
- if (uart_console(&up->port)) {
- if (console_suspend_enabled)
- err = pm_runtime_force_suspend(dev);
- else {
- /*
- * The pd shall not be powered-off (no console suspend).
- * Make copy of genpd flags before to set it always on.
- * The original value is restored during the resume.
- */
- genpd_flags_console = genpd->flags;
- genpd->flags |= GENPD_FLAG_ALWAYS_ON;
- }
- }
+ if (uart_console(&up->port) && console_suspend_enabled)
+ err = pm_runtime_force_suspend(dev);
flush_work(&priv->qos_work);
return err;
@@ -1690,16 +1671,12 @@ static int omap8250_resume(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
- struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);
int err;
if (uart_console(&up->port) && console_suspend_enabled) {
- if (console_suspend_enabled) {
- err = pm_runtime_force_resume(dev);
- if (err)
- return err;
- } else
- genpd->flags = genpd_flags_console;
+ err = pm_runtime_force_resume(dev);
+ if (err)
+ return err;
}
serial8250_resume_port(priv->line);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 0a90964d6d10..09b246c9e389 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -2514,7 +2514,7 @@ static const struct uart_ops atmel_pops = {
};
static const struct serial_rs485 atmel_rs485_supported = {
- .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX,
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX,
.delay_rts_before_send = 1,
.delay_rts_after_send = 1,
};
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 615291ea9b5e..77efa7ee6eda 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -2923,6 +2923,7 @@ static int lpuart_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
+ pm_runtime_mark_last_busy(&pdev->dev);
ret = lpuart_global_reset(sport);
if (ret)
diff --git a/drivers/tty/vt/conmakehash.c b/drivers/tty/vt/conmakehash.c
index 82d9db68b2ce..a931fcde7ad9 100644
--- a/drivers/tty/vt/conmakehash.c
+++ b/drivers/tty/vt/conmakehash.c
@@ -11,8 +11,6 @@
* Copyright (C) 1995-1997 H. Peter Anvin
*/
-#include <libgen.h>
-#include <linux/limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <sysexits.h>
@@ -79,7 +77,6 @@ int main(int argc, char *argv[])
{
FILE *ctbl;
const char *tblname;
- char base_tblname[PATH_MAX];
char buffer[65536];
int fontlen;
int i, nuni, nent;
@@ -245,20 +242,15 @@ int main(int argc, char *argv[])
for ( i = 0 ; i < fontlen ; i++ )
nuni += unicount[i];
- strncpy(base_tblname, tblname, PATH_MAX);
- base_tblname[PATH_MAX - 1] = 0;
printf("\
/*\n\
- * Do not edit this file; it was automatically generated by\n\
- *\n\
- * conmakehash %s > [this file]\n\
- *\n\
+ * Automatically generated file; Do not edit.\n\
*/\n\
\n\
#include <linux/types.h>\n\
\n\
u8 dfont_unicount[%d] = \n\
-{\n\t", basename(base_tblname), fontlen);
+{\n\t", fontlen);
for ( i = 0 ; i < fontlen ; i++ )
{
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d7654f475daf..937ce5fd5809 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1872,7 +1872,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
cancel_delayed_work_sync(&xhci->cmd_timer);
- for (i = 0; i < xhci->max_interrupters; i++) {
+ for (i = 0; xhci->interrupters && i < xhci->max_interrupters; i++) {
if (xhci->interrupters[i]) {
xhci_remove_interrupter(xhci, xhci->interrupters[i]);
xhci_free_interrupter(xhci, xhci->interrupters[i]);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index b7517c3c8059..4ea2c3e072a9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2910,6 +2910,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
else
process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
+ return 0;
check_endpoint_halted:
if (xhci_halted_host_endpoint(ep_ctx, trb_comp_code))
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 0a8cf6c17f82..efdf4c228b8c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2837,7 +2837,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
xhci->num_active_eps);
return -ENOMEM;
}
- if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
+ if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change &&
xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
xhci_free_host_resources(xhci, ctrl_ctx);
@@ -4200,8 +4200,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
mutex_unlock(&xhci->mutex);
ret = xhci_disable_slot(xhci, udev->slot_id);
xhci_free_virt_device(xhci, udev->slot_id);
- if (!ret)
- xhci_alloc_dev(hcd, udev);
+ if (!ret) {
+ if (xhci_alloc_dev(hcd, udev) == 1)
+ xhci_setup_addressable_virt_dev(xhci, udev);
+ }
kfree(command->completion);
kfree(command);
return -EPROTO;
diff --git a/drivers/usb/misc/usb-ljca.c b/drivers/usb/misc/usb-ljca.c
index 2d30fc1be306..1a8d5e80b9ae 100644
--- a/drivers/usb/misc/usb-ljca.c
+++ b/drivers/usb/misc/usb-ljca.c
@@ -169,6 +169,7 @@ static const struct acpi_device_id ljca_gpio_hids[] = {
{ "INTC1096" },
{ "INTC100B" },
{ "INTC10D1" },
+ { "INTC10B5" },
{},
};
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index cce39818e99a..4b02d6474259 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -5655,7 +5655,6 @@ static void run_state_machine(struct tcpm_port *port)
break;
case PORT_RESET:
tcpm_reset_port(port);
- port->pd_events = 0;
if (port->self_powered)
tcpm_set_cc(port, TYPEC_CC_OPEN);
else
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 432a2d6266d7..4039851551c1 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -137,7 +137,7 @@ static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
if (ret)
return ret;
- return err;
+ return err ?: UCSI_CCI_LENGTH(*cci);
}
static int ucsi_read_error(struct ucsi *ucsi, u8 connector_num)