Diffstat (limited to 'drivers')
1057 files changed, 21601 insertions, 9259 deletions
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 9418c73ee8ef..4b0640226986 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -480,9 +480,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev) /* Clear any pending errors */ pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f); - /* VPU 37XX does not require 10m D3hot delay */ - if (ivpu_hw_gen(vdev) == IVPU_HW_37XX) - pdev->d3hot_delay = 0; + /* NPU does not require 10m D3hot delay */ + pdev->d3hot_delay = 0; ret = pcim_enable_device(pdev); if (ret) { diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index 6576232f3e67..5fa8bd4603d5 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -222,7 +222,6 @@ ivpu_fw_init_wa(struct ivpu_device *vdev) const struct vpu_firmware_header *fw_hdr = (const void *)vdev->fw->file->data; if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, BOOT, 3, 17) || - (ivpu_hw_gen(vdev) > IVPU_HW_37XX) || (ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_DISABLE)) vdev->wa.disable_d0i3_msg = true; diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c index f15a93d83057..89af1006df55 100644 --- a/drivers/accel/ivpu/ivpu_hw_37xx.c +++ b/drivers/accel/ivpu/ivpu_hw_37xx.c @@ -510,22 +510,12 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev) return ret; } -static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev) -{ - ivpu_boot_dpu_active_drive(vdev, false); - ivpu_boot_pwr_island_isolation_drive(vdev, true); - ivpu_boot_pwr_island_trickle_drive(vdev, false); - ivpu_boot_pwr_island_drive(vdev, false); - - return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0); -} - static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev) { u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES); val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val); - val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val); + val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val); val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val); REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val); @@ -616,12 +606,37 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev) return 0; } +static int ivpu_hw_37xx_ip_reset(struct ivpu_device *vdev) +{ + int ret; + u32 val; + + if (IVPU_WA(punit_disabled)) + return 0; + + ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); + if (ret) { + ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n"); + return ret; + } + + val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET); + val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val); + REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val); + + ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); + if (ret) + ivpu_err(vdev, "Timed out waiting for RESET completion\n"); + + return ret; +} + static int ivpu_hw_37xx_reset(struct ivpu_device *vdev) { int ret = 0; - if (ivpu_boot_pwr_domain_disable(vdev)) { - ivpu_err(vdev, "Failed to disable power domain\n"); + if (ivpu_hw_37xx_ip_reset(vdev)) { + ivpu_err(vdev, "Failed to reset NPU\n"); ret = -EIO; } @@ -661,6 +676,11 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev) { int ret; + /* PLL requests may fail when powering down, so issue WP 0 here */ + ret = ivpu_pll_disable(vdev); + if (ret) + ivpu_warn(vdev, "Failed to disable PLL: %d\n", ret); + ret = ivpu_hw_37xx_d0i3_disable(vdev); if (ret) ivpu_warn(vdev, "Failed to 
disable D0I3: %d\n", ret); diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c index 704288084f37..a1523d0b1ef3 100644 --- a/drivers/accel/ivpu/ivpu_hw_40xx.c +++ b/drivers/accel/ivpu/ivpu_hw_40xx.c @@ -24,7 +24,7 @@ #define SKU_HW_ID_SHIFT 16u #define SKU_HW_ID_MASK 0xffff0000u -#define PLL_CONFIG_DEFAULT 0x1 +#define PLL_CONFIG_DEFAULT 0x0 #define PLL_CDYN_DEFAULT 0x80 #define PLL_EPP_DEFAULT 0x80 #define PLL_REF_CLK_FREQ (50 * 1000000) @@ -530,7 +530,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev) u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES); val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val); - val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val); + val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val); val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val); REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val); @@ -704,7 +704,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev) { struct ivpu_hw_info *hw = vdev->hw; u32 tile_disable; - u32 tile_enable; u32 fuse; fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE); @@ -725,10 +724,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev) else ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM); - tile_enable = (~tile_disable) & TILE_MAX_MASK; - - hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku); - hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku); hw->tile_fuse = tile_disable; hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT; diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 0440bee3ecaf..e70cfb859339 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -294,7 +294,7 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 return -ENOENT; if (job->file_priv->has_mmu_faults) - job_status = VPU_JSM_STATUS_ABORTED; + job_status = DRM_IVPU_JOB_STATUS_ABORTED; job->bos[CMD_BUF_IDX]->job_status = job_status; dma_fence_signal(job->done_fence); @@ -315,7 +315,7 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev) unsigned long id; xa_for_each(&vdev->submitted_jobs_xa, id, job) - ivpu_job_signal_and_destroy(vdev, id, VPU_JSM_STATUS_ABORTED); + ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED); } static int ivpu_job_submit(struct ivpu_job *job) diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c index 9a3122ffce03..91bd640655ab 100644 --- a/drivers/accel/ivpu/ivpu_mmu.c +++ b/drivers/accel/ivpu/ivpu_mmu.c @@ -72,10 +72,10 @@ #define IVPU_MMU_Q_COUNT_LOG2 4 /* 16 entries */ #define IVPU_MMU_Q_COUNT ((u32)1 << IVPU_MMU_Q_COUNT_LOG2) -#define IVPU_MMU_Q_WRAP_BIT (IVPU_MMU_Q_COUNT << 1) -#define IVPU_MMU_Q_WRAP_MASK (IVPU_MMU_Q_WRAP_BIT - 1) -#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1) +#define IVPU_MMU_Q_WRAP_MASK GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0) +#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1) #define IVPU_MMU_Q_IDX(val) ((val) & IVPU_MMU_Q_IDX_MASK) +#define IVPU_MMU_Q_WRP(val) ((val) & IVPU_MMU_Q_COUNT) #define IVPU_MMU_CMDQ_CMD_SIZE 16 #define IVPU_MMU_CMDQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE) @@ -475,20 +475,32 @@ static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev) return 0; } +static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q) +{ + return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) && + (IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons))); +} + +static bool 
ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q) +{ + return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) && + (IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons))); +} + static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1) { - struct ivpu_mmu_queue *q = &vdev->mmu->cmdq; - u64 *queue_buffer = q->base; - int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer)); + struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; + u64 *queue_buffer = cmdq->base; + int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer)); - if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) { + if (ivpu_mmu_queue_is_full(cmdq)) { ivpu_err(vdev, "Failed to write MMU CMD %s\n", name); return -EBUSY; } queue_buffer[idx] = data0; queue_buffer[idx + 1] = data1; - q->prod = (q->prod + 1) & IVPU_MMU_Q_WRAP_MASK; + cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK; ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1); @@ -560,7 +572,6 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev) mmu->cmdq.cons = 0; memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE); - clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE); mmu->evtq.prod = 0; mmu->evtq.cons = 0; @@ -874,14 +885,10 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev) u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE); evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC); - if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT)) + if (ivpu_mmu_queue_is_empty(evtq)) return NULL; - clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE); - evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK; - REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons); - return evt; } @@ -902,6 +909,7 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev) } ivpu_mmu_user_context_mark_invalid(vdev, ssid); + REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons); } } diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index f501f27ebafd..5f73854234ba 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -58,11 +58,14 @@ static int ivpu_suspend(struct ivpu_device *vdev) { int ret; + /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */ + pci_save_state(to_pci_dev(vdev->drm.dev)); + ret = ivpu_shutdown(vdev); - if (ret) { + if (ret) ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret); - return ret; - } + + pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot); return ret; } @@ -71,6 +74,9 @@ static int ivpu_resume(struct ivpu_device *vdev) { int ret; + pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0); + pci_restore_state(to_pci_dev(vdev->drm.dev)); + retry: ret = ivpu_hw_power_up(vdev); if (ret) { @@ -120,15 +126,20 @@ static void ivpu_pm_recovery_work(struct work_struct *work) ivpu_fw_log_dump(vdev); -retry: - ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev)); - if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) { - cond_resched(); - goto retry; - } + atomic_inc(&vdev->pm->reset_counter); + atomic_set(&vdev->pm->reset_pending, 1); + down_write(&vdev->pm->reset_lock); + + ivpu_suspend(vdev); + ivpu_pm_prepare_cold_boot(vdev); + ivpu_jobs_abort_all(vdev); + + ret = ivpu_resume(vdev); + if (ret) + ivpu_err(vdev, "Failed to resume NPU: %d\n", ret); - if (ret && ret != -EAGAIN) - ivpu_err(vdev, "Failed to reset VPU: %d\n", ret); + up_write(&vdev->pm->reset_lock); + 
atomic_set(&vdev->pm->reset_pending, 0); kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt); pm_runtime_mark_last_busy(vdev->drm.dev); @@ -200,9 +211,6 @@ int ivpu_pm_suspend_cb(struct device *dev) ivpu_suspend(vdev); ivpu_pm_prepare_warm_boot(vdev); - pci_save_state(to_pci_dev(dev)); - pci_set_power_state(to_pci_dev(dev), PCI_D3hot); - ivpu_dbg(vdev, PM, "Suspend done.\n"); return 0; @@ -216,9 +224,6 @@ int ivpu_pm_resume_cb(struct device *dev) ivpu_dbg(vdev, PM, "Resume..\n"); - pci_set_power_state(to_pci_dev(dev), PCI_D0); - pci_restore_state(to_pci_dev(dev)); - ret = ivpu_resume(vdev); if (ret) ivpu_err(vdev, "Failed to resume: %d\n", ret); diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 7b7c605166e0..ab2a82cb1b0b 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -26,7 +26,6 @@ #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/cper.h> -#include <linux/cxl-event.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/ratelimit.h> @@ -674,78 +673,6 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata, schedule_work(&entry->work); } -/* - * Only a single callback can be registered for CXL CPER events. - */ -static DECLARE_RWSEM(cxl_cper_rw_sem); -static cxl_cper_callback cper_callback; - -/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */ - -/* - * General Media Event Record - * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43 - */ -#define CPER_SEC_CXL_GEN_MEDIA_GUID \ - GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \ - 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6) - -/* - * DRAM Event Record - * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44 - */ -#define CPER_SEC_CXL_DRAM_GUID \ - GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \ - 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24) - -/* - * Memory Module Event Record - * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45 - */ -#define CPER_SEC_CXL_MEM_MODULE_GUID \ - GUID_INIT(0xfe927475, 0xdd59, 0x4339, \ - 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74) - -static void cxl_cper_post_event(enum cxl_event_type event_type, - struct cxl_cper_event_rec *rec) -{ - if (rec->hdr.length <= sizeof(rec->hdr) || - rec->hdr.length > sizeof(*rec)) { - pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n", - rec->hdr.length); - return; - } - - if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) { - pr_err(FW_WARN "CXL CPER invalid event\n"); - return; - } - - guard(rwsem_read)(&cxl_cper_rw_sem); - if (cper_callback) - cper_callback(event_type, rec); -} - -int cxl_cper_register_callback(cxl_cper_callback callback) -{ - guard(rwsem_write)(&cxl_cper_rw_sem); - if (cper_callback) - return -EINVAL; - cper_callback = callback; - return 0; -} -EXPORT_SYMBOL_NS_GPL(cxl_cper_register_callback, CXL); - -int cxl_cper_unregister_callback(cxl_cper_callback callback) -{ - guard(rwsem_write)(&cxl_cper_rw_sem); - if (callback != cper_callback) - return -EINVAL; - cper_callback = NULL; - return 0; -} -EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_callback, CXL); - static bool ghes_do_proc(struct ghes *ghes, const struct acpi_hest_generic_status *estatus) { @@ -780,22 +707,6 @@ static bool ghes_do_proc(struct ghes *ghes, } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) { queued = ghes_handle_arm_hw_error(gdata, sev, sync); - } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) { - struct cxl_cper_event_rec *rec = - acpi_hest_get_payload(gdata); - - cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec); - } else if 
(guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) { - struct cxl_cper_event_rec *rec = - acpi_hest_get_payload(gdata); - - cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec); - } else if (guid_equal(sec_type, - &CPER_SEC_CXL_MEM_MODULE_GUID)) { - struct cxl_cper_event_rec *rec = - acpi_hest_get_payload(gdata); - - cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec); } else { void *err = acpi_hest_get_payload(gdata); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index dbdee2924594..02255795b800 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -525,10 +525,12 @@ static void acpi_ec_clear(struct acpi_ec *ec) static void acpi_ec_enable_event(struct acpi_ec *ec) { - spin_lock(&ec->lock); + unsigned long flags; + + spin_lock_irqsave(&ec->lock, flags); if (acpi_ec_started(ec)) __acpi_ec_enable_event(ec); - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, flags); /* Drain additional events if hardware requires that */ if (EC_FLAGS_CLEAR_ON_RESUME) @@ -544,9 +546,11 @@ static void __acpi_ec_flush_work(void) static void acpi_ec_disable_event(struct acpi_ec *ec) { - spin_lock(&ec->lock); + unsigned long flags; + + spin_lock_irqsave(&ec->lock, flags); __acpi_ec_disable_event(ec); - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, flags); /* * When ec_freeze_events is true, we need to flush events in @@ -567,9 +571,10 @@ void acpi_ec_flush_work(void) static bool acpi_ec_guard_event(struct acpi_ec *ec) { + unsigned long flags; bool guarded; - spin_lock(&ec->lock); + spin_lock_irqsave(&ec->lock, flags); /* * If firmware SCI_EVT clearing timing is "event", we actually * don't know when the SCI_EVT will be cleared by firmware after @@ -585,29 +590,31 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec) guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && ec->event_state != EC_EVENT_READY && (!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY); - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, flags); return guarded; } static int ec_transaction_polled(struct acpi_ec *ec) { + unsigned long flags; int ret = 0; - spin_lock(&ec->lock); + spin_lock_irqsave(&ec->lock, flags); if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL)) ret = 1; - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, flags); return ret; } static int ec_transaction_completed(struct acpi_ec *ec) { + unsigned long flags; int ret = 0; - spin_lock(&ec->lock); + spin_lock_irqsave(&ec->lock, flags); if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) ret = 1; - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, flags); return ret; } @@ -749,6 +756,7 @@ static int ec_guard(struct acpi_ec *ec) static int ec_poll(struct acpi_ec *ec) { + unsigned long flags; int repeat = 5; /* number of command restarts */ while (repeat--) { @@ -757,14 +765,14 @@ static int ec_poll(struct acpi_ec *ec) do { if (!ec_guard(ec)) return 0; - spin_lock(&ec->lock); + spin_lock_irqsave(&ec->lock, flags); advance_transaction(ec, false); - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, flags); } while (time_before(jiffies, delay)); pr_debug("controller reset, restart transaction\n"); - spin_lock(&ec->lock); + spin_lock_irqsave(&ec->lock, flags); start_transaction(ec); - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, flags); } return -ETIME; } @@ -772,10 +780,11 @@ static int ec_poll(struct acpi_ec *ec) static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, struct transaction *t) { + unsigned long tmp; int ret = 0; /* start transaction */ - spin_lock(&ec->lock); + 
spin_lock_irqsave(&ec->lock, tmp); /* Enable GPE for command processing (IBF=0/OBF=1) */ if (!acpi_ec_submit_flushable_request(ec)) { ret = -EINVAL; @@ -786,11 +795,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, ec->curr = t; ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command)); start_transaction(ec); - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, tmp); ret = ec_poll(ec); - spin_lock(&ec->lock); + spin_lock_irqsave(&ec->lock, tmp); if (t->irq_count == ec_storm_threshold) acpi_ec_unmask_events(ec); ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command)); @@ -799,7 +808,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, acpi_ec_complete_request(ec); ec_dbg_ref(ec, "Decrease command"); unlock: - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, tmp); return ret; } @@ -927,7 +936,9 @@ EXPORT_SYMBOL(ec_get_handle); static void acpi_ec_start(struct acpi_ec *ec, bool resuming) { - spin_lock(&ec->lock); + unsigned long flags; + + spin_lock_irqsave(&ec->lock, flags); if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) { ec_dbg_drv("Starting EC"); /* Enable GPE for event processing (SCI_EVT=1) */ @@ -937,28 +948,31 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming) } ec_log_drv("EC started"); } - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, flags); } static bool acpi_ec_stopped(struct acpi_ec *ec) { + unsigned long flags; bool flushed; - spin_lock(&ec->lock); + spin_lock_irqsave(&ec->lock, flags); flushed = acpi_ec_flushed(ec); - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, flags); return flushed; } static void acpi_ec_stop(struct acpi_ec *ec, bool suspending) { - spin_lock(&ec->lock); + unsigned long flags; + + spin_lock_irqsave(&ec->lock, flags); if (acpi_ec_started(ec)) { ec_dbg_drv("Stopping EC"); set_bit(EC_FLAGS_STOPPED, &ec->flags); - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, flags); wait_event(ec->wait, acpi_ec_stopped(ec)); - spin_lock(&ec->lock); + spin_lock_irqsave(&ec->lock, flags); /* Disable GPE for event processing (SCI_EVT=1) */ if (!suspending) { acpi_ec_complete_request(ec); @@ -969,25 +983,29 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending) clear_bit(EC_FLAGS_STOPPED, &ec->flags); ec_log_drv("EC stopped"); } - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, flags); } static void acpi_ec_enter_noirq(struct acpi_ec *ec) { - spin_lock(&ec->lock); + unsigned long flags; + + spin_lock_irqsave(&ec->lock, flags); ec->busy_polling = true; ec->polling_guard = 0; ec_log_drv("interrupt blocked"); - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, flags); } static void acpi_ec_leave_noirq(struct acpi_ec *ec) { - spin_lock(&ec->lock); + unsigned long flags; + + spin_lock_irqsave(&ec->lock, flags); ec->busy_polling = ec_busy_polling; ec->polling_guard = ec_polling_guard; ec_log_drv("interrupt unblocked"); - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, flags); } void acpi_ec_block_transactions(void) @@ -1119,9 +1137,9 @@ static void acpi_ec_event_processor(struct work_struct *work) ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit); - spin_lock(&ec->lock); + spin_lock_irq(&ec->lock); ec->queries_in_progress--; - spin_unlock(&ec->lock); + spin_unlock_irq(&ec->lock); acpi_ec_put_query_handler(handler); kfree(q); @@ -1184,12 +1202,12 @@ static int acpi_ec_submit_query(struct acpi_ec *ec) */ ec_dbg_evt("Query(0x%02x) scheduled", value); - spin_lock(&ec->lock); + spin_lock_irq(&ec->lock); 
ec->queries_in_progress++; queue_work(ec_query_wq, &q->work); - spin_unlock(&ec->lock); + spin_unlock_irq(&ec->lock); return 0; @@ -1205,14 +1223,14 @@ static void acpi_ec_event_handler(struct work_struct *work) ec_dbg_evt("Event started"); - spin_lock(&ec->lock); + spin_lock_irq(&ec->lock); while (ec->events_to_process) { - spin_unlock(&ec->lock); + spin_unlock_irq(&ec->lock); acpi_ec_submit_query(ec); - spin_lock(&ec->lock); + spin_lock_irq(&ec->lock); ec->events_to_process--; } @@ -1229,11 +1247,11 @@ static void acpi_ec_event_handler(struct work_struct *work) ec_dbg_evt("Event stopped"); - spin_unlock(&ec->lock); + spin_unlock_irq(&ec->lock); guard_timeout = !!ec_guard(ec); - spin_lock(&ec->lock); + spin_lock_irq(&ec->lock); /* Take care of SCI_EVT unless someone else is doing that. */ if (guard_timeout && !ec->curr) @@ -1246,7 +1264,7 @@ static void acpi_ec_event_handler(struct work_struct *work) ec->events_in_progress--; - spin_unlock(&ec->lock); + spin_unlock_irq(&ec->lock); } static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt) @@ -1271,11 +1289,13 @@ static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt static void acpi_ec_handle_interrupt(struct acpi_ec *ec) { - spin_lock(&ec->lock); + unsigned long flags; + + spin_lock_irqsave(&ec->lock, flags); clear_gpe_and_advance_transaction(ec, true); - spin_unlock(&ec->lock); + spin_unlock_irqrestore(&ec->lock, flags); } static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, @@ -2085,7 +2105,7 @@ bool acpi_ec_dispatch_gpe(void) * Dispatch the EC GPE in-band, but do not report wakeup in any case * to allow the caller to process events properly after that. */ - spin_lock(&first_ec->lock); + spin_lock_irq(&first_ec->lock); if (acpi_ec_gpe_status_set(first_ec)) { pm_pr_dbg("ACPI EC GPE status set\n"); @@ -2094,7 +2114,7 @@ bool acpi_ec_dispatch_gpe(void) work_in_progress = acpi_ec_work_in_progress(first_ec); } - spin_unlock(&first_ec->lock); + spin_unlock_irq(&first_ec->lock); if (!work_in_progress) return false; @@ -2107,11 +2127,11 @@ bool acpi_ec_dispatch_gpe(void) pm_pr_dbg("ACPI EC work flushed\n"); - spin_lock(&first_ec->lock); + spin_lock_irq(&first_ec->lock); work_in_progress = acpi_ec_work_in_progress(first_ec); - spin_unlock(&first_ec->lock); + spin_unlock_irq(&first_ec->lock); } while (work_in_progress && !pm_wakeup_pending()); return false; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 8dd23b19e997..eca24f41556d 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -478,6 +478,16 @@ binder_enqueue_thread_work_ilocked(struct binder_thread *thread, { WARN_ON(!list_empty(&thread->waiting_thread_node)); binder_enqueue_work_ilocked(work, &thread->todo); + + /* (e)poll-based threads require an explicit wakeup signal when + * queuing their own work; they rely on these events to consume + * messages without I/O block. Without it, threads risk waiting + * indefinitely without handling the work. 
+ */ + if (thread->looper & BINDER_LOOPER_STATE_POLL && + thread->pid == current->pid && !thread->process_todo) + wake_up_interruptible_sync(&thread->wait); + thread->process_todo = true; } diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index d2460fa985b7..682ff550ccfb 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -606,13 +606,13 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */ /* ASMedia */ - { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ - { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */ + { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci_43bit_dma }, /* ASM1060 */ + { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci_43bit_dma }, /* ASM1060 */ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci_43bit_dma }, /* ASM1061 */ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci_43bit_dma }, /* ASM1061/1062 */ - { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */ - { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */ - { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci }, /* ASM1062+JMB575 */ + { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci_43bit_dma }, /* ASM1061R */ + { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci_43bit_dma }, /* ASM1062R */ + { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci_43bit_dma }, /* ASM1062+JMB575 */ { PCI_VDEVICE(ASMEDIA, 0x1062), board_ahci }, /* ASM1062A */ { PCI_VDEVICE(ASMEDIA, 0x1064), board_ahci }, /* ASM1064 */ { PCI_VDEVICE(ASMEDIA, 0x1164), board_ahci }, /* ASM1164 */ @@ -671,9 +671,17 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets"); static void ahci_pci_save_initial_config(struct pci_dev *pdev, struct ahci_host_priv *hpriv) { - if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) { - dev_info(&pdev->dev, "ASM1166 has only six ports\n"); - hpriv->saved_port_map = 0x3f; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA) { + switch (pdev->device) { + case 0x1166: + dev_info(&pdev->dev, "ASM1166 has only six ports\n"); + hpriv->saved_port_map = 0x3f; + break; + case 0x1064: + dev_info(&pdev->dev, "ASM1064 has only four ports\n"); + hpriv->saved_port_map = 0xf; + break; + } } if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) { diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c index 64f7f7d6ba84..11a2c199a7c2 100644 --- a/drivers/ata/ahci_ceva.c +++ b/drivers/ata/ahci_ceva.c @@ -88,7 +88,6 @@ struct ceva_ahci_priv { u32 axicc; bool is_cci_enabled; int flags; - struct reset_control *rst; }; static unsigned int ceva_ahci_read_id(struct ata_device *dev, @@ -189,6 +188,60 @@ static const struct scsi_host_template ahci_platform_sht = { AHCI_SHT(DRV_NAME), }; +static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv) +{ + int rc, i; + + rc = ahci_platform_enable_regulators(hpriv); + if (rc) + return rc; + + rc = ahci_platform_enable_clks(hpriv); + if (rc) + goto disable_regulator; + + /* Assert the controller reset */ + rc = ahci_platform_assert_rsts(hpriv); + if (rc) + goto disable_clks; + + for (i = 0; i < hpriv->nports; i++) { + rc = phy_init(hpriv->phys[i]); + if (rc) + goto disable_rsts; + } + + /* De-assert the controller reset */ + ahci_platform_deassert_rsts(hpriv); + + for (i = 0; i < hpriv->nports; i++) { + rc = phy_power_on(hpriv->phys[i]); + if (rc) { + phy_exit(hpriv->phys[i]); + goto disable_phys; + } + } + + return 0; + +disable_rsts: + ahci_platform_deassert_rsts(hpriv); + +disable_phys: + while (--i >= 0) { + phy_power_off(hpriv->phys[i]); + phy_exit(hpriv->phys[i]); + } 
+ +disable_clks: + ahci_platform_disable_clks(hpriv); + +disable_regulator: + ahci_platform_disable_regulators(hpriv); + + return rc; +} + static int ceva_ahci_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -203,47 +256,19 @@ static int ceva_ahci_probe(struct platform_device *pdev) return -ENOMEM; cevapriv->ahci_pdev = pdev; - - cevapriv->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, - NULL); - if (IS_ERR(cevapriv->rst)) - dev_err_probe(&pdev->dev, PTR_ERR(cevapriv->rst), - "failed to get reset\n"); - hpriv = ahci_platform_get_resources(pdev, 0); if (IS_ERR(hpriv)) return PTR_ERR(hpriv); - if (!cevapriv->rst) { - rc = ahci_platform_enable_resources(hpriv); - if (rc) - return rc; - } else { - int i; + hpriv->rsts = devm_reset_control_get_optional_exclusive(&pdev->dev, + NULL); + if (IS_ERR(hpriv->rsts)) + return dev_err_probe(&pdev->dev, PTR_ERR(hpriv->rsts), + "failed to get reset\n"); - rc = ahci_platform_enable_clks(hpriv); - if (rc) - return rc; - /* Assert the controller reset */ - reset_control_assert(cevapriv->rst); - - for (i = 0; i < hpriv->nports; i++) { - rc = phy_init(hpriv->phys[i]); - if (rc) - return rc; - } - - /* De-assert the controller reset */ - reset_control_deassert(cevapriv->rst); - - for (i = 0; i < hpriv->nports; i++) { - rc = phy_power_on(hpriv->phys[i]); - if (rc) { - phy_exit(hpriv->phys[i]); - return rc; - } - } - } + rc = ceva_ahci_platform_enable_resources(hpriv); + if (rc) + return rc; if (of_property_read_bool(np, "ceva,broken-gen2")) cevapriv->flags = CEVA_FLAG_BROKEN_GEN2; @@ -252,52 +277,60 @@ static int ceva_ahci_probe(struct platform_device *pdev) if (of_property_read_u8_array(np, "ceva,p0-cominit-params", (u8 *)&cevapriv->pp2c[0], 4) < 0) { dev_warn(dev, "ceva,p0-cominit-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } if (of_property_read_u8_array(np, "ceva,p1-cominit-params", (u8 *)&cevapriv->pp2c[1], 4) < 0) { dev_warn(dev, "ceva,p1-cominit-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } /* Read OOB timing value for COMWAKE from device-tree*/ if (of_property_read_u8_array(np, "ceva,p0-comwake-params", (u8 *)&cevapriv->pp3c[0], 4) < 0) { dev_warn(dev, "ceva,p0-comwake-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } if (of_property_read_u8_array(np, "ceva,p1-comwake-params", (u8 *)&cevapriv->pp3c[1], 4) < 0) { dev_warn(dev, "ceva,p1-comwake-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } /* Read phy BURST timing value from device-tree */ if (of_property_read_u8_array(np, "ceva,p0-burst-params", (u8 *)&cevapriv->pp4c[0], 4) < 0) { dev_warn(dev, "ceva,p0-burst-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } if (of_property_read_u8_array(np, "ceva,p1-burst-params", (u8 *)&cevapriv->pp4c[1], 4) < 0) { dev_warn(dev, "ceva,p1-burst-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } /* Read phy RETRY interval timing value from device-tree */ if (of_property_read_u16_array(np, "ceva,p0-retry-params", (u16 *)&cevapriv->pp5c[0], 2) < 0) { dev_warn(dev, "ceva,p0-retry-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } if (of_property_read_u16_array(np, "ceva,p1-retry-params", (u16 *)&cevapriv->pp5c[1], 2) < 0) { dev_warn(dev, "ceva,p1-retry-params property not defined\n"); - return -EINVAL; + rc 
= -EINVAL; + goto disable_resources; } /* @@ -335,7 +368,7 @@ static int __maybe_unused ceva_ahci_resume(struct device *dev) struct ahci_host_priv *hpriv = host->private_data; int rc; - rc = ahci_platform_enable_resources(hpriv); + rc = ceva_ahci_platform_enable_resources(hpriv); if (rc) return rc; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 09ed67772fae..be3412cdb22e 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2001,6 +2001,33 @@ bool ata_dev_power_init_tf(struct ata_device *dev, struct ata_taskfile *tf, return true; } +static bool ata_dev_power_is_active(struct ata_device *dev) +{ + struct ata_taskfile tf; + unsigned int err_mask; + + ata_tf_init(dev, &tf); + tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; + tf.protocol = ATA_PROT_NODATA; + tf.command = ATA_CMD_CHK_POWER; + + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); + if (err_mask) { + ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n", + err_mask); + /* + * Assume we are in standby mode so that we always force a + * spinup in ata_dev_power_set_active(). + */ + return false; + } + + ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect); + + /* Active or idle */ + return tf.nsect == 0xff; +} + /** * ata_dev_power_set_standby - Set a device power mode to standby * @dev: target device @@ -2017,6 +2044,11 @@ void ata_dev_power_set_standby(struct ata_device *dev) struct ata_taskfile tf; unsigned int err_mask; + /* If the device is already sleeping or in standby, do nothing. */ + if ((dev->flags & ATA_DFLAG_SLEEPING) || + !ata_dev_power_is_active(dev)) + return; + /* * Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5) * causing some drives to spin up and down again. For these, do nothing @@ -2042,33 +2074,6 @@ void ata_dev_power_set_standby(struct ata_device *dev) err_mask); } -static bool ata_dev_power_is_active(struct ata_device *dev) -{ - struct ata_taskfile tf; - unsigned int err_mask; - - ata_tf_init(dev, &tf); - tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; - tf.protocol = ATA_PROT_NODATA; - tf.command = ATA_CMD_CHK_POWER; - - err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); - if (err_mask) { - ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n", - err_mask); - /* - * Assume we are in standby mode so that we always force a - * spinup in ata_dev_power_set_active(). 
- */ - return false; - } - - ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect); - - /* Active or idle */ - return tf.nsect == 0xff; -} - /** * ata_dev_power_set_active - Set a device power mode to active * @dev: target device diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index e327a0229dc1..e7f713cd70d3 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -2930,6 +2930,8 @@ open_card_ubr0(struct idt77252_dev *card) vc->scq = alloc_scq(card, vc->class); if (!vc->scq) { printk("%s: can't get SCQ.\n", card->name); + kfree(card->vcs[0]); + card->vcs[0] = NULL; return -ENOMEM; } diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 018ac202de34..024b78a0cfc1 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -431,9 +431,6 @@ init_cpu_capacity_callback(struct notifier_block *nb, struct cpufreq_policy *policy = data; int cpu; - if (!raw_capacity) - return 0; - if (val != CPUFREQ_CREATE_POLICY) return 0; @@ -450,9 +447,11 @@ init_cpu_capacity_callback(struct notifier_block *nb, } if (cpumask_empty(cpus_to_visit)) { - topology_normalize_cpu_scale(); - schedule_work(&update_topology_flags_work); - free_raw_capacity(); + if (raw_capacity) { + topology_normalize_cpu_scale(); + schedule_work(&update_topology_flags_work); + free_raw_capacity(); + } pr_debug("cpu_capacity: parsing done\n"); schedule_work(&parsing_done_work); } @@ -472,7 +471,7 @@ static int __init register_cpufreq_notifier(void) * On ACPI-based systems skip registering cpufreq notifier as cpufreq * information is not needed for cpu capacity initialization. */ - if (!acpi_disabled || !raw_capacity) + if (!acpi_disabled) return -EINVAL; if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) diff --git a/drivers/base/base.h b/drivers/base/base.h index eb4c0ace9242..0738ccad08b2 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -207,7 +207,7 @@ static inline int devtmpfs_init(void) { return 0; } #endif #ifdef CONFIG_BLOCK -extern struct class block_class; +extern const struct class block_class; static inline bool is_blockdev(struct device *dev) { return dev->class == &block_class; diff --git a/drivers/base/core.c b/drivers/base/core.c index 14d46af40f9a..9828da9b933c 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -125,7 +125,7 @@ static void __fwnode_link_del(struct fwnode_link *link) */ static void __fwnode_link_cycle(struct fwnode_link *link) { - pr_debug("%pfwf: Relaxing link with %pfwf\n", + pr_debug("%pfwf: cycle: depends on %pfwf\n", link->consumer, link->supplier); link->flags |= FWLINK_FLAG_CYCLE; } @@ -284,10 +284,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target) return false; } +#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \ + DL_FLAG_CYCLE | \ + DL_FLAG_MANAGED) static inline bool device_link_flag_is_sync_state_only(u32 flags) { - return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) == - (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED); + return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY; } /** @@ -1943,6 +1945,7 @@ static bool __fw_devlink_relax_cycles(struct device *con, /* Termination condition. 
*/ if (sup_dev == con) { + pr_debug("----- cycle: start -----\n"); ret = true; goto out; } @@ -1974,8 +1977,11 @@ static bool __fw_devlink_relax_cycles(struct device *con, else par_dev = fwnode_get_next_parent_dev(sup_handle); - if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) + if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) { + pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle, + par_dev->fwnode); ret = true; + } if (!sup_dev) goto out; @@ -1991,6 +1997,8 @@ static bool __fw_devlink_relax_cycles(struct device *con, if (__fw_devlink_relax_cycles(con, dev_link->supplier->fwnode)) { + pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle, + dev_link->supplier->fwnode); fw_devlink_relax_link(dev_link); dev_link->flags |= DL_FLAG_CYCLE; ret = true; @@ -2058,13 +2066,19 @@ static int fw_devlink_create_devlink(struct device *con, /* * SYNC_STATE_ONLY device links don't block probing and supports cycles. - * So cycle detection isn't necessary and shouldn't be done. + * So, one might expect that cycle detection isn't necessary for them. + * However, if the device link was marked as SYNC_STATE_ONLY because + * it's part of a cycle, then we still need to do cycle detection. This + * is because the consumer and supplier might be part of multiple cycles + * and we need to detect all those cycles. */ - if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) { + if (!device_link_flag_is_sync_state_only(flags) || + flags & DL_FLAG_CYCLE) { device_links_write_lock(); if (__fw_devlink_relax_cycles(con, sup_handle)) { __fwnode_link_cycle(link); flags = fw_devlink_get_flags(link->flags); + pr_debug("----- cycle: end -----\n"); dev_info(con, "Fixed dependency cycle(s) with %pfwf\n", sup_handle); } diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 47de0f140ba6..0b33e81f9c9b 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -588,6 +588,7 @@ CPU_SHOW_VULN_FALLBACK(mmio_stale_data); CPU_SHOW_VULN_FALLBACK(retbleed); CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow); CPU_SHOW_VULN_FALLBACK(gds); +CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling); static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); @@ -602,6 +603,7 @@ static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL); static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); +static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -617,6 +619,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_retbleed.attr, &dev_attr_spec_rstack_overflow.attr, &dev_attr_gather_data_sampling.attr, + &dev_attr_reg_file_data_sampling.attr, NULL }; diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index f37ad34c80ec..0d01890160f3 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -13,6 +13,8 @@ #include <linux/msi.h> #include <linux/slab.h> +/* Begin of removal area. Once everything is converted over. Cleanup the includes too! 
*/ + #define DEV_ID_SHIFT 21 #define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT)) @@ -204,8 +206,8 @@ static void platform_msi_free_priv_data(struct device *dev) * Returns: * Zero for success, or an error code in case of failure */ -int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, - irq_write_msi_msg_t write_msi_msg) +static int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, + irq_write_msi_msg_t write_msi_msg) { int err; @@ -219,18 +221,6 @@ int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, return err; } -EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs); - -/** - * platform_msi_domain_free_irqs - Free MSI interrupts for @dev - * @dev: The device for which to free interrupts - */ -void platform_msi_domain_free_irqs(struct device *dev) -{ - msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN); - platform_msi_free_priv_data(dev); -} -EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs); /** * platform_msi_get_host_data - Query the private data associated with @@ -350,3 +340,104 @@ int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int vir return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg); } + +/* End of removal area */ + +/* Real per device domain interfaces */ + +/* + * This indirection can go when platform_device_msi_init_and_alloc_irqs() + * is switched to a proper irq_chip::irq_write_msi_msg() callback. Keep it + * simple for now. + */ +static void platform_msi_write_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + irq_write_msi_msg_t cb = d->chip_data; + + cb(irq_data_get_msi_desc(d), msg); +} + +static void platform_msi_set_desc_byindex(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + arg->desc = desc; + arg->hwirq = desc->msi_index; +} + +static const struct msi_domain_template platform_msi_template = { + .chip = { + .name = "pMSI", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_write_msi_msg = platform_msi_write_msi_msg, + /* The rest is filled in by the platform MSI parent */ + }, + + .ops = { + .set_desc = platform_msi_set_desc_byindex, + }, + + .info = { + .bus_token = DOMAIN_BUS_DEVICE_MSI, + }, +}; + +/** + * platform_device_msi_init_and_alloc_irqs - Initialize platform device MSI + * and allocate interrupts for @dev + * @dev: The device for which to allocate interrupts + * @nvec: The number of interrupts to allocate + * @write_msi_msg: Callback to write an interrupt message for @dev + * + * Returns: + * Zero for success, or an error code in case of failure + * + * This creates a MSI domain on @dev which has @dev->msi.domain as + * parent. The parent domain sets up the new domain. The domain has + * a fixed size of @nvec. The domain is managed by devres and will + * be removed when the device is removed. + * + * Note: For migration purposes this falls back to the original platform_msi code + * up to the point where all platforms have been converted to the MSI + * parent model. + */ +int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec, + irq_write_msi_msg_t write_msi_msg) +{ + struct irq_domain *domain = dev->msi.domain; + + if (!domain || !write_msi_msg) + return -EINVAL; + + /* Migration support. Will go away once everything is converted */ + if (!irq_domain_is_msi_parent(domain)) + return platform_msi_domain_alloc_irqs(dev, nvec, write_msi_msg); + + /* + * @write_msi_msg is stored in the resulting msi_domain_info::data. 
+ * The underlying domain creation mechanism will assign that + * callback to the resulting irq chip. + */ + if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, + &platform_msi_template, + nvec, NULL, write_msi_msg)) + return -ENODEV; + + return msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1); +} +EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs); + +/** + * platform_device_msi_free_irqs_all - Free all interrupts for @dev + * @dev: The device for which to free interrupts + */ +void platform_device_msi_free_irqs_all(struct device *dev) +{ + struct irq_domain *domain = dev->msi.domain; + + msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN); + + /* Migration support. Will go away once everything is converted */ + if (!irq_domain_is_msi_parent(domain)) + platform_msi_free_priv_data(dev); +} +EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all); diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c index 026bdcb45127..0d957c5f1bcc 100644 --- a/drivers/base/regmap/regmap-kunit.c +++ b/drivers/base/regmap/regmap-kunit.c @@ -9,6 +9,23 @@ #define BLOCK_TEST_SIZE 12 +static void get_changed_bytes(void *orig, void *new, size_t size) +{ + char *o = orig; + char *n = new; + int i; + + get_random_bytes(new, size); + + /* + * This could be nicer and more efficient but we shouldn't + * super care. + */ + for (i = 0; i < size; i++) + while (n[i] == o[i]) + get_random_bytes(&n[i], 1); +} + static const struct regmap_config test_regmap_config = { .max_register = BLOCK_TEST_SIZE, .reg_stride = 1, @@ -1202,7 +1219,8 @@ static void raw_noinc_write(struct kunit *test) struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; - unsigned int val, val_test, val_last; + unsigned int val; + u16 val_test, val_last; u16 val_array[BLOCK_TEST_SIZE]; config = raw_regmap_config; @@ -1251,7 +1269,7 @@ static void raw_sync(struct kunit *test) struct regmap *map; struct regmap_config config; struct regmap_ram_data *data; - u16 val[2]; + u16 val[3]; u16 *hw_buf; unsigned int rval; int i; @@ -1265,17 +1283,13 @@ static void raw_sync(struct kunit *test) hw_buf = (u16 *)data->vals; - get_random_bytes(&val, sizeof(val)); + get_changed_bytes(&hw_buf[2], &val[0], sizeof(val)); /* Do a regular write and a raw write in cache only mode */ regcache_cache_only(map, true); - KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val))); - if (config.val_format_endian == REGMAP_ENDIAN_BIG) - KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6, - be16_to_cpu(val[0]))); - else - KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6, - le16_to_cpu(val[0]))); + KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, + sizeof(u16) * 2)); + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2])); /* We should read back the new values, and defaults for the rest */ for (i = 0; i < config.max_register + 1; i++) { @@ -1284,24 +1298,34 @@ static void raw_sync(struct kunit *test) switch (i) { case 2: case 3: - case 6: if (config.val_format_endian == REGMAP_ENDIAN_BIG) { KUNIT_EXPECT_EQ(test, rval, - be16_to_cpu(val[i % 2])); + be16_to_cpu(val[i - 2])); } else { KUNIT_EXPECT_EQ(test, rval, - le16_to_cpu(val[i % 2])); + le16_to_cpu(val[i - 2])); } break; + case 4: + KUNIT_EXPECT_EQ(test, rval, val[i - 2]); + break; default: KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval); break; } } + + /* + * The value written via _write() was translated by the core, + * translate the original copy for comparison purposes. 
+ */ + if (config.val_format_endian == REGMAP_ENDIAN_BIG) + val[2] = cpu_to_be16(val[2]); + else + val[2] = cpu_to_le16(val[2]); /* The values should not appear in the "hardware" */ - KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val)); - KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16)); + KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val)); for (i = 0; i < config.max_register + 1; i++) data->written[i] = false; @@ -1312,8 +1336,7 @@ static void raw_sync(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); /* The values should now appear in the "hardware" */ - KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val)); - KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16)); + KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val)); regmap_exit(map); } diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 2b98114a9fe0..a25414228e47 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1779,7 +1779,7 @@ static int fd_alloc_disk(int drive, int system) struct gendisk *disk; int err; - disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL); + disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL); if (IS_ERR(disk)) return PTR_ERR(disk); diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index b1b47d88f5db..b6dac8cee70f 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -24,8 +24,8 @@ static DEFINE_MUTEX(aoeblk_mutex); static struct kmem_cache *buf_pool_cache; static struct dentry *aoe_debugfs_dir; -/* GPFS needs a larger value than the default. */ -static int aoe_maxsectors; +/* random default picked from the historic block max_sectors cap */ +static int aoe_maxsectors = 2560; module_param(aoe_maxsectors, int, 0644); MODULE_PARM_DESC(aoe_maxsectors, "When nonzero, set the maximum number of sectors per I/O request"); @@ -334,6 +334,10 @@ aoeblk_gdalloc(void *vp) mempool_t *mp; struct blk_mq_tag_set *set; sector_t ssize; + struct queue_limits lim = { + .max_hw_sectors = aoe_maxsectors, + .io_opt = SZ_2M, + }; ulong flags; int late = 0; int err; @@ -371,7 +375,7 @@ aoeblk_gdalloc(void *vp) goto err_mempool; } - gd = blk_mq_alloc_disk(set, d); + gd = blk_mq_alloc_disk(set, &lim, d); if (IS_ERR(gd)) { pr_err("aoe: cannot allocate block queue for %ld.%d\n", d->aoemajor, d->aoeminor); @@ -384,14 +388,9 @@ aoeblk_gdalloc(void *vp) WARN_ON(d->flags & DEVFL_TKILL); WARN_ON(d->gd); WARN_ON(d->flags & DEVFL_UP); - /* random number picked from the history block max_sectors cap */ - blk_queue_max_hw_sectors(gd->queue, 2560u); - blk_queue_io_opt(gd->queue, SZ_2M); d->bufpool = mp; d->blkq = gd->queue; d->gd = gd; - if (aoe_maxsectors) - blk_queue_max_hw_sectors(gd->queue, aoe_maxsectors); gd->major = AOE_MAJOR; gd->first_minor = d->sysminor; gd->minors = AOE_PARTITIONS; diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index d7317425be51..cc9077b588d7 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -419,13 +419,16 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu rcu_read_lock(); for_each_netdev_rcu(&init_net, ifp) { dev_hold(ifp); - if (!is_aoe_netif(ifp)) - goto cont; + if (!is_aoe_netif(ifp)) { + dev_put(ifp); + continue; + } skb = new_skb(sizeof *h + sizeof *ch); if (skb == NULL) { printk(KERN_INFO "aoe: skb alloc failure\n"); - goto cont; + dev_put(ifp); + continue; } skb_put(skb, sizeof *h + sizeof *ch); skb->dev = ifp; @@ -440,9 +443,6 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head 
*qu h->major = cpu_to_be16(aoemajor); h->minor = aoeminor; h->cmd = AOECMD_CFG; - -cont: - dev_put(ifp); } rcu_read_unlock(); } diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c index c51ea95bc2ce..923a134fd766 100644 --- a/drivers/block/aoe/aoenet.c +++ b/drivers/block/aoe/aoenet.c @@ -63,6 +63,7 @@ tx(int id) __must_hold(&txlock) pr_warn("aoe: packet could not be sent on %s. %s\n", ifp ? ifp->name : "netif", "consider increasing tx_queue_len"); + dev_put(ifp); spin_lock_irq(&txlock); } return 0; diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 50949207798d..cacc4ba942a8 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1994,7 +1994,7 @@ static int ataflop_alloc_disk(unsigned int drive, unsigned int type) { struct gendisk *disk; - disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL); + disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL); if (IS_ERR(disk)) return PTR_ERR(disk); diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 970bd6ff38c4..e322cef6596b 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -318,6 +318,16 @@ static int brd_alloc(int i) struct gendisk *disk; char buf[DISK_NAME_LEN]; int err = -ENOMEM; + struct queue_limits lim = { + /* + * This is so fdisk will align partitions on 4k, because of + * direct_access API needing 4k alignment, returning a PFN + * (This is only a problem on very small devices <= 4M, + * otherwise fdisk will align on 1M. Regardless this call + * is harmless) + */ + .physical_block_size = PAGE_SIZE, + }; list_for_each_entry(brd, &brd_devices, brd_list) if (brd->brd_number == i) @@ -335,10 +345,11 @@ static int brd_alloc(int i) debugfs_create_u64(buf, 0444, brd_debugfs_dir, &brd->brd_nr_pages); - disk = brd->brd_disk = blk_alloc_disk(NUMA_NO_NODE); - if (!disk) + disk = brd->brd_disk = blk_alloc_disk(&lim, NUMA_NO_NODE); + if (IS_ERR(disk)) { + err = PTR_ERR(disk); goto out_free_dev; - + } disk->major = RAMDISK_MAJOR; disk->first_minor = i * max_part; disk->minors = max_part; @@ -347,15 +358,6 @@ static int brd_alloc(int i) strscpy(disk->disk_name, buf, DISK_NAME_LEN); set_capacity(disk, rd_size * 2); - /* - * This is so fdisk will align partitions on 4k, because of - * direct_access API needing 4k alignment, returning a PFN - * (This is only a problem on very small devices <= 4M, - * otherwise fdisk will align on 1M. 
Regardless this call - * is harmless) - */ - blk_queue_physical_block_size(disk->queue, PAGE_SIZE); - /* Tell the block layer that this is not a rotational device */ blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue); diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index c21e3732759e..94dc0a235919 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -524,9 +524,9 @@ struct drbd_md { struct drbd_backing_dev { struct block_device *backing_bdev; - struct bdev_handle *backing_bdev_handle; + struct file *backing_bdev_file; struct block_device *md_bdev; - struct bdev_handle *md_bdev_handle; + struct file *f_md_bdev; struct drbd_md md; struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */ sector_t known_size; /* last known size of that backing device */ diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 6bc86106c7b2..113b441d4d36 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2690,6 +2690,14 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig int id; int vnr = adm_ctx->volume; enum drbd_ret_code err = ERR_NOMEM; + struct queue_limits lim = { + /* + * Setting the max_hw_sectors to an odd value of 8kibyte here. + * This triggers a max_bio_size message upon first attach or + * connect. + */ + .max_hw_sectors = DRBD_MAX_BIO_SIZE_SAFE >> 8, + }; device = minor_to_device(minor); if (device) @@ -2708,9 +2716,11 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig drbd_init_set_defaults(device); - disk = blk_alloc_disk(NUMA_NO_NODE); - if (!disk) + disk = blk_alloc_disk(&lim, NUMA_NO_NODE); + if (IS_ERR(disk)) { + err = PTR_ERR(disk); goto out_no_disk; + } device->vdisk = disk; device->rq_queue = disk->queue; @@ -2727,9 +2737,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue); blk_queue_write_cache(disk->queue, true, true); - /* Setting the max_hw_sectors to an odd value of 8kibyte here - This triggers a max_bio_size message upon first attach or connect */ - blk_queue_max_hw_sectors(disk->queue, DRBD_MAX_BIO_SIZE_SAFE >> 8); device->md_io.page = alloc_page(GFP_KERNEL); if (!device->md_io.page) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 43747a1aae43..5d65c9754d83 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1189,9 +1189,31 @@ static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc) return 0; } -static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity) +static unsigned int drbd_max_peer_bio_size(struct drbd_device *device) { - q->limits.discard_granularity = granularity; + /* + * We may ignore peer limits if the peer is modern enough. From 8.3.8 + * onwards the peer can use multiple BIOs for a single peer_request. + */ + if (device->state.conn < C_WF_REPORT_PARAMS) + return device->peer_max_bio_size; + + if (first_peer_device(device)->connection->agreed_pro_version < 94) + return min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); + + /* + * Correct old drbd (up to 8.3.7) if it believes it can do more than + * 32KiB. 
+ */ + if (first_peer_device(device)->connection->agreed_pro_version == 94) + return DRBD_MAX_SIZE_H80_PACKET; + + /* + * drbd 8.3.8 onwards, before 8.4.0 + */ + if (first_peer_device(device)->connection->agreed_pro_version < 100) + return DRBD_MAX_BIO_SIZE_P95; + return DRBD_MAX_BIO_SIZE; } static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection) @@ -1204,149 +1226,119 @@ static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection) return AL_EXTENT_SIZE >> 9; } -static void decide_on_discard_support(struct drbd_device *device, +static bool drbd_discard_supported(struct drbd_connection *connection, struct drbd_backing_dev *bdev) { - struct drbd_connection *connection = - first_peer_device(device)->connection; - struct request_queue *q = device->rq_queue; - unsigned int max_discard_sectors; - if (bdev && !bdev_max_discard_sectors(bdev->backing_bdev)) - goto not_supported; + return false; if (connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) { drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n"); - goto not_supported; + return false; } - /* - * We don't care for the granularity, really. - * - * Stacking limits below should fix it for the local device. Whether or - * not it is a suitable granularity on the remote device is not our - * problem, really. If you care, you need to use devices with similar - * topology on all peers. - */ - blk_queue_discard_granularity(q, 512); - max_discard_sectors = drbd_max_discard_sectors(connection); - blk_queue_max_discard_sectors(q, max_discard_sectors); - blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); - return; - -not_supported: - blk_queue_discard_granularity(q, 0); - blk_queue_max_discard_sectors(q, 0); + return true; } -static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q) +/* This is the workaround for "bio would need to, but cannot, be split" */ +static unsigned int drbd_backing_dev_max_segments(struct drbd_device *device) { - /* Fixup max_write_zeroes_sectors after blk_stack_limits(): - * if we can handle "zeroes" efficiently on the protocol, - * we want to do that, even if our backend does not announce - * max_write_zeroes_sectors itself. */ - struct drbd_connection *connection = first_peer_device(device)->connection; - /* If the peer announces WZEROES support, use it. Otherwise, rather - * send explicit zeroes than rely on some discard-zeroes-data magic. 
*/ - if (connection->agreed_features & DRBD_FF_WZEROES) - q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS; - else - q->limits.max_write_zeroes_sectors = 0; -} + unsigned int max_segments; -static void fixup_discard_support(struct drbd_device *device, struct request_queue *q) -{ - unsigned int max_discard = device->rq_queue->limits.max_discard_sectors; - unsigned int discard_granularity = - device->rq_queue->limits.discard_granularity >> SECTOR_SHIFT; + rcu_read_lock(); + max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs; + rcu_read_unlock(); - if (discard_granularity > max_discard) { - blk_queue_discard_granularity(q, 0); - blk_queue_max_discard_sectors(q, 0); - } + if (!max_segments) + return BLK_MAX_SEGMENTS; + return max_segments; } -static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev, - unsigned int max_bio_size, struct o_qlim *o) +void drbd_reconsider_queue_parameters(struct drbd_device *device, + struct drbd_backing_dev *bdev, struct o_qlim *o) { + struct drbd_connection *connection = + first_peer_device(device)->connection; struct request_queue * const q = device->rq_queue; - unsigned int max_hw_sectors = max_bio_size >> 9; - unsigned int max_segments = 0; + unsigned int now = queue_max_hw_sectors(q) << 9; + struct queue_limits lim; struct request_queue *b = NULL; - struct disk_conf *dc; + unsigned int new; if (bdev) { b = bdev->backing_bdev->bd_disk->queue; - max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); - rcu_read_lock(); - dc = rcu_dereference(device->ldev->disk_conf); - max_segments = dc->max_bio_bvecs; - rcu_read_unlock(); - - blk_set_stacking_limits(&q->limits); + device->local_max_bio_size = + queue_max_hw_sectors(b) << SECTOR_SHIFT; } - blk_queue_max_hw_sectors(q, max_hw_sectors); - /* This is the workaround for "bio would need to, but cannot, be split" */ - blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); - blk_queue_segment_boundary(q, PAGE_SIZE-1); - decide_on_discard_support(device, bdev); - - if (b) { - blk_stack_limits(&q->limits, &b->limits, 0); - disk_update_readahead(device->vdisk); + /* + * We may later detach and re-attach on a disconnected Primary. Avoid + * decreasing the value in this case. + * + * We want to store what we know the peer DRBD can handle, not what the + * peer IO backend can handle. 
+ */ + new = min3(DRBD_MAX_BIO_SIZE, device->local_max_bio_size, + max(drbd_max_peer_bio_size(device), device->peer_max_bio_size)); + if (new != now) { + if (device->state.role == R_PRIMARY && new < now) + drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", + new, now); + drbd_info(device, "max BIO size = %u\n", new); } - fixup_write_zeroes(device, q); - fixup_discard_support(device, q); -} - -void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o) -{ - unsigned int now, new, local, peer; - - now = queue_max_hw_sectors(device->rq_queue) << 9; - local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */ - peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */ + lim = queue_limits_start_update(q); if (bdev) { - local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9; - device->local_max_bio_size = local; + blk_set_stacking_limits(&lim); + lim.max_segments = drbd_backing_dev_max_segments(device); + } else { + lim.max_segments = BLK_MAX_SEGMENTS; } - local = min(local, DRBD_MAX_BIO_SIZE); - /* We may ignore peer limits if the peer is modern enough. - Because new from 8.3.8 onwards the peer can use multiple - BIOs for a single peer_request */ - if (device->state.conn >= C_WF_REPORT_PARAMS) { - if (first_peer_device(device)->connection->agreed_pro_version < 94) - peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); - /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */ - else if (first_peer_device(device)->connection->agreed_pro_version == 94) - peer = DRBD_MAX_SIZE_H80_PACKET; - else if (first_peer_device(device)->connection->agreed_pro_version < 100) - peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */ - else - peer = DRBD_MAX_BIO_SIZE; + lim.max_hw_sectors = new >> SECTOR_SHIFT; + lim.seg_boundary_mask = PAGE_SIZE - 1; - /* We may later detach and re-attach on a disconnected Primary. - * Avoid this setting to jump back in that case. - * We want to store what we know the peer DRBD can handle, - * not what the peer IO backend can handle. */ - if (peer > device->peer_max_bio_size) - device->peer_max_bio_size = peer; + /* + * We don't care for the granularity, really. + * + * Stacking limits below should fix it for the local device. Whether or + * not it is a suitable granularity on the remote device is not our + * problem, really. If you care, you need to use devices with similar + * topology on all peers. + */ + if (drbd_discard_supported(connection, bdev)) { + lim.discard_granularity = 512; + lim.max_hw_discard_sectors = + drbd_max_discard_sectors(connection); + } else { + lim.discard_granularity = 0; + lim.max_hw_discard_sectors = 0; } - new = min(local, peer); - if (device->state.role == R_PRIMARY && new < now) - drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now); + if (bdev) + blk_stack_limits(&lim, &b->limits, 0); - if (new != now) - drbd_info(device, "max BIO size = %u\n", new); + /* + * If we can handle "zeroes" efficiently on the protocol, we want to do + * that, even if our backend does not announce max_write_zeroes_sectors + * itself. 
+ */ + if (connection->agreed_features & DRBD_FF_WZEROES) + lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS; + else + lim.max_write_zeroes_sectors = 0; + + if ((lim.discard_granularity >> SECTOR_SHIFT) > + lim.max_hw_discard_sectors) { + lim.discard_granularity = 0; + lim.max_hw_discard_sectors = 0; + } - drbd_setup_queue_param(device, bdev, new, o); + if (queue_limits_commit_update(q, &lim)) + drbd_err(device, "setting new queue limits failed\n"); } /* Starts the worker thread */ @@ -1635,45 +1627,45 @@ success: return 0; } -static struct bdev_handle *open_backing_dev(struct drbd_device *device, +static struct file *open_backing_dev(struct drbd_device *device, const char *bdev_path, void *claim_ptr, bool do_bd_link) { - struct bdev_handle *handle; + struct file *file; int err = 0; - handle = bdev_open_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE, - claim_ptr, NULL); - if (IS_ERR(handle)) { + file = bdev_file_open_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE, + claim_ptr, NULL); + if (IS_ERR(file)) { drbd_err(device, "open(\"%s\") failed with %ld\n", - bdev_path, PTR_ERR(handle)); - return handle; + bdev_path, PTR_ERR(file)); + return file; } if (!do_bd_link) - return handle; + return file; - err = bd_link_disk_holder(handle->bdev, device->vdisk); + err = bd_link_disk_holder(file_bdev(file), device->vdisk); if (err) { - bdev_release(handle); + fput(file); drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n", bdev_path, err); - handle = ERR_PTR(err); + file = ERR_PTR(err); } - return handle; + return file; } static int open_backing_devices(struct drbd_device *device, struct disk_conf *new_disk_conf, struct drbd_backing_dev *nbc) { - struct bdev_handle *handle; + struct file *file; - handle = open_backing_dev(device, new_disk_conf->backing_dev, device, + file = open_backing_dev(device, new_disk_conf->backing_dev, device, true); - if (IS_ERR(handle)) + if (IS_ERR(file)) return ERR_OPEN_DISK; - nbc->backing_bdev = handle->bdev; - nbc->backing_bdev_handle = handle; + nbc->backing_bdev = file_bdev(file); + nbc->backing_bdev_file = file; /* * meta_dev_idx >= 0: external fixed size, possibly multiple @@ -1683,7 +1675,7 @@ static int open_backing_devices(struct drbd_device *device, * should check it for you already; but if you don't, or * someone fooled it, we need to double check here) */ - handle = open_backing_dev(device, new_disk_conf->meta_dev, + file = open_backing_dev(device, new_disk_conf->meta_dev, /* claim ptr: device, if claimed exclusively; shared drbd_m_holder, * if potentially shared with other drbd minors */ (new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder, @@ -1691,21 +1683,21 @@ static int open_backing_devices(struct drbd_device *device, * as would happen with internal metadata. 
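The drbd_reconsider_queue_parameters() rework above is a good illustration of the atomic queue-limits API this series converts drivers to: the current limits are snapshotted with queue_limits_start_update(), adjusted as a whole, then applied and validated in one step by queue_limits_commit_update(). A minimal sketch of that pattern follows; the function name and block-size parameter are hypothetical and not part of the patch.

#include <linux/blkdev.h>

/* Hypothetical example, not from the patch: update a queue's block size
 * limits atomically instead of calling individual blk_queue_* setters. */
static int example_update_limits(struct request_queue *q, unsigned int bsize)
{
        struct queue_limits lim;

        lim = queue_limits_start_update(q);     /* snapshot of current limits */
        lim.logical_block_size = bsize;
        lim.physical_block_size = bsize;
        lim.io_min = bsize;
        /* nothing takes effect until the commit, which also validates the set */
        return queue_limits_commit_update(q, &lim);
}

Because the commit is the single point of failure, converted callers such as the drbd code above only check one return value and log "setting new queue limits failed" if it does not succeed.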
*/ (new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT && new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL)); - if (IS_ERR(handle)) + if (IS_ERR(file)) return ERR_OPEN_MD_DISK; - nbc->md_bdev = handle->bdev; - nbc->md_bdev_handle = handle; + nbc->md_bdev = file_bdev(file); + nbc->f_md_bdev = file; return NO_ERROR; } static void close_backing_dev(struct drbd_device *device, - struct bdev_handle *handle, bool do_bd_unlink) + struct file *bdev_file, bool do_bd_unlink) { - if (!handle) + if (!bdev_file) return; if (do_bd_unlink) - bd_unlink_disk_holder(handle->bdev, device->vdisk); - bdev_release(handle); + bd_unlink_disk_holder(file_bdev(bdev_file), device->vdisk); + fput(bdev_file); } void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev) @@ -1713,9 +1705,9 @@ void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev * if (ldev == NULL) return; - close_backing_dev(device, ldev->md_bdev_handle, + close_backing_dev(device, ldev->f_md_bdev, ldev->md_bdev != ldev->backing_bdev); - close_backing_dev(device, ldev->backing_bdev_handle, true); + close_backing_dev(device, ldev->backing_bdev_file, true); kfree(ldev->disk_conf); kfree(ldev); @@ -2131,9 +2123,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) fail: conn_reconfig_done(connection); if (nbc) { - close_backing_dev(device, nbc->md_bdev_handle, + close_backing_dev(device, nbc->f_md_bdev, nbc->md_bdev != nbc->backing_bdev); - close_backing_dev(device, nbc->backing_bdev_handle, true); + close_backing_dev(device, nbc->backing_bdev_file, true); kfree(nbc); } kfree(new_disk_conf); diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index 287a8d1d3f70..e858e7e0383f 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -1542,9 +1542,10 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device, int notify_resource_state_change(struct sk_buff *skb, unsigned int seq, - struct drbd_resource_state_change *resource_state_change, + void *state_change, enum drbd_notification_type type) { + struct drbd_resource_state_change *resource_state_change = state_change; struct drbd_resource *resource = resource_state_change->resource; struct resource_info resource_info = { .res_role = resource_state_change->role[NEW], @@ -1558,13 +1559,14 @@ int notify_resource_state_change(struct sk_buff *skb, int notify_connection_state_change(struct sk_buff *skb, unsigned int seq, - struct drbd_connection_state_change *connection_state_change, + void *state_change, enum drbd_notification_type type) { - struct drbd_connection *connection = connection_state_change->connection; + struct drbd_connection_state_change *p = state_change; + struct drbd_connection *connection = p->connection; struct connection_info connection_info = { - .conn_connection_state = connection_state_change->cstate[NEW], - .conn_role = connection_state_change->peer_role[NEW], + .conn_connection_state = p->cstate[NEW], + .conn_role = p->peer_role[NEW], }; return notify_connection_state(skb, seq, connection, &connection_info, type); @@ -1572,9 +1574,10 @@ int notify_connection_state_change(struct sk_buff *skb, int notify_device_state_change(struct sk_buff *skb, unsigned int seq, - struct drbd_device_state_change *device_state_change, + void *state_change, enum drbd_notification_type type) { + struct drbd_device_state_change *device_state_change = state_change; struct drbd_device *device = device_state_change->device; struct device_info device_info = { .dev_disk_state = 
device_state_change->disk_state[NEW], @@ -1585,9 +1588,10 @@ int notify_device_state_change(struct sk_buff *skb, int notify_peer_device_state_change(struct sk_buff *skb, unsigned int seq, - struct drbd_peer_device_state_change *p, + void *state_change, enum drbd_notification_type type) { + struct drbd_peer_device_state_change *p = state_change; struct drbd_peer_device *peer_device = p->peer_device; struct peer_device_info peer_device_info = { .peer_repl_state = p->repl_state[NEW], @@ -1605,8 +1609,8 @@ static void broadcast_state_change(struct drbd_state_change *state_change) struct drbd_resource_state_change *resource_state_change = &state_change->resource[0]; bool resource_state_has_changed; unsigned int n_device, n_connection, n_peer_device, n_peer_devices; - int (*last_func)(struct sk_buff *, unsigned int, void *, - enum drbd_notification_type) = NULL; + int (*last_func)(struct sk_buff *, unsigned int, + void *, enum drbd_notification_type) = NULL; void *last_arg = NULL; #define HAS_CHANGED(state) ((state)[OLD] != (state)[NEW]) @@ -1616,7 +1620,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change) }) #define REMEMBER_STATE_CHANGE(func, arg, type) \ ({ FINAL_STATE_CHANGE(type | NOTIFY_CONTINUES); \ - last_func = (typeof(last_func))func; \ + last_func = func; \ last_arg = arg; \ }) diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h index 9d78d8e3912e..a56a57d67686 100644 --- a/drivers/block/drbd/drbd_state_change.h +++ b/drivers/block/drbd/drbd_state_change.h @@ -46,19 +46,19 @@ extern void forget_state_change(struct drbd_state_change *); extern int notify_resource_state_change(struct sk_buff *, unsigned int, - struct drbd_resource_state_change *, + void *, enum drbd_notification_type type); extern int notify_connection_state_change(struct sk_buff *, unsigned int, - struct drbd_connection_state_change *, + void *, enum drbd_notification_type type); extern int notify_device_state_change(struct sk_buff *, unsigned int, - struct drbd_device_state_change *, + void *, enum drbd_notification_type type); extern int notify_peer_device_state_change(struct sk_buff *, unsigned int, - struct drbd_peer_device_state_change *, + void *, enum drbd_notification_type type); #endif /* DRBD_STATE_CHANGE_H */ diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index d0e41d52d6a9..1b399ec8c07d 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -530,14 +530,13 @@ static struct format_descr format_req; static char *floppy_track_buffer; static int max_buffer_sectors; -typedef void (*done_f)(int); static const struct cont_t { void (*interrupt)(void); /* this is called after the interrupt of the * main command */ void (*redo)(void); /* this is called to retry the operation */ void (*error)(void); /* this is called to tally an error */ - done_f done; /* this is called to say if the operation has + void (*done)(int); /* this is called to say if the operation has * succeeded/failed */ } *cont; @@ -985,6 +984,10 @@ static void empty(void) { } +static void empty_done(int result) +{ +} + static void (*floppy_work_fn)(void); static void floppy_work_workfn(struct work_struct *work) @@ -1998,14 +2001,14 @@ static const struct cont_t wakeup_cont = { .interrupt = empty, .redo = do_wakeup, .error = empty, - .done = (done_f)empty + .done = empty_done, }; static const struct cont_t intr_cont = { .interrupt = empty, .redo = process_fd_request, .error = empty, - .done = (done_f)empty + .done = empty_done, }; /* schedules handler, waiting 
for completion. May be interrupted, will then @@ -4513,13 +4516,15 @@ static bool floppy_available(int drive) static int floppy_alloc_disk(unsigned int drive, unsigned int type) { + struct queue_limits lim = { + .max_hw_sectors = 64, + }; struct gendisk *disk; - disk = blk_mq_alloc_disk(&tag_sets[drive], NULL); + disk = blk_mq_alloc_disk(&tag_sets[drive], &lim, NULL); if (IS_ERR(disk)) return PTR_ERR(disk); - blk_queue_max_hw_sectors(disk->queue, 64); disk->major = FLOPPY_MAJOR; disk->first_minor = TOMINOR(drive) | (type << 2); disk->minors = 1; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f8145499da38..28a95fd366fe 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -750,12 +750,13 @@ static void loop_sysfs_exit(struct loop_device *lo) &loop_attribute_group); } -static void loop_config_discard(struct loop_device *lo) +static void loop_config_discard(struct loop_device *lo, + struct queue_limits *lim) { struct file *file = lo->lo_backing_file; struct inode *inode = file->f_mapping->host; - struct request_queue *q = lo->lo_queue; - u32 granularity, max_discard_sectors; + u32 granularity = 0, max_discard_sectors = 0; + struct kstatfs sbuf; /* * If the backing device is a block device, mirror its zeroing @@ -775,29 +776,17 @@ static void loop_config_discard(struct loop_device *lo) * We use punch hole to reclaim the free space used by the * image a.k.a. discard. */ - } else if (!file->f_op->fallocate) { - max_discard_sectors = 0; - granularity = 0; - - } else { - struct kstatfs sbuf; - + } else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) { max_discard_sectors = UINT_MAX >> 9; - if (!vfs_statfs(&file->f_path, &sbuf)) - granularity = sbuf.f_bsize; - else - max_discard_sectors = 0; + granularity = sbuf.f_bsize; } - if (max_discard_sectors) { - q->limits.discard_granularity = granularity; - blk_queue_max_discard_sectors(q, max_discard_sectors); - blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); - } else { - q->limits.discard_granularity = 0; - blk_queue_max_discard_sectors(q, 0); - blk_queue_max_write_zeroes_sectors(q, 0); - } + lim->max_hw_discard_sectors = max_discard_sectors; + lim->max_write_zeroes_sectors = max_discard_sectors; + if (max_discard_sectors) + lim->discard_granularity = granularity; + else + lim->discard_granularity = 0; } struct loop_worker { @@ -986,6 +975,20 @@ loop_set_status_from_info(struct loop_device *lo, return 0; } +static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize, + bool update_discard_settings) +{ + struct queue_limits lim; + + lim = queue_limits_start_update(lo->lo_queue); + lim.logical_block_size = bsize; + lim.physical_block_size = bsize; + lim.io_min = bsize; + if (update_discard_settings) + loop_config_discard(lo, &lim); + return queue_limits_commit_update(lo->lo_queue, &lim); +} + static int loop_configure(struct loop_device *lo, blk_mode_t mode, struct block_device *bdev, const struct loop_config *config) @@ -1083,11 +1086,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, else bsize = 512; - blk_queue_logical_block_size(lo->lo_queue, bsize); - blk_queue_physical_block_size(lo->lo_queue, bsize); - blk_queue_io_min(lo->lo_queue, bsize); + error = loop_reconfigure_limits(lo, bsize, true); + if (WARN_ON_ONCE(error)) + goto out_unlock; - loop_config_discard(lo); loop_update_rotational(lo); loop_update_dio(lo); loop_sysfs_init(lo); @@ -1154,9 +1156,7 @@ static void __loop_clr_fd(struct loop_device *lo, bool release) lo->lo_offset = 0; lo->lo_sizelimit = 
0; memset(lo->lo_file_name, 0, LO_NAME_SIZE); - blk_queue_logical_block_size(lo->lo_queue, 512); - blk_queue_physical_block_size(lo->lo_queue, 512); - blk_queue_io_min(lo->lo_queue, 512); + loop_reconfigure_limits(lo, 512, false); invalidate_disk(lo->lo_disk); loop_sysfs_exit(lo); /* let user-space know about this change */ @@ -1488,9 +1488,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) invalidate_bdev(lo->lo_device); blk_mq_freeze_queue(lo->lo_queue); - blk_queue_logical_block_size(lo->lo_queue, arg); - blk_queue_physical_block_size(lo->lo_queue, arg); - blk_queue_io_min(lo->lo_queue, arg); + err = loop_reconfigure_limits(lo, arg, false); loop_update_dio(lo); blk_mq_unfreeze_queue(lo->lo_queue); @@ -1982,6 +1980,12 @@ static const struct blk_mq_ops loop_mq_ops = { static int loop_add(int i) { + struct queue_limits lim = { + /* + * Random number picked from the historic block max_sectors cap. + */ + .max_hw_sectors = 2560u, + }; struct loop_device *lo; struct gendisk *disk; int err; @@ -2025,16 +2029,13 @@ static int loop_add(int i) if (err) goto out_free_idr; - disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, lo); + disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, &lim, lo); if (IS_ERR(disk)) { err = PTR_ERR(disk); goto out_cleanup_tags; } lo->lo_queue = lo->lo_disk->queue; - /* random number picked from the history block max_sectors cap */ - blk_queue_max_hw_sectors(lo->lo_queue, 2560u); - /* * By default, we do buffer IO, so it doesn't make sense to enable * merge because the I/O submitted to backing file is handled page by diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index b200950e8fb5..43a187609ef7 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3401,6 +3401,12 @@ static const struct blk_mq_ops mtip_mq_ops = { */ static int mtip_block_initialize(struct driver_data *dd) { + struct queue_limits lim = { + .physical_block_size = 4096, + .max_hw_sectors = 0xffff, + .max_segments = MTIP_MAX_SG, + .max_segment_size = 0x400000, + }; int rv = 0, wait_for_rebuild = 0; sector_t capacity; unsigned int index = 0; @@ -3431,7 +3437,7 @@ static int mtip_block_initialize(struct driver_data *dd) goto block_queue_alloc_tag_error; } - dd->disk = blk_mq_alloc_disk(&dd->tags, dd); + dd->disk = blk_mq_alloc_disk(&dd->tags, &lim, dd); if (IS_ERR(dd->disk)) { dev_err(&dd->pdev->dev, "Unable to allocate request queue\n"); @@ -3481,12 +3487,7 @@ skip_create_disk: /* Set device limits. */ blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue); - blk_queue_max_segments(dd->queue, MTIP_MAX_SG); - blk_queue_physical_block_size(dd->queue, 4096); - blk_queue_max_hw_sectors(dd->queue, 0xffff); - blk_queue_max_segment_size(dd->queue, 0x400000); dma_set_max_seg_size(&dd->pdev->dev, 0x400000); - blk_queue_io_min(dd->queue, 4096); /* Set the capacity of the device in 512 byte sectors. 
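The loop and mtip32xx hunks above show the allocation-side half of the same conversion: blk_mq_alloc_disk() (and blk_alloc_disk() for bio-based drivers) now take an initial queue_limits and return an ERR_PTR() on failure rather than NULL. A short sketch under those assumptions; the tag set, queuedata pointer and the particular limit values are placeholders.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>

/* Hypothetical example, not from the patch: allocate a disk with its
 * hardware limits declared up front instead of fixing them up afterwards. */
static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *set,
                                          void *queuedata)
{
        struct queue_limits lim = {
                .logical_block_size     = 4096,
                .max_hw_sectors         = 256,
        };

        /* returns ERR_PTR() on failure, so check with IS_ERR(), not NULL */
        return blk_mq_alloc_disk(set, &lim, queuedata);
}

The NULL-versus-IS_ERR() change is why the converted call sites above now propagate PTR_ERR(disk) instead of hard-coding -ENOMEM.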
*/ if (!(mtip_hw_get_capacity(dd, &capacity))) { diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c index d914156db2d8..27b2187e7a6d 100644 --- a/drivers/block/n64cart.c +++ b/drivers/block/n64cart.c @@ -114,6 +114,10 @@ static const struct block_device_operations n64cart_fops = { */ static int __init n64cart_probe(struct platform_device *pdev) { + struct queue_limits lim = { + .physical_block_size = 4096, + .logical_block_size = 4096, + }; struct gendisk *disk; int err = -ENOMEM; @@ -131,9 +135,11 @@ static int __init n64cart_probe(struct platform_device *pdev) if (IS_ERR(reg_base)) return PTR_ERR(reg_base); - disk = blk_alloc_disk(NUMA_NO_NODE); - if (!disk) + disk = blk_alloc_disk(&lim, NUMA_NO_NODE); + if (IS_ERR(disk)) { + err = PTR_ERR(disk); goto out; + } disk->first_minor = 0; disk->flags = GENHD_FL_NO_PART; @@ -145,8 +151,6 @@ static int __init n64cart_probe(struct platform_device *pdev) set_disk_ro(disk, 1); blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); - blk_queue_physical_block_size(disk->queue, 4096); - blk_queue_logical_block_size(disk->queue, 4096); err = add_disk(disk); if (err) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 33a8f37bb6a1..9d4ec9273bf9 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -316,9 +316,12 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock, nsock->sent = 0; } -static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, +static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize, loff_t blksize) { + struct queue_limits lim; + int error; + if (!blksize) blksize = 1u << NBD_DEF_BLKSIZE_BITS; @@ -334,10 +337,16 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, if (!nbd->pid) return 0; + lim = queue_limits_start_update(nbd->disk->queue); if (nbd->config->flags & NBD_FLAG_SEND_TRIM) - blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX); - blk_queue_logical_block_size(nbd->disk->queue, blksize); - blk_queue_physical_block_size(nbd->disk->queue, blksize); + lim.max_hw_discard_sectors = UINT_MAX; + else + lim.max_hw_discard_sectors = 0; + lim.logical_block_size = blksize; + lim.physical_block_size = blksize; + error = queue_limits_commit_update(nbd->disk->queue, &lim); + if (error) + return error; if (max_part) set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); @@ -346,6 +355,18 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, return 0; } +static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, + loff_t blksize) +{ + int error; + + blk_mq_freeze_queue(nbd->disk->queue); + error = __nbd_set_size(nbd, bytesize, blksize); + blk_mq_unfreeze_queue(nbd->disk->queue); + + return error; +} + static void nbd_complete_rq(struct request *req) { struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); @@ -1351,7 +1372,6 @@ static void nbd_config_put(struct nbd_device *nbd) nbd->config = NULL; nbd->tag_set.timeout = 0; - blk_queue_max_discard_sectors(nbd->disk->queue, 0); mutex_unlock(&nbd->config_lock); nbd_put(nbd); @@ -1783,6 +1803,12 @@ static const struct blk_mq_ops nbd_mq_ops = { static struct nbd_device *nbd_dev_add(int index, unsigned int refs) { + struct queue_limits lim = { + .max_hw_sectors = 65536, + .max_user_sectors = 256, + .max_segments = USHRT_MAX, + .max_segment_size = UINT_MAX, + }; struct nbd_device *nbd; struct gendisk *disk; int err = -ENOMEM; @@ -1823,7 +1849,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs) if (err < 0) goto out_free_tags; - disk = blk_mq_alloc_disk(&nbd->tag_set, NULL); + disk = 
blk_mq_alloc_disk(&nbd->tag_set, &lim, NULL); if (IS_ERR(disk)) { err = PTR_ERR(disk); goto out_free_idr; @@ -1843,11 +1869,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs) * Tell the block layer that we are not a rotational device */ blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); - blk_queue_max_discard_sectors(disk->queue, 0); - blk_queue_max_segment_size(disk->queue, UINT_MAX); - blk_queue_max_segments(disk->queue, USHRT_MAX); - blk_queue_max_hw_sectors(disk->queue, 65536); - disk->queue->limits.max_sectors = 256; mutex_init(&nbd->config_lock); refcount_set(&nbd->config_refs, 0); @@ -2433,6 +2454,12 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info) } dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST); + if (!dev_list) { + nlmsg_free(reply); + ret = -EMSGSIZE; + goto out; + } + if (index == -1) { ret = idr_for_each(&nbd_index_idr, &status_cb, reply); if (ret) { diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 36755f263e8e..71c39bcd872c 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -115,6 +115,18 @@ module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444); MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>"); #endif +/* + * Historic queue modes. + * + * These days nothing but NULL_Q_MQ is actually supported, but we keep it the + * enum for error reporting. + */ +enum { + NULL_Q_BIO = 0, + NULL_Q_RQ = 1, + NULL_Q_MQ = 2, +}; + static int g_queue_mode = NULL_Q_MQ; static int null_param_store_val(const char *str, int *val, int min, int max) @@ -165,8 +177,8 @@ static bool g_blocking; module_param_named(blocking, g_blocking, bool, 0444); MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device"); -static bool shared_tags; -module_param(shared_tags, bool, 0444); +static bool g_shared_tags; +module_param_named(shared_tags, g_shared_tags, bool, 0444); MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq"); static bool g_shared_tag_bitmap; @@ -426,6 +438,7 @@ NULLB_DEVICE_ATTR(zone_max_open, uint, NULL); NULLB_DEVICE_ATTR(zone_max_active, uint, NULL); NULLB_DEVICE_ATTR(virt_boundary, bool, NULL); NULLB_DEVICE_ATTR(no_sched, bool, NULL); +NULLB_DEVICE_ATTR(shared_tags, bool, NULL); NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL); static ssize_t nullb_device_power_show(struct config_item *item, char *page) @@ -571,6 +584,7 @@ static struct configfs_attribute *nullb_device_attrs[] = { &nullb_device_attr_zone_offline, &nullb_device_attr_virt_boundary, &nullb_device_attr_no_sched, + &nullb_device_attr_shared_tags, &nullb_device_attr_shared_tag_bitmap, NULL, }; @@ -653,10 +667,11 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page) "badblocks,blocking,blocksize,cache_size," "completion_nsec,discard,home_node,hw_queue_depth," "irqmode,max_sectors,mbps,memory_backed,no_sched," - "poll_queues,power,queue_mode,shared_tag_bitmap,size," - "submit_queues,use_per_node_hctx,virt_boundary,zoned," - "zone_capacity,zone_max_active,zone_max_open," - "zone_nr_conv,zone_offline,zone_readonly,zone_size\n"); + "poll_queues,power,queue_mode,shared_tag_bitmap," + "shared_tags,size,submit_queues,use_per_node_hctx," + "virt_boundary,zoned,zone_capacity,zone_max_active," + "zone_max_open,zone_nr_conv,zone_offline,zone_readonly," + "zone_size\n"); } CONFIGFS_ATTR_RO(memb_group_, features); @@ -738,6 +753,7 @@ static struct nullb_device 
*null_alloc_dev(void) dev->zone_max_active = g_zone_max_active; dev->virt_boundary = g_virt_boundary; dev->no_sched = g_no_sched; + dev->shared_tags = g_shared_tags; dev->shared_tag_bitmap = g_shared_tag_bitmap; return dev; } @@ -752,98 +768,11 @@ static void null_free_dev(struct nullb_device *dev) kfree(dev); } -static void put_tag(struct nullb_queue *nq, unsigned int tag) -{ - clear_bit_unlock(tag, nq->tag_map); - - if (waitqueue_active(&nq->wait)) - wake_up(&nq->wait); -} - -static unsigned int get_tag(struct nullb_queue *nq) -{ - unsigned int tag; - - do { - tag = find_first_zero_bit(nq->tag_map, nq->queue_depth); - if (tag >= nq->queue_depth) - return -1U; - } while (test_and_set_bit_lock(tag, nq->tag_map)); - - return tag; -} - -static void free_cmd(struct nullb_cmd *cmd) -{ - put_tag(cmd->nq, cmd->tag); -} - -static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer); - -static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) -{ - struct nullb_cmd *cmd; - unsigned int tag; - - tag = get_tag(nq); - if (tag != -1U) { - cmd = &nq->cmds[tag]; - cmd->tag = tag; - cmd->error = BLK_STS_OK; - cmd->nq = nq; - if (nq->dev->irqmode == NULL_IRQ_TIMER) { - hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL); - cmd->timer.function = null_cmd_timer_expired; - } - return cmd; - } - - return NULL; -} - -static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, struct bio *bio) -{ - struct nullb_cmd *cmd; - DEFINE_WAIT(wait); - - do { - /* - * This avoids multiple return statements, multiple calls to - * __alloc_cmd() and a fast path call to prepare_to_wait(). - */ - cmd = __alloc_cmd(nq); - if (cmd) { - cmd->bio = bio; - return cmd; - } - prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE); - io_schedule(); - finish_wait(&nq->wait, &wait); - } while (1); -} - -static void end_cmd(struct nullb_cmd *cmd) -{ - int queue_mode = cmd->nq->dev->queue_mode; - - switch (queue_mode) { - case NULL_Q_MQ: - blk_mq_end_request(cmd->rq, cmd->error); - return; - case NULL_Q_BIO: - cmd->bio->bi_status = cmd->error; - bio_endio(cmd->bio); - break; - } - - free_cmd(cmd); -} - static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) { - end_cmd(container_of(timer, struct nullb_cmd, timer)); + struct nullb_cmd *cmd = container_of(timer, struct nullb_cmd, timer); + blk_mq_end_request(blk_mq_rq_from_pdu(cmd), cmd->error); return HRTIMER_NORESTART; } @@ -856,7 +785,9 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd) static void null_complete_rq(struct request *rq) { - end_cmd(blk_mq_rq_to_pdu(rq)); + struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); + + blk_mq_end_request(rq, cmd->error); } static struct nullb_page *null_alloc_page(void) @@ -1273,7 +1204,7 @@ static int null_transfer(struct nullb *nullb, struct page *page, static int null_handle_rq(struct nullb_cmd *cmd) { - struct request *rq = cmd->rq; + struct request *rq = blk_mq_rq_from_pdu(cmd); struct nullb *nullb = cmd->nq->dev->nullb; int err; unsigned int len; @@ -1298,63 +1229,21 @@ static int null_handle_rq(struct nullb_cmd *cmd) return 0; } -static int null_handle_bio(struct nullb_cmd *cmd) -{ - struct bio *bio = cmd->bio; - struct nullb *nullb = cmd->nq->dev->nullb; - int err; - unsigned int len; - sector_t sector = bio->bi_iter.bi_sector; - struct bio_vec bvec; - struct bvec_iter iter; - - spin_lock_irq(&nullb->lock); - bio_for_each_segment(bvec, bio, iter) { - len = bvec.bv_len; - err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, - op_is_write(bio_op(bio)), sector, - bio->bi_opf & REQ_FUA); 
- if (err) { - spin_unlock_irq(&nullb->lock); - return err; - } - sector += len >> SECTOR_SHIFT; - } - spin_unlock_irq(&nullb->lock); - return 0; -} - -static void null_stop_queue(struct nullb *nullb) -{ - struct request_queue *q = nullb->q; - - if (nullb->dev->queue_mode == NULL_Q_MQ) - blk_mq_stop_hw_queues(q); -} - -static void null_restart_queue_async(struct nullb *nullb) -{ - struct request_queue *q = nullb->q; - - if (nullb->dev->queue_mode == NULL_Q_MQ) - blk_mq_start_stopped_hw_queues(q, true); -} - static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd) { struct nullb_device *dev = cmd->nq->dev; struct nullb *nullb = dev->nullb; blk_status_t sts = BLK_STS_OK; - struct request *rq = cmd->rq; + struct request *rq = blk_mq_rq_from_pdu(cmd); if (!hrtimer_active(&nullb->bw_timer)) hrtimer_restart(&nullb->bw_timer); if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) { - null_stop_queue(nullb); + blk_mq_stop_hw_queues(nullb->q); /* race with timer */ if (atomic_long_read(&nullb->cur_bytes) > 0) - null_restart_queue_async(nullb); + blk_mq_start_stopped_hw_queues(nullb->q, true); /* requeue request */ sts = BLK_STS_DEV_RESOURCE; } @@ -1381,37 +1270,29 @@ static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, sector_t nr_sectors) { struct nullb_device *dev = cmd->nq->dev; - int err; if (op == REQ_OP_DISCARD) return null_handle_discard(dev, sector, nr_sectors); + return errno_to_blk_status(null_handle_rq(cmd)); - if (dev->queue_mode == NULL_Q_BIO) - err = null_handle_bio(cmd); - else - err = null_handle_rq(cmd); - - return errno_to_blk_status(err); } static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd) { + struct request *rq = blk_mq_rq_from_pdu(cmd); struct nullb_device *dev = cmd->nq->dev; struct bio *bio; - if (dev->memory_backed) - return; - - if (dev->queue_mode == NULL_Q_BIO && bio_op(cmd->bio) == REQ_OP_READ) { - zero_fill_bio(cmd->bio); - } else if (req_op(cmd->rq) == REQ_OP_READ) { - __rq_for_each_bio(bio, cmd->rq) + if (!dev->memory_backed && req_op(rq) == REQ_OP_READ) { + __rq_for_each_bio(bio, rq) zero_fill_bio(bio); } } static inline void nullb_complete_cmd(struct nullb_cmd *cmd) { + struct request *rq = blk_mq_rq_from_pdu(cmd); + /* * Since root privileges are required to configure the null_blk * driver, it is fine that this driver does not initialize the @@ -1425,20 +1306,10 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd) /* Complete IO by inline, softirq or timer */ switch (cmd->nq->dev->irqmode) { case NULL_IRQ_SOFTIRQ: - switch (cmd->nq->dev->queue_mode) { - case NULL_Q_MQ: - blk_mq_complete_request(cmd->rq); - break; - case NULL_Q_BIO: - /* - * XXX: no proper submitting cpu information available. 
- */ - end_cmd(cmd); - break; - } + blk_mq_complete_request(rq); break; case NULL_IRQ_NONE: - end_cmd(cmd); + blk_mq_end_request(rq, cmd->error); break; case NULL_IRQ_TIMER: null_cmd_end_timer(cmd); @@ -1499,7 +1370,7 @@ static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer) return HRTIMER_NORESTART; atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps)); - null_restart_queue_async(nullb); + blk_mq_start_stopped_hw_queues(nullb->q, true); hrtimer_forward_now(&nullb->bw_timer, timer_interval); @@ -1516,26 +1387,6 @@ static void nullb_setup_bwtimer(struct nullb *nullb) hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL); } -static struct nullb_queue *nullb_to_queue(struct nullb *nullb) -{ - int index = 0; - - if (nullb->nr_queues != 1) - index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues); - - return &nullb->queues[index]; -} - -static void null_submit_bio(struct bio *bio) -{ - sector_t sector = bio->bi_iter.bi_sector; - sector_t nr_sectors = bio_sectors(bio); - struct nullb *nullb = bio->bi_bdev->bd_disk->private_data; - struct nullb_queue *nq = nullb_to_queue(nullb); - - null_handle_cmd(alloc_cmd(nq, bio), sector, nr_sectors, bio_op(bio)); -} - #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION static bool should_timeout_request(struct request *rq) @@ -1655,7 +1506,7 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) blk_rq_sectors(req)); if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error, blk_mq_end_request_batch)) - end_cmd(cmd); + blk_mq_end_request(req, cmd->error); nr++; } @@ -1711,7 +1562,6 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); cmd->timer.function = null_cmd_timer_expired; } - cmd->rq = rq; cmd->error = BLK_STS_OK; cmd->nq = nq; cmd->fake_timeout = should_timeout_request(rq) || @@ -1770,34 +1620,8 @@ static void null_queue_rqs(struct request **rqlist) *rqlist = requeue_list; } -static void cleanup_queue(struct nullb_queue *nq) -{ - bitmap_free(nq->tag_map); - kfree(nq->cmds); -} - -static void cleanup_queues(struct nullb *nullb) -{ - int i; - - for (i = 0; i < nullb->nr_queues; i++) - cleanup_queue(&nullb->queues[i]); - - kfree(nullb->queues); -} - -static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) -{ - struct nullb_queue *nq = hctx->driver_data; - struct nullb *nullb = nq->dev->nullb; - - nullb->nr_queues--; -} - static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) { - init_waitqueue_head(&nq->wait); - nq->queue_depth = nullb->queue_depth; nq->dev = nullb->dev; INIT_LIST_HEAD(&nq->poll_list); spin_lock_init(&nq->poll_lock); @@ -1815,7 +1639,6 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data, nq = &nullb->queues[hctx_idx]; hctx->driver_data = nq; null_init_queue(nullb, nq); - nullb->nr_queues++; return 0; } @@ -1828,7 +1651,6 @@ static const struct blk_mq_ops null_mq_ops = { .poll = null_poll, .map_queues = null_map_queues, .init_hctx = null_init_hctx, - .exit_hctx = null_exit_hctx, }; static void null_del_dev(struct nullb *nullb) @@ -1849,21 +1671,20 @@ static void null_del_dev(struct nullb *nullb) if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) { hrtimer_cancel(&nullb->bw_timer); atomic_long_set(&nullb->cur_bytes, LONG_MAX); - null_restart_queue_async(nullb); + blk_mq_start_stopped_hw_queues(nullb->q, true); } put_disk(nullb->disk); - if (dev->queue_mode == NULL_Q_MQ && - nullb->tag_set == &nullb->__tag_set) + if 
(nullb->tag_set == &nullb->__tag_set) blk_mq_free_tag_set(nullb->tag_set); - cleanup_queues(nullb); + kfree(nullb->queues); if (null_cache_active(nullb)) null_free_device_storage(nullb->dev, true); kfree(nullb); dev->nullb = NULL; } -static void null_config_discard(struct nullb *nullb) +static void null_config_discard(struct nullb *nullb, struct queue_limits *lim) { if (nullb->dev->discard == false) return; @@ -1880,43 +1701,14 @@ static void null_config_discard(struct nullb *nullb) return; } - blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9); + lim->max_hw_discard_sectors = UINT_MAX >> 9; } -static const struct block_device_operations null_bio_ops = { - .owner = THIS_MODULE, - .submit_bio = null_submit_bio, - .report_zones = null_report_zones, -}; - -static const struct block_device_operations null_rq_ops = { +static const struct block_device_operations null_ops = { .owner = THIS_MODULE, .report_zones = null_report_zones, }; -static int setup_commands(struct nullb_queue *nq) -{ - struct nullb_cmd *cmd; - int i; - - nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL); - if (!nq->cmds) - return -ENOMEM; - - nq->tag_map = bitmap_zalloc(nq->queue_depth, GFP_KERNEL); - if (!nq->tag_map) { - kfree(nq->cmds); - return -ENOMEM; - } - - for (i = 0; i < nq->queue_depth; i++) { - cmd = &nq->cmds[i]; - cmd->tag = -1U; - } - - return 0; -} - static int setup_queues(struct nullb *nullb) { int nqueues = nr_cpu_ids; @@ -1929,101 +1721,66 @@ static int setup_queues(struct nullb *nullb) if (!nullb->queues) return -ENOMEM; - nullb->queue_depth = nullb->dev->hw_queue_depth; return 0; } -static int init_driver_queues(struct nullb *nullb) +static int null_init_tag_set(struct blk_mq_tag_set *set, int poll_queues) { - struct nullb_queue *nq; - int i, ret = 0; - - for (i = 0; i < nullb->dev->submit_queues; i++) { - nq = &nullb->queues[i]; - - null_init_queue(nullb, nq); - - ret = setup_commands(nq); - if (ret) - return ret; - nullb->nr_queues++; + set->ops = &null_mq_ops; + set->cmd_size = sizeof(struct nullb_cmd); + set->timeout = 5 * HZ; + set->nr_maps = 1; + if (poll_queues) { + set->nr_hw_queues += poll_queues; + set->nr_maps += 2; } - return 0; + return blk_mq_alloc_tag_set(set); } -static int null_gendisk_register(struct nullb *nullb) +static int null_init_global_tag_set(void) { - sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT; - struct gendisk *disk = nullb->disk; + int error; - set_capacity(disk, size); - - disk->major = null_major; - disk->first_minor = nullb->index; - disk->minors = 1; - if (queue_is_mq(nullb->q)) - disk->fops = &null_rq_ops; - else - disk->fops = &null_bio_ops; - disk->private_data = nullb; - strscpy_pad(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); + if (tag_set.ops) + return 0; - if (nullb->dev->zoned) { - int ret = null_register_zoned_dev(nullb); + tag_set.nr_hw_queues = g_submit_queues; + tag_set.queue_depth = g_hw_queue_depth; + tag_set.numa_node = g_home_node; + tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + if (g_no_sched) + tag_set.flags |= BLK_MQ_F_NO_SCHED; + if (g_shared_tag_bitmap) + tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED; + if (g_blocking) + tag_set.flags |= BLK_MQ_F_BLOCKING; - if (ret) - return ret; - } - - return add_disk(disk); + error = null_init_tag_set(&tag_set, g_poll_queues); + if (error) + tag_set.ops = NULL; + return error; } -static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set) +static int null_setup_tagset(struct nullb *nullb) { - unsigned int flags = BLK_MQ_F_SHOULD_MERGE; - int hw_queues, 
numa_node; - unsigned int queue_depth; - int poll_queues; - - if (nullb) { - hw_queues = nullb->dev->submit_queues; - poll_queues = nullb->dev->poll_queues; - queue_depth = nullb->dev->hw_queue_depth; - numa_node = nullb->dev->home_node; - if (nullb->dev->no_sched) - flags |= BLK_MQ_F_NO_SCHED; - if (nullb->dev->shared_tag_bitmap) - flags |= BLK_MQ_F_TAG_HCTX_SHARED; - if (nullb->dev->blocking) - flags |= BLK_MQ_F_BLOCKING; - } else { - hw_queues = g_submit_queues; - poll_queues = g_poll_queues; - queue_depth = g_hw_queue_depth; - numa_node = g_home_node; - if (g_no_sched) - flags |= BLK_MQ_F_NO_SCHED; - if (g_shared_tag_bitmap) - flags |= BLK_MQ_F_TAG_HCTX_SHARED; - if (g_blocking) - flags |= BLK_MQ_F_BLOCKING; - } - - set->ops = &null_mq_ops; - set->cmd_size = sizeof(struct nullb_cmd); - set->flags = flags; - set->driver_data = nullb; - set->nr_hw_queues = hw_queues; - set->queue_depth = queue_depth; - set->numa_node = numa_node; - if (poll_queues) { - set->nr_hw_queues += poll_queues; - set->nr_maps = 3; - } else { - set->nr_maps = 1; + if (nullb->dev->shared_tags) { + nullb->tag_set = &tag_set; + return null_init_global_tag_set(); } - return blk_mq_alloc_tag_set(set); + nullb->tag_set = &nullb->__tag_set; + nullb->tag_set->driver_data = nullb; + nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues; + nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth; + nullb->tag_set->numa_node = nullb->dev->home_node; + nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE; + if (nullb->dev->no_sched) + nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED; + if (nullb->dev->shared_tag_bitmap) + nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED; + if (nullb->dev->blocking) + nullb->tag_set->flags |= BLK_MQ_F_BLOCKING; + return null_init_tag_set(nullb->tag_set, nullb->dev->poll_queues); } static int null_validate_conf(struct nullb_device *dev) @@ -2032,11 +1789,15 @@ static int null_validate_conf(struct nullb_device *dev) pr_err("legacy IO path is no longer available\n"); return -EINVAL; } + if (dev->queue_mode == NULL_Q_BIO) { + pr_err("BIO-based IO path is no longer available, using blk-mq instead.\n"); + dev->queue_mode = NULL_Q_MQ; + } dev->blocksize = round_down(dev->blocksize, 512); dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096); - if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) { + if (dev->use_per_node_hctx) { if (dev->submit_queues != nr_online_nodes) dev->submit_queues = nr_online_nodes; } else if (dev->submit_queues > nr_cpu_ids) @@ -2048,8 +1809,6 @@ static int null_validate_conf(struct nullb_device *dev) if (dev->poll_queues > g_poll_queues) dev->poll_queues = g_poll_queues; dev->prev_poll_queues = dev->poll_queues; - - dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ); dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER); /* Do memory allocation, so set blocking */ @@ -2060,9 +1819,6 @@ static int null_validate_conf(struct nullb_device *dev) dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024, dev->cache_size); dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps); - /* can not stop a queue */ - if (dev->queue_mode == NULL_Q_BIO) - dev->mbps = 0; if (dev->zoned && (!dev->zone_size || !is_power_of_2(dev->zone_size))) { @@ -2102,6 +1858,12 @@ static bool null_setup_fault(void) static int null_add_dev(struct nullb_device *dev) { + struct queue_limits lim = { + .logical_block_size = dev->blocksize, + .physical_block_size = dev->blocksize, + .max_hw_sectors = dev->max_sectors, + }; + struct nullb *nullb; int rv; @@ -2123,36 +1885,25 
@@ static int null_add_dev(struct nullb_device *dev) if (rv) goto out_free_nullb; - if (dev->queue_mode == NULL_Q_MQ) { - if (shared_tags) { - nullb->tag_set = &tag_set; - rv = 0; - } else { - nullb->tag_set = &nullb->__tag_set; - rv = null_init_tag_set(nullb, nullb->tag_set); - } + rv = null_setup_tagset(nullb); + if (rv) + goto out_cleanup_queues; + if (dev->virt_boundary) + lim.virt_boundary_mask = PAGE_SIZE - 1; + null_config_discard(nullb, &lim); + if (dev->zoned) { + rv = null_init_zoned_dev(dev, &lim); if (rv) - goto out_cleanup_queues; - - nullb->tag_set->timeout = 5 * HZ; - nullb->disk = blk_mq_alloc_disk(nullb->tag_set, nullb); - if (IS_ERR(nullb->disk)) { - rv = PTR_ERR(nullb->disk); goto out_cleanup_tags; - } - nullb->q = nullb->disk->queue; - } else if (dev->queue_mode == NULL_Q_BIO) { - rv = -ENOMEM; - nullb->disk = blk_alloc_disk(nullb->dev->home_node); - if (!nullb->disk) - goto out_cleanup_queues; + } - nullb->q = nullb->disk->queue; - rv = init_driver_queues(nullb); - if (rv) - goto out_cleanup_disk; + nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb); + if (IS_ERR(nullb->disk)) { + rv = PTR_ERR(nullb->disk); + goto out_cleanup_zone; } + nullb->q = nullb->disk->queue; if (dev->mbps) { set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags); @@ -2164,12 +1915,6 @@ static int null_add_dev(struct nullb_device *dev) blk_queue_write_cache(nullb->q, true, true); } - if (dev->zoned) { - rv = null_init_zoned_dev(dev, nullb->q); - if (rv) - goto out_cleanup_disk; - } - nullb->q->queuedata = nullb; blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); @@ -2177,22 +1922,12 @@ static int null_add_dev(struct nullb_device *dev) rv = ida_alloc(&nullb_indexes, GFP_KERNEL); if (rv < 0) { mutex_unlock(&lock); - goto out_cleanup_zone; + goto out_cleanup_disk; } nullb->index = rv; dev->index = rv; mutex_unlock(&lock); - blk_queue_logical_block_size(nullb->q, dev->blocksize); - blk_queue_physical_block_size(nullb->q, dev->blocksize); - if (dev->max_sectors) - blk_queue_max_hw_sectors(nullb->q, dev->max_sectors); - - if (dev->virt_boundary) - blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1); - - null_config_discard(nullb); - if (config_item_name(&dev->group.cg_item)) { /* Use configfs dir name as the device name */ snprintf(nullb->disk_name, sizeof(nullb->disk_name), @@ -2201,7 +1936,22 @@ static int null_add_dev(struct nullb_device *dev) sprintf(nullb->disk_name, "nullb%d", nullb->index); } - rv = null_gendisk_register(nullb); + set_capacity(nullb->disk, + ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT); + nullb->disk->major = null_major; + nullb->disk->first_minor = nullb->index; + nullb->disk->minors = 1; + nullb->disk->fops = &null_ops; + nullb->disk->private_data = nullb; + strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN); + + if (nullb->dev->zoned) { + rv = null_register_zoned_dev(nullb); + if (rv) + goto out_ida_free; + } + + rv = add_disk(nullb->disk); if (rv) goto out_ida_free; @@ -2220,10 +1970,10 @@ out_cleanup_zone: out_cleanup_disk: put_disk(nullb->disk); out_cleanup_tags: - if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) + if (nullb->tag_set == &nullb->__tag_set) blk_mq_free_tag_set(nullb->tag_set); out_cleanup_queues: - cleanup_queues(nullb); + kfree(nullb->queues); out_free_nullb: kfree(nullb); dev->nullb = NULL; @@ -2299,7 +2049,7 @@ static int __init null_init(void) return -EINVAL; } - if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) { + if (g_use_per_node_hctx) { if (g_submit_queues != nr_online_nodes) { 
pr_warn("submit_queues param is set to %u.\n", nr_online_nodes); @@ -2311,18 +2061,12 @@ static int __init null_init(void) g_submit_queues = 1; } - if (g_queue_mode == NULL_Q_MQ && shared_tags) { - ret = null_init_tag_set(NULL, &tag_set); - if (ret) - return ret; - } - config_group_init(&nullb_subsys.su_group); mutex_init(&nullb_subsys.su_mutex); ret = configfs_register_subsystem(&nullb_subsys); if (ret) - goto err_tagset; + return ret; mutex_init(&lock); @@ -2349,9 +2093,6 @@ err_dev: unregister_blkdev(null_major, "nullb"); err_conf: configfs_unregister_subsystem(&nullb_subsys); -err_tagset: - if (g_queue_mode == NULL_Q_MQ && shared_tags) - blk_mq_free_tag_set(&tag_set); return ret; } @@ -2370,7 +2111,7 @@ static void __exit null_exit(void) } mutex_unlock(&lock); - if (g_queue_mode == NULL_Q_MQ && shared_tags) + if (tag_set.ops) blk_mq_free_tag_set(&tag_set); } diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h index 929f659dd255..477b97746823 100644 --- a/drivers/block/null_blk/null_blk.h +++ b/drivers/block/null_blk/null_blk.h @@ -16,11 +16,6 @@ #include <linux/mutex.h> struct nullb_cmd { - union { - struct request *rq; - struct bio *bio; - }; - unsigned int tag; blk_status_t error; bool fake_timeout; struct nullb_queue *nq; @@ -28,16 +23,11 @@ struct nullb_cmd { }; struct nullb_queue { - unsigned long *tag_map; - wait_queue_head_t wait; - unsigned int queue_depth; struct nullb_device *dev; unsigned int requeue_selection; struct list_head poll_list; spinlock_t poll_lock; - - struct nullb_cmd *cmds; }; struct nullb_zone { @@ -60,13 +50,6 @@ struct nullb_zone { unsigned int capacity; }; -/* Queue modes */ -enum { - NULL_Q_BIO = 0, - NULL_Q_RQ = 1, - NULL_Q_MQ = 2, -}; - struct nullb_device { struct nullb *nullb; struct config_group group; @@ -119,6 +102,7 @@ struct nullb_device { bool zoned; /* if device is zoned */ bool virt_boundary; /* virtual boundary on/off for the device */ bool no_sched; /* no IO scheduler for the device */ + bool shared_tags; /* share tag set between devices for blk-mq */ bool shared_tag_bitmap; /* use hostwide shared tags */ }; @@ -130,14 +114,12 @@ struct nullb { struct gendisk *disk; struct blk_mq_tag_set *tag_set; struct blk_mq_tag_set __tag_set; - unsigned int queue_depth; atomic_long_t cur_bytes; struct hrtimer bw_timer; unsigned long cache_flush_pos; spinlock_t lock; struct nullb_queue *queues; - unsigned int nr_queues; char disk_name[DISK_NAME_LEN]; }; @@ -147,7 +129,7 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op, sector_t sector, unsigned int nr_sectors); #ifdef CONFIG_BLK_DEV_ZONED -int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q); +int null_init_zoned_dev(struct nullb_device *dev, struct queue_limits *lim); int null_register_zoned_dev(struct nullb *nullb); void null_free_zoned_dev(struct nullb_device *dev); int null_report_zones(struct gendisk *disk, sector_t sector, @@ -160,7 +142,7 @@ ssize_t zone_cond_store(struct nullb_device *dev, const char *page, size_t count, enum blk_zone_cond cond); #else static inline int null_init_zoned_dev(struct nullb_device *dev, - struct request_queue *q) + struct queue_limits *lim) { pr_err("CONFIG_BLK_DEV_ZONED not enabled\n"); return -EINVAL; diff --git a/drivers/block/null_blk/trace.h b/drivers/block/null_blk/trace.h index 6b2b370e786f..ef2d05d5f0df 100644 --- a/drivers/block/null_blk/trace.h +++ b/drivers/block/null_blk/trace.h @@ -41,10 +41,11 @@ TRACE_EVENT(nullb_zone_op, __field(unsigned int, zone_cond) ), TP_fast_assign( - 
__entry->op = req_op(cmd->rq); + __entry->op = req_op(blk_mq_rq_from_pdu(cmd)); __entry->zone_no = zone_no; __entry->zone_cond = zone_cond; - __assign_disk_name(__entry->disk, cmd->rq->q->disk); + __assign_disk_name(__entry->disk, + blk_mq_rq_from_pdu(cmd)->q->disk); ), TP_printk("%s req=%-15s zone_no=%u zone_cond=%-10s", __print_disk_name(__entry->disk), diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c index 6f5e0994862e..1689e2584104 100644 --- a/drivers/block/null_blk/zoned.c +++ b/drivers/block/null_blk/zoned.c @@ -58,7 +58,8 @@ static inline void null_unlock_zone(struct nullb_device *dev, mutex_unlock(&zone->mutex); } -int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q) +int null_init_zoned_dev(struct nullb_device *dev, + struct queue_limits *lim) { sector_t dev_capacity_sects, zone_capacity_sects; struct nullb_zone *zone; @@ -151,27 +152,22 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q) sector += dev->zone_size_sects; } + lim->zoned = true; + lim->chunk_sectors = dev->zone_size_sects; + lim->max_zone_append_sectors = dev->zone_size_sects; + lim->max_open_zones = dev->zone_max_open; + lim->max_active_zones = dev->zone_max_active; return 0; } int null_register_zoned_dev(struct nullb *nullb) { - struct nullb_device *dev = nullb->dev; struct request_queue *q = nullb->q; - disk_set_zoned(nullb->disk); blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE); - blk_queue_chunk_sectors(q, dev->zone_size_sects); nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0); - blk_queue_max_zone_append_sectors(q, dev->zone_size_sects); - disk_set_max_open_zones(nullb->disk, dev->zone_max_open); - disk_set_max_active_zones(nullb->disk, dev->zone_max_active); - - if (queue_is_mq(q)) - return blk_revalidate_disk_zones(nullb->disk, NULL); - - return 0; + return blk_revalidate_disk_zones(nullb->disk, NULL); } void null_free_zoned_dev(struct nullb_device *dev) @@ -394,10 +390,7 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector, */ if (append) { sector = zone->wp; - if (dev->queue_mode == NULL_Q_MQ) - cmd->rq->__sector = sector; - else - cmd->bio->bi_iter.bi_sector = sector; + blk_mq_rq_from_pdu(cmd)->__sector = sector; } else if (sector != zone->wp) { ret = BLK_STS_IOERR; goto unlock; diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index d56d972aadb3..21728e9ea5c3 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -340,8 +340,8 @@ static ssize_t device_map_show(const struct class *c, const struct class_attribu n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n", pd->disk->disk_name, MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev), - MAJOR(pd->bdev_handle->bdev->bd_dev), - MINOR(pd->bdev_handle->bdev->bd_dev)); + MAJOR(file_bdev(pd->bdev_file)->bd_dev), + MINOR(file_bdev(pd->bdev_file)->bd_dev)); } mutex_unlock(&ctl_mutex); return n; @@ -438,7 +438,7 @@ static int pkt_seq_show(struct seq_file *m, void *p) int states[PACKET_NUM_STATES]; seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name, - pd->bdev_handle->bdev); + file_bdev(pd->bdev_file)); seq_printf(m, "\nSettings:\n"); seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2); @@ -715,7 +715,7 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod */ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc) { - struct request_queue *q = bdev_get_queue(pd->bdev_handle->bdev); + struct 
request_queue *q = bdev_get_queue(file_bdev(pd->bdev_file)); struct scsi_cmnd *scmd; struct request *rq; int ret = 0; @@ -828,6 +828,12 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, */ static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) { + /* + * Some CDRW drives can not handle writes larger than one packet, + * even if the size is a multiple of the packet size. + */ + bio->bi_opf |= REQ_NOMERGE; + spin_lock(&pd->iosched.lock); if (bio_data_dir(bio) == READ) bio_list_add(&pd->iosched.read_queue, bio); @@ -1048,7 +1054,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) continue; bio = pkt->r_bios[f]; - bio_init(bio, pd->bdev_handle->bdev, bio->bi_inline_vecs, 1, + bio_init(bio, file_bdev(pd->bdev_file), bio->bi_inline_vecs, 1, REQ_OP_READ); bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); bio->bi_end_io = pkt_end_io_read; @@ -1264,7 +1270,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) struct device *ddev = disk_to_dev(pd->disk); int f; - bio_init(pkt->w_bio, pd->bdev_handle->bdev, pkt->w_bio->bi_inline_vecs, + bio_init(pkt->w_bio, file_bdev(pd->bdev_file), pkt->w_bio->bi_inline_vecs, pkt->frames, REQ_OP_WRITE); pkt->w_bio->bi_iter.bi_sector = pkt->sector; pkt->w_bio->bi_end_io = pkt_end_io_packet_write; @@ -2162,20 +2168,20 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write) int ret; long lba; struct request_queue *q; - struct bdev_handle *bdev_handle; + struct file *bdev_file; /* * We need to re-open the cdrom device without O_NONBLOCK to be able * to read/write from/to it. It is already opened in O_NONBLOCK mode * so open should not fail. */ - bdev_handle = bdev_open_by_dev(pd->bdev_handle->bdev->bd_dev, + bdev_file = bdev_file_open_by_dev(file_bdev(pd->bdev_file)->bd_dev, BLK_OPEN_READ, pd, NULL); - if (IS_ERR(bdev_handle)) { - ret = PTR_ERR(bdev_handle); + if (IS_ERR(bdev_file)) { + ret = PTR_ERR(bdev_file); goto out; } - pd->open_bdev_handle = bdev_handle; + pd->f_open_bdev = bdev_file; ret = pkt_get_last_written(pd, &lba); if (ret) { @@ -2184,18 +2190,13 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write) } set_capacity(pd->disk, lba << 2); - set_capacity_and_notify(pd->bdev_handle->bdev->bd_disk, lba << 2); + set_capacity_and_notify(file_bdev(pd->bdev_file)->bd_disk, lba << 2); - q = bdev_get_queue(pd->bdev_handle->bdev); + q = bdev_get_queue(file_bdev(pd->bdev_file)); if (write) { ret = pkt_open_write(pd); if (ret) goto out_putdev; - /* - * Some CDRW drives can not handle writes larger than one packet, - * even if the size is a multiple of the packet size. 
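The pktcdvd hunks in this area, like the drbd attach path earlier, drop struct bdev_handle in favour of a struct file returned by the bdev_file_open_by_dev()/bdev_file_open_by_path() helpers; file_bdev() yields the underlying block_device and a plain fput() replaces bdev_release(). A minimal open helper sketched under those assumptions; the path, open mode and holder are placeholders.

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/file.h>

/* Hypothetical example, not from the patch: open a backing device by path
 * and hand back both the struct file and its block_device. */
static struct block_device *example_open_bdev(const char *path, void *holder,
                                              struct file **filep)
{
        struct file *file;

        file = bdev_file_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
                                      holder, NULL);
        if (IS_ERR(file))
                return ERR_CAST(file);

        *filep = file;                  /* keep it around for the final fput() */
        return file_bdev(file);         /* the block_device behind the file */
}

The lifetime rules are unchanged, so teardown paths such as close_backing_dev() above simply swap bdev_release() for fput().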
- */ - blk_queue_max_hw_sectors(q, pd->settings.size); set_bit(PACKET_WRITABLE, &pd->flags); } else { pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); @@ -2218,7 +2219,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write) return 0; out_putdev: - bdev_release(bdev_handle); + fput(bdev_file); out: return ret; } @@ -2237,8 +2238,8 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush) pkt_lock_door(pd, 0); pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); - bdev_release(pd->open_bdev_handle); - pd->open_bdev_handle = NULL; + fput(pd->f_open_bdev); + pd->f_open_bdev = NULL; pkt_shrink_pktlist(pd); } @@ -2326,7 +2327,7 @@ static void pkt_end_io_read_cloned(struct bio *bio) static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio) { - struct bio *cloned_bio = bio_alloc_clone(pd->bdev_handle->bdev, bio, + struct bio *cloned_bio = bio_alloc_clone(file_bdev(pd->bdev_file), bio, GFP_NOIO, &pkt_bio_set); struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO); @@ -2338,9 +2339,9 @@ static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio) pkt_queue_bio(pd, cloned_bio); } -static void pkt_make_request_write(struct request_queue *q, struct bio *bio) +static void pkt_make_request_write(struct bio *bio) { - struct pktcdvd_device *pd = q->queuedata; + struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data; sector_t zone; struct packet_data *pkt; int was_empty, blocked_bio; @@ -2432,7 +2433,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio) static void pkt_submit_bio(struct bio *bio) { - struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata; + struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data; struct device *ddev = disk_to_dev(pd->disk); struct bio *split; @@ -2476,7 +2477,7 @@ static void pkt_submit_bio(struct bio *bio) split = bio; } - pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split); + pkt_make_request_write(split); } while (split != bio); return; @@ -2484,20 +2485,11 @@ end_io: bio_io_error(bio); } -static void pkt_init_queue(struct pktcdvd_device *pd) -{ - struct request_queue *q = pd->disk->queue; - - blk_queue_logical_block_size(q, CD_FRAMESIZE); - blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS); - q->queuedata = pd; -} - static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) { struct device *ddev = disk_to_dev(pd->disk); int i; - struct bdev_handle *bdev_handle; + struct file *bdev_file; struct scsi_device *sdev; if (pd->pkt_dev == dev) { @@ -2508,9 +2500,9 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) struct pktcdvd_device *pd2 = pkt_devs[i]; if (!pd2) continue; - if (pd2->bdev_handle->bdev->bd_dev == dev) { + if (file_bdev(pd2->bdev_file)->bd_dev == dev) { dev_err(ddev, "%pg already setup\n", - pd2->bdev_handle->bdev); + file_bdev(pd2->bdev_file)); return -EBUSY; } if (pd2->pkt_dev == dev) { @@ -2519,13 +2511,13 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) } } - bdev_handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY, + bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY, NULL, NULL); - if (IS_ERR(bdev_handle)) - return PTR_ERR(bdev_handle); - sdev = scsi_device_from_queue(bdev_handle->bdev->bd_disk->queue); + if (IS_ERR(bdev_file)) + return PTR_ERR(bdev_file); + sdev = scsi_device_from_queue(file_bdev(bdev_file)->bd_disk->queue); if (!sdev) { - bdev_release(bdev_handle); + fput(bdev_file); return -EINVAL; } put_device(&sdev->sdev_gendev); @@ -2533,10 +2525,8 @@ static int 
pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) /* This is safe, since we have a reference from open(). */ __module_get(THIS_MODULE); - pd->bdev_handle = bdev_handle; - set_blocksize(bdev_handle->bdev, CD_FRAMESIZE); - - pkt_init_queue(pd); + pd->bdev_file = bdev_file; + set_blocksize(file_bdev(bdev_file), CD_FRAMESIZE); atomic_set(&pd->cdrw.pending_bios, 0); pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name); @@ -2546,11 +2536,11 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) } proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd); - dev_notice(ddev, "writer mapped to %pg\n", bdev_handle->bdev); + dev_notice(ddev, "writer mapped to %pg\n", file_bdev(bdev_file)); return 0; out_mem: - bdev_release(bdev_handle); + fput(bdev_file); /* This is safe: open() is still holding a reference. */ module_put(THIS_MODULE); return -ENOMEM; @@ -2605,9 +2595,9 @@ static unsigned int pkt_check_events(struct gendisk *disk, if (!pd) return 0; - if (!pd->bdev_handle) + if (!pd->bdev_file) return 0; - attached_disk = pd->bdev_handle->bdev->bd_disk; + attached_disk = file_bdev(pd->bdev_file)->bd_disk; if (!attached_disk || !attached_disk->fops->check_events) return 0; return attached_disk->fops->check_events(attached_disk, clearing); @@ -2634,6 +2624,10 @@ static const struct block_device_operations pktcdvd_ops = { */ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) { + struct queue_limits lim = { + .max_hw_sectors = PACKET_MAX_SECTORS, + .logical_block_size = CD_FRAMESIZE, + }; int idx; int ret = -ENOMEM; struct pktcdvd_device *pd; @@ -2673,10 +2667,11 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) pd->write_congestion_on = write_congestion_on; pd->write_congestion_off = write_congestion_off; - ret = -ENOMEM; - disk = blk_alloc_disk(NUMA_NO_NODE); - if (!disk) + disk = blk_alloc_disk(&lim, NUMA_NO_NODE); + if (IS_ERR(disk)) { + ret = PTR_ERR(disk); goto out_mem; + } pd->disk = disk; disk->major = pktdev_major; disk->first_minor = idx; @@ -2692,7 +2687,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) goto out_mem2; /* inherit events of the host device */ - disk->events = pd->bdev_handle->bdev->bd_disk->events; + disk->events = file_bdev(pd->bdev_file)->bd_disk->events; ret = add_disk(disk); if (ret) @@ -2757,7 +2752,7 @@ static int pkt_remove_dev(dev_t pkt_dev) pkt_debugfs_dev_remove(pd); pkt_sysfs_dev_remove(pd); - bdev_release(pd->bdev_handle); + fput(pd->bdev_file); remove_proc_entry(pd->disk->disk_name, pkt_proc); dev_notice(ddev, "writer unmapped\n"); @@ -2784,7 +2779,7 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd) pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index); if (pd) { - ctrl_cmd->dev = new_encode_dev(pd->bdev_handle->bdev->bd_dev); + ctrl_cmd->dev = new_encode_dev(file_bdev(pd->bdev_file)->bd_dev); ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev); } else { ctrl_cmd->dev = 0; diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 36d7b36c60c7..b810ac0a5c4b 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -382,6 +382,14 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) struct ps3disk_private *priv; int error; unsigned int devidx; + struct queue_limits lim = { + .logical_block_size = dev->blk_size, + .max_hw_sectors = dev->bounce_size >> 9, + .max_segments = -1, + .max_segment_size = dev->bounce_size, + .dma_alignment = dev->blk_size - 1, + }; + struct request_queue *queue; struct gendisk *gendisk; @@ -431,7 +439,7 @@ static int ps3disk_probe(struct 
ps3_system_bus_device *_dev) if (error) goto fail_teardown; - gendisk = blk_mq_alloc_disk(&priv->tag_set, dev); + gendisk = blk_mq_alloc_disk(&priv->tag_set, &lim, dev); if (IS_ERR(gendisk)) { dev_err(&dev->sbd.core, "%s:%u: blk_mq_alloc_disk failed\n", __func__, __LINE__); @@ -441,15 +449,8 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) queue = gendisk->queue; - blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9); - blk_queue_dma_alignment(queue, dev->blk_size-1); - blk_queue_logical_block_size(queue, dev->blk_size); - blk_queue_write_cache(queue, true, false); - blk_queue_max_segments(queue, -1); - blk_queue_max_segment_size(queue, dev->bounce_size); - priv->gendisk = gendisk; gendisk->major = ps3disk_major; gendisk->first_minor = devidx * PS3DISK_MINORS; diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index 38d42af01b25..bdcf083b45e2 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -730,10 +730,10 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev) ps3vram_proc_init(dev); - gendisk = blk_alloc_disk(NUMA_NO_NODE); - if (!gendisk) { + gendisk = blk_alloc_disk(NULL, NUMA_NO_NODE); + if (IS_ERR(gendisk)) { dev_err(&dev->core, "blk_alloc_disk failed\n"); - error = -ENOMEM; + error = PTR_ERR(gendisk); goto out_cache_cleanup; } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 12b5d53ec856..26ff5cd2bf0a 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -575,7 +575,7 @@ static const struct attribute_group rbd_bus_group = { }; __ATTRIBUTE_GROUPS(rbd_bus); -static struct bus_type rbd_bus_type = { +static const struct bus_type rbd_bus_type = { .name = "rbd", .bus_groups = rbd_bus_groups, }; @@ -4952,6 +4952,14 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) struct request_queue *q; unsigned int objset_bytes = rbd_dev->layout.object_size * rbd_dev->layout.stripe_count; + struct queue_limits lim = { + .max_hw_sectors = objset_bytes >> SECTOR_SHIFT, + .max_user_sectors = objset_bytes >> SECTOR_SHIFT, + .io_min = rbd_dev->opts->alloc_size, + .io_opt = rbd_dev->opts->alloc_size, + .max_segments = USHRT_MAX, + .max_segment_size = UINT_MAX, + }; int err; memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set)); @@ -4966,7 +4974,13 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) if (err) return err; - disk = blk_mq_alloc_disk(&rbd_dev->tag_set, rbd_dev); + if (rbd_dev->opts->trim) { + lim.discard_granularity = rbd_dev->opts->alloc_size; + lim.max_hw_discard_sectors = objset_bytes >> SECTOR_SHIFT; + lim.max_write_zeroes_sectors = objset_bytes >> SECTOR_SHIFT; + } + + disk = blk_mq_alloc_disk(&rbd_dev->tag_set, &lim, rbd_dev); if (IS_ERR(disk)) { err = PTR_ERR(disk); goto out_tag_set; @@ -4987,19 +5001,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) blk_queue_flag_set(QUEUE_FLAG_NONROT, q); /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ - blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT); - q->limits.max_sectors = queue_max_hw_sectors(q); - blk_queue_max_segments(q, USHRT_MAX); - blk_queue_max_segment_size(q, UINT_MAX); - blk_queue_io_min(q, rbd_dev->opts->alloc_size); - blk_queue_io_opt(q, rbd_dev->opts->alloc_size); - - if (rbd_dev->opts->trim) { - q->limits.discard_granularity = rbd_dev->opts->alloc_size; - blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); - blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); - } - if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q); diff --git 
a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index 4044c369d22a..b7ffe03c6160 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -1329,43 +1329,6 @@ static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev) } } -static void setup_request_queue(struct rnbd_clt_dev *dev, - struct rnbd_msg_open_rsp *rsp) -{ - blk_queue_logical_block_size(dev->queue, - le16_to_cpu(rsp->logical_block_size)); - blk_queue_physical_block_size(dev->queue, - le16_to_cpu(rsp->physical_block_size)); - blk_queue_max_hw_sectors(dev->queue, - dev->sess->max_io_size / SECTOR_SIZE); - - /* - * we don't support discards to "discontiguous" segments - * in on request - */ - blk_queue_max_discard_segments(dev->queue, 1); - - blk_queue_max_discard_sectors(dev->queue, - le32_to_cpu(rsp->max_discard_sectors)); - dev->queue->limits.discard_granularity = - le32_to_cpu(rsp->discard_granularity); - dev->queue->limits.discard_alignment = - le32_to_cpu(rsp->discard_alignment); - if (le16_to_cpu(rsp->secure_discard)) - blk_queue_max_secure_erase_sectors(dev->queue, - le32_to_cpu(rsp->max_discard_sectors)); - blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue); - blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue); - blk_queue_max_segments(dev->queue, dev->sess->max_segments); - blk_queue_io_opt(dev->queue, dev->sess->max_io_size); - blk_queue_virt_boundary(dev->queue, SZ_4K - 1); - blk_queue_write_cache(dev->queue, - !!(rsp->cache_policy & RNBD_WRITEBACK), - !!(rsp->cache_policy & RNBD_FUA)); - blk_queue_max_write_zeroes_sectors(dev->queue, - le32_to_cpu(rsp->max_write_zeroes_sectors)); -} - static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, struct rnbd_msg_open_rsp *rsp, int idx) { @@ -1403,18 +1366,41 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, static int rnbd_client_setup_device(struct rnbd_clt_dev *dev, struct rnbd_msg_open_rsp *rsp) { + struct queue_limits lim = { + .logical_block_size = le16_to_cpu(rsp->logical_block_size), + .physical_block_size = le16_to_cpu(rsp->physical_block_size), + .io_opt = dev->sess->max_io_size, + .max_hw_sectors = dev->sess->max_io_size / SECTOR_SIZE, + .max_hw_discard_sectors = le32_to_cpu(rsp->max_discard_sectors), + .discard_granularity = le32_to_cpu(rsp->discard_granularity), + .discard_alignment = le32_to_cpu(rsp->discard_alignment), + .max_segments = dev->sess->max_segments, + .virt_boundary_mask = SZ_4K - 1, + .max_write_zeroes_sectors = + le32_to_cpu(rsp->max_write_zeroes_sectors), + }; int idx = dev->clt_device_id; dev->size = le64_to_cpu(rsp->nsectors) * le16_to_cpu(rsp->logical_block_size); - dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, dev); + if (rsp->secure_discard) { + lim.max_secure_erase_sectors = + le32_to_cpu(rsp->max_discard_sectors); + } + + dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, &lim, dev); if (IS_ERR(dev->gd)) return PTR_ERR(dev->gd); dev->queue = dev->gd->queue; rnbd_init_mq_hw_queues(dev); - setup_request_queue(dev, rsp); + blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue); + blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue); + blk_queue_write_cache(dev->queue, + !!(rsp->cache_policy & RNBD_WRITEBACK), + !!(rsp->cache_policy & RNBD_FUA)); + return rnbd_clt_setup_gen_disk(dev, rsp, idx); } diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c index 3a0d5dcec6f2..f6e3a3c4b76c 100644 --- a/drivers/block/rnbd/rnbd-srv.c +++ b/drivers/block/rnbd/rnbd-srv.c @@ -145,7 +145,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess, priv->sess_dev = 
sess_dev; priv->id = id; - bio = bio_alloc(sess_dev->bdev_handle->bdev, 1, + bio = bio_alloc(file_bdev(sess_dev->bdev_file), 1, rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL); if (bio_add_page(bio, virt_to_page(data), datalen, offset_in_page(data)) != datalen) { @@ -219,7 +219,7 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id) rnbd_put_sess_dev(sess_dev); wait_for_completion(&dc); /* wait for inflights to drop to zero */ - bdev_release(sess_dev->bdev_handle); + fput(sess_dev->bdev_file); mutex_lock(&sess_dev->dev->lock); list_del(&sess_dev->dev_list); if (!sess_dev->readonly) @@ -534,7 +534,7 @@ rnbd_srv_get_or_create_srv_dev(struct block_device *bdev, static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp, struct rnbd_srv_sess_dev *sess_dev) { - struct block_device *bdev = sess_dev->bdev_handle->bdev; + struct block_device *bdev = file_bdev(sess_dev->bdev_file); rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP); rsp->device_id = cpu_to_le32(sess_dev->device_id); @@ -560,7 +560,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp, static struct rnbd_srv_sess_dev * rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess, const struct rnbd_msg_open *open_msg, - struct bdev_handle *handle, bool readonly, + struct file *bdev_file, bool readonly, struct rnbd_srv_dev *srv_dev) { struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess); @@ -572,7 +572,7 @@ rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess, strscpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname)); - sdev->bdev_handle = handle; + sdev->bdev_file = bdev_file; sdev->sess = srv_sess; sdev->dev = srv_dev; sdev->readonly = readonly; @@ -678,7 +678,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess, struct rnbd_srv_dev *srv_dev; struct rnbd_srv_sess_dev *srv_sess_dev; const struct rnbd_msg_open *open_msg = msg; - struct bdev_handle *bdev_handle; + struct file *bdev_file; blk_mode_t open_flags = BLK_OPEN_READ; char *full_path; struct rnbd_msg_open_rsp *rsp = data; @@ -716,15 +716,15 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess, goto reject; } - bdev_handle = bdev_open_by_path(full_path, open_flags, NULL, NULL); - if (IS_ERR(bdev_handle)) { - ret = PTR_ERR(bdev_handle); + bdev_file = bdev_file_open_by_path(full_path, open_flags, NULL, NULL); + if (IS_ERR(bdev_file)) { + ret = PTR_ERR(bdev_file); pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %pe\n", - full_path, srv_sess->sessname, bdev_handle); + full_path, srv_sess->sessname, bdev_file); goto free_path; } - srv_dev = rnbd_srv_get_or_create_srv_dev(bdev_handle->bdev, srv_sess, + srv_dev = rnbd_srv_get_or_create_srv_dev(file_bdev(bdev_file), srv_sess, open_msg->access_mode); if (IS_ERR(srv_dev)) { pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %pe\n", @@ -734,7 +734,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess, } srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg, - bdev_handle, + bdev_file, open_msg->access_mode == RNBD_ACCESS_RO, srv_dev); if (IS_ERR(srv_sess_dev)) { @@ -750,7 +750,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess, */ mutex_lock(&srv_dev->lock); if (!srv_dev->dev_kobj.state_in_sysfs) { - ret = rnbd_srv_create_dev_sysfs(srv_dev, bdev_handle->bdev); + ret = rnbd_srv_create_dev_sysfs(srv_dev, file_bdev(bdev_file)); if (ret) { mutex_unlock(&srv_dev->lock); rnbd_srv_err(srv_sess_dev, @@ -793,7 +793,7 @@ 
srv_dev_put: } rnbd_put_srv_dev(srv_dev); blkdev_put: - bdev_release(bdev_handle); + fput(bdev_file); free_path: kfree(full_path); reject: diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h index 343cc682b617..18d873808b8d 100644 --- a/drivers/block/rnbd/rnbd-srv.h +++ b/drivers/block/rnbd/rnbd-srv.h @@ -46,7 +46,7 @@ struct rnbd_srv_dev { struct rnbd_srv_sess_dev { /* Entry inside rnbd_srv_dev struct */ struct list_head dev_list; - struct bdev_handle *bdev_handle; + struct file *bdev_file; struct rnbd_srv_session *sess; struct rnbd_srv_dev *dev; struct kobject kobj; diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 7bf4b48e2282..c99dd6698977 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -784,6 +784,14 @@ static const struct blk_mq_ops vdc_mq_ops = { static int probe_disk(struct vdc_port *port) { + struct queue_limits lim = { + .physical_block_size = port->vdisk_phys_blksz, + .max_hw_sectors = port->max_xfer_size, + /* Each segment in a request is up to an aligned page in size. */ + .seg_boundary_mask = PAGE_SIZE - 1, + .max_segment_size = PAGE_SIZE, + .max_segments = port->ring_cookies, + }; struct request_queue *q; struct gendisk *g; int err; @@ -824,7 +832,7 @@ static int probe_disk(struct vdc_port *port) if (err) return err; - g = blk_mq_alloc_disk(&port->tag_set, port); + g = blk_mq_alloc_disk(&port->tag_set, &lim, port); if (IS_ERR(g)) { printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n", port->vio.name); @@ -835,12 +843,6 @@ static int probe_disk(struct vdc_port *port) port->disk = g; q = g->queue; - /* Each segment in a request is up to an aligned page in size. */ - blk_queue_segment_boundary(q, PAGE_SIZE - 1); - blk_queue_max_segment_size(q, PAGE_SIZE); - - blk_queue_max_segments(q, port->ring_cookies); - blk_queue_max_hw_sectors(q, port->max_xfer_size); g->major = vdc_major; g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT; g->minors = 1 << PARTITION_SHIFT; @@ -872,8 +874,6 @@ static int probe_disk(struct vdc_port *port) } } - blk_queue_physical_block_size(q, port->vdisk_phys_blksz); - pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n", g->disk_name, port->vdisk_size, (port->vdisk_size >> (20 - 9)), diff --git a/drivers/block/swim.c b/drivers/block/swim.c index f85b6af414b4..6731678f3a41 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -820,7 +820,7 @@ static int swim_floppy_init(struct swim_priv *swd) goto exit_put_disks; swd->unit[drive].disk = - blk_mq_alloc_disk(&swd->unit[drive].tag_set, + blk_mq_alloc_disk(&swd->unit[drive].tag_set, NULL, &swd->unit[drive]); if (IS_ERR(swd->unit[drive].disk)) { blk_mq_free_tag_set(&swd->unit[drive].tag_set); @@ -916,7 +916,7 @@ out: return ret; } -static int swim_remove(struct platform_device *dev) +static void swim_remove(struct platform_device *dev) { struct swim_priv *swd = platform_get_drvdata(dev); int drive; @@ -937,13 +937,11 @@ static int swim_remove(struct platform_device *dev) release_mem_region(res->start, resource_size(res)); kfree(swd); - - return 0; } static struct platform_driver swim_driver = { .probe = swim_probe, - .remove = swim_remove, + .remove_new = swim_remove, .driver = { .name = CARDNAME, }, diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index c2bc85826358..a04756ac778e 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -1210,7 +1210,7 @@ static int swim3_attach(struct macio_dev *mdev, if (rc) goto out_unregister; - disk = blk_mq_alloc_disk(&fs->tag_set, fs); + disk = 
blk_mq_alloc_disk(&fs->tag_set, NULL, fs); if (IS_ERR(disk)) { rc = PTR_ERR(disk); goto out_free_tag_set; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 1dfb2e77898b..bea3d5cf8a83 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -246,21 +246,12 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub) return 0; } -static int ublk_dev_param_zoned_apply(struct ublk_device *ub) +static void ublk_dev_param_zoned_apply(struct ublk_device *ub) { - const struct ublk_param_zoned *p = &ub->params.zoned; - - disk_set_zoned(ub->ub_disk); blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue); blk_queue_required_elevator_features(ub->ub_disk->queue, ELEVATOR_F_ZBD_SEQ_WRITE); - disk_set_max_active_zones(ub->ub_disk, p->max_active_zones); - disk_set_max_open_zones(ub->ub_disk, p->max_open_zones); - blk_queue_max_zone_append_sectors(ub->ub_disk->queue, p->max_zone_append_sectors); - ub->ub_disk->nr_zones = ublk_get_nr_zones(ub); - - return 0; } /* Based on virtblk_alloc_report_buffer */ @@ -432,9 +423,8 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub) return -EOPNOTSUPP; } -static int ublk_dev_param_zoned_apply(struct ublk_device *ub) +static void ublk_dev_param_zoned_apply(struct ublk_device *ub) { - return -EOPNOTSUPP; } static int ublk_revalidate_disk_zones(struct ublk_device *ub) @@ -498,11 +488,6 @@ static void ublk_dev_param_basic_apply(struct ublk_device *ub) struct request_queue *q = ub->ub_disk->queue; const struct ublk_param_basic *p = &ub->params.basic; - blk_queue_logical_block_size(q, 1 << p->logical_bs_shift); - blk_queue_physical_block_size(q, 1 << p->physical_bs_shift); - blk_queue_io_min(q, 1 << p->io_min_shift); - blk_queue_io_opt(q, 1 << p->io_opt_shift); - blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE, p->attrs & UBLK_ATTR_FUA); if (p->attrs & UBLK_ATTR_ROTATIONAL) @@ -510,29 +495,12 @@ static void ublk_dev_param_basic_apply(struct ublk_device *ub) else blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - blk_queue_max_hw_sectors(q, p->max_sectors); - blk_queue_chunk_sectors(q, p->chunk_sectors); - blk_queue_virt_boundary(q, p->virt_boundary_mask); - if (p->attrs & UBLK_ATTR_READ_ONLY) set_disk_ro(ub->ub_disk, true); set_capacity(ub->ub_disk, p->dev_sectors); } -static void ublk_dev_param_discard_apply(struct ublk_device *ub) -{ - struct request_queue *q = ub->ub_disk->queue; - const struct ublk_param_discard *p = &ub->params.discard; - - q->limits.discard_alignment = p->discard_alignment; - q->limits.discard_granularity = p->discard_granularity; - blk_queue_max_discard_sectors(q, p->max_discard_sectors); - blk_queue_max_write_zeroes_sectors(q, - p->max_write_zeroes_sectors); - blk_queue_max_discard_segments(q, p->max_discard_segments); -} - static int ublk_validate_params(const struct ublk_device *ub) { /* basic param is the only one which must be set */ @@ -576,20 +544,12 @@ static int ublk_validate_params(const struct ublk_device *ub) return 0; } -static int ublk_apply_params(struct ublk_device *ub) +static void ublk_apply_params(struct ublk_device *ub) { - if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC)) - return -EINVAL; - ublk_dev_param_basic_apply(ub); - if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) - ublk_dev_param_discard_apply(ub); - if (ub->params.types & UBLK_PARAM_TYPE_ZONED) - return ublk_dev_param_zoned_apply(ub); - - return 0; + ublk_dev_param_zoned_apply(ub); } static inline bool ublk_support_user_copy(const struct ublk_queue *ubq) @@ -645,14 +605,16 @@ static 
inline bool ublk_need_get_data(const struct ublk_queue *ubq) return ubq->flags & UBLK_F_NEED_GET_DATA; } -static struct ublk_device *ublk_get_device(struct ublk_device *ub) +/* Called in slow path only, keep it noinline for trace purpose */ +static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub) { if (kobject_get_unless_zero(&ub->cdev_dev.kobj)) return ub; return NULL; } -static void ublk_put_device(struct ublk_device *ub) +/* Called in slow path only, keep it noinline for trace purpose */ +static noinline void ublk_put_device(struct ublk_device *ub) { put_device(&ub->cdev_dev); } @@ -711,7 +673,7 @@ static void ublk_free_disk(struct gendisk *disk) struct ublk_device *ub = disk->private_data; clear_bit(UB_STATE_USED, &ub->state); - put_device(&ub->cdev_dev); + ublk_put_device(ub); } static void ublk_store_owner_uid_gid(unsigned int *owner_uid, @@ -2182,7 +2144,7 @@ static void ublk_remove(struct ublk_device *ub) cancel_work_sync(&ub->stop_work); cancel_work_sync(&ub->quiesce_work); cdev_device_del(&ub->cdev, &ub->cdev_dev); - put_device(&ub->cdev_dev); + ublk_put_device(ub); ublks_added--; } @@ -2205,12 +2167,47 @@ static struct ublk_device *ublk_get_device_from_id(int idx) static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) { const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe); + const struct ublk_param_basic *p = &ub->params.basic; int ublksrv_pid = (int)header->data[0]; + struct queue_limits lim = { + .logical_block_size = 1 << p->logical_bs_shift, + .physical_block_size = 1 << p->physical_bs_shift, + .io_min = 1 << p->io_min_shift, + .io_opt = 1 << p->io_opt_shift, + .max_hw_sectors = p->max_sectors, + .chunk_sectors = p->chunk_sectors, + .virt_boundary_mask = p->virt_boundary_mask, + + }; struct gendisk *disk; int ret = -EINVAL; if (ublksrv_pid <= 0) return -EINVAL; + if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC)) + return -EINVAL; + + if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) { + const struct ublk_param_discard *pd = &ub->params.discard; + + lim.discard_alignment = pd->discard_alignment; + lim.discard_granularity = pd->discard_granularity; + lim.max_hw_discard_sectors = pd->max_discard_sectors; + lim.max_write_zeroes_sectors = pd->max_write_zeroes_sectors; + lim.max_discard_segments = pd->max_discard_segments; + } + + if (ub->params.types & UBLK_PARAM_TYPE_ZONED) { + const struct ublk_param_zoned *p = &ub->params.zoned; + + if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) + return -EOPNOTSUPP; + + lim.zoned = true; + lim.max_active_zones = p->max_active_zones; + lim.max_open_zones = p->max_open_zones; + lim.max_zone_append_sectors = p->max_zone_append_sectors; + } if (wait_for_completion_interruptible(&ub->completion) != 0) return -EINTR; @@ -2222,7 +2219,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) goto out_unlock; } - disk = blk_mq_alloc_disk(&ub->tag_set, NULL); + disk = blk_mq_alloc_disk(&ub->tag_set, &lim, NULL); if (IS_ERR(disk)) { ret = PTR_ERR(disk); goto out_unlock; @@ -2234,15 +2231,13 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) ub->dev_info.ublksrv_pid = ublksrv_pid; ub->ub_disk = disk; - ret = ublk_apply_params(ub); - if (ret) - goto out_put_disk; + ublk_apply_params(ub); /* don't probe partitions if any one ubq daemon is un-trusted */ if (ub->nr_privileged_daemon != ub->nr_queues_ready) set_bit(GD_SUPPRESS_PART_SCAN, &disk->state); - get_device(&ub->cdev_dev); + ublk_get_device(ub); ub->dev_info.state = UBLK_S_DEV_LIVE; if 
(ublk_dev_is_zoned(ub)) { @@ -2262,7 +2257,6 @@ out_put_cdev: ub->dev_info.state = UBLK_S_DEV_DEAD; ublk_put_device(ub); } -out_put_disk: if (ret) put_disk(disk); out_unlock: @@ -2474,7 +2468,7 @@ static inline bool ublk_idr_freed(int id) return ptr == NULL; } -static int ublk_ctrl_del_dev(struct ublk_device **p_ub) +static int ublk_ctrl_del_dev(struct ublk_device **p_ub, bool wait) { struct ublk_device *ub = *p_ub; int idx = ub->ub_number; @@ -2508,7 +2502,7 @@ static int ublk_ctrl_del_dev(struct ublk_device **p_ub) * - the device number is freed already, we will not find this * device via ublk_get_device_from_id() */ - if (wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx))) + if (wait && wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx))) return -EINTR; return 0; } @@ -2907,7 +2901,10 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, ret = ublk_ctrl_add_dev(cmd); break; case UBLK_CMD_DEL_DEV: - ret = ublk_ctrl_del_dev(&ub); + ret = ublk_ctrl_del_dev(&ub, true); + break; + case UBLK_U_CMD_DEL_DEV_ASYNC: + ret = ublk_ctrl_del_dev(&ub, false); break; case UBLK_CMD_GET_QUEUE_AFFINITY: ret = ublk_ctrl_get_queue_affinity(ub, cmd); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 5bf98fd6a651..42dea7601d87 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -720,25 +720,24 @@ fail_report: return ret; } -static int virtblk_probe_zoned_device(struct virtio_device *vdev, - struct virtio_blk *vblk, - struct request_queue *q) +static int virtblk_read_zoned_limits(struct virtio_blk *vblk, + struct queue_limits *lim) { + struct virtio_device *vdev = vblk->vdev; u32 v, wg; dev_dbg(&vdev->dev, "probing host-managed zoned device\n"); - disk_set_zoned(vblk->disk); - blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); + lim->zoned = true; virtio_cread(vdev, struct virtio_blk_config, zoned.max_open_zones, &v); - disk_set_max_open_zones(vblk->disk, v); + lim->max_open_zones = v; dev_dbg(&vdev->dev, "max open zones = %u\n", v); virtio_cread(vdev, struct virtio_blk_config, zoned.max_active_zones, &v); - disk_set_max_active_zones(vblk->disk, v); + lim->max_active_zones = v; dev_dbg(&vdev->dev, "max active zones = %u\n", v); virtio_cread(vdev, struct virtio_blk_config, @@ -747,8 +746,8 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev, dev_warn(&vdev->dev, "zero write granularity reported\n"); return -ENODEV; } - blk_queue_physical_block_size(q, wg); - blk_queue_io_min(q, wg); + lim->physical_block_size = wg; + lim->io_min = wg; dev_dbg(&vdev->dev, "write granularity = %u\n", wg); @@ -764,13 +763,13 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev, vblk->zone_sectors); return -ENODEV; } - blk_queue_chunk_sectors(q, vblk->zone_sectors); + lim->chunk_sectors = vblk->zone_sectors; dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors); if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) { dev_warn(&vblk->vdev->dev, "ignoring negotiated F_DISCARD for zoned device\n"); - blk_queue_max_discard_sectors(q, 0); + lim->max_hw_discard_sectors = 0; } virtio_cread(vdev, struct virtio_blk_config, @@ -785,25 +784,21 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev, wg, v); return -ENODEV; } - blk_queue_max_zone_append_sectors(q, v); + lim->max_zone_append_sectors = v; dev_dbg(&vdev->dev, "max append sectors = %u\n", v); - return blk_revalidate_disk_zones(vblk->disk, NULL); + return 0; } - #else - /* - * Zoned block device support is not configured in this kernel. 
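The ublk conversion above is the pattern the whole series converges on: instead of allocating the disk and then calling blk_queue_*() setters one by one, the driver fills a struct queue_limits up front and hands it to blk_mq_alloc_disk(), which applies the limits at queue creation time and reports failure as an ERR_PTR. The null_blk, rbd, rnbd-clt and sunvdc hunks above do the same. Roughly, with placeholder names and limit values:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static struct gendisk *my_alloc_mq_disk(struct blk_mq_tag_set *set,
					void *drv_data)
{
	struct queue_limits lim = {
		.logical_block_size	= 512,	/* placeholder limits */
		.physical_block_size	= 4096,
		.max_hw_sectors		= 256,
		.max_segments		= 32,
	};

	/* Limits are applied in one go when the queue is created. */
	return blk_mq_alloc_disk(set, &lim, drv_data);
}

/* Callers check the result with IS_ERR()/PTR_ERR(); NULL is never returned. */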
- * Host-managed zoned devices can't be supported, but others are - * good to go as regular block devices. + * Zoned block device support is not configured in this kernel, host-managed + * zoned devices can't be supported. */ #define virtblk_report_zones NULL - -static inline int virtblk_probe_zoned_device(struct virtio_device *vdev, - struct virtio_blk *vblk, struct request_queue *q) +static inline int virtblk_read_zoned_limits(struct virtio_blk *vblk, + struct queue_limits *lim) { - dev_err(&vdev->dev, + dev_err(&vblk->vdev->dev, "virtio_blk: zoned devices are not supported"); return -EOPNOTSUPP; } @@ -1248,31 +1243,17 @@ static const struct blk_mq_ops virtio_mq_ops = { static unsigned int virtblk_queue_depth; module_param_named(queue_depth, virtblk_queue_depth, uint, 0444); -static int virtblk_probe(struct virtio_device *vdev) +static int virtblk_read_limits(struct virtio_blk *vblk, + struct queue_limits *lim) { - struct virtio_blk *vblk; - struct request_queue *q; - int err, index; - + struct virtio_device *vdev = vblk->vdev; u32 v, blk_size, max_size, sg_elems, opt_io_size; u32 max_discard_segs = 0; u32 discard_granularity = 0; u16 min_io_size; u8 physical_block_exp, alignment_offset; - unsigned int queue_depth; size_t max_dma_size; - - if (!vdev->config->get) { - dev_err(&vdev->dev, "%s failure: config access disabled\n", - __func__); - return -EINVAL; - } - - err = ida_alloc_range(&vd_index_ida, 0, - minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL); - if (err < 0) - goto out; - index = err; + int err; /* We need to know how many segments before we allocate. */ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX, @@ -1286,78 +1267,11 @@ static int virtblk_probe(struct virtio_device *vdev) /* Prevent integer overflows and honor max vq size */ sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2); - vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); - if (!vblk) { - err = -ENOMEM; - goto out_free_index; - } - - mutex_init(&vblk->vdev_mutex); - - vblk->vdev = vdev; - - INIT_WORK(&vblk->config_work, virtblk_config_changed_work); - - err = init_vq(vblk); - if (err) - goto out_free_vblk; - - /* Default queue sizing is to fill the ring. */ - if (!virtblk_queue_depth) { - queue_depth = vblk->vqs[0].vq->num_free; - /* ... 
but without indirect descs, we use 2 descs per req */ - if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) - queue_depth /= 2; - } else { - queue_depth = virtblk_queue_depth; - } - - memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); - vblk->tag_set.ops = &virtio_mq_ops; - vblk->tag_set.queue_depth = queue_depth; - vblk->tag_set.numa_node = NUMA_NO_NODE; - vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; - vblk->tag_set.cmd_size = - sizeof(struct virtblk_req) + - sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT; - vblk->tag_set.driver_data = vblk; - vblk->tag_set.nr_hw_queues = vblk->num_vqs; - vblk->tag_set.nr_maps = 1; - if (vblk->io_queues[HCTX_TYPE_POLL]) - vblk->tag_set.nr_maps = 3; - - err = blk_mq_alloc_tag_set(&vblk->tag_set); - if (err) - goto out_free_vq; - - vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk); - if (IS_ERR(vblk->disk)) { - err = PTR_ERR(vblk->disk); - goto out_free_tags; - } - q = vblk->disk->queue; - - virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); - - vblk->disk->major = major; - vblk->disk->first_minor = index_to_minor(index); - vblk->disk->minors = 1 << PART_BITS; - vblk->disk->private_data = vblk; - vblk->disk->fops = &virtblk_fops; - vblk->index = index; - - /* configure queue flush support */ - virtblk_update_cache_mode(vdev); - - /* If disk is read-only in the host, the guest should obey */ - if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) - set_disk_ro(vblk->disk, 1); - /* We can handle whatever the host told us to handle. */ - blk_queue_max_segments(q, sg_elems); + lim->max_segments = sg_elems; /* No real sector limit. */ - blk_queue_max_hw_sectors(q, UINT_MAX); + lim->max_hw_sectors = UINT_MAX; max_dma_size = virtio_max_dma_size(vdev); max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size; @@ -1369,7 +1283,7 @@ static int virtblk_probe(struct virtio_device *vdev) if (!err) max_size = min(max_size, v); - blk_queue_max_segment_size(q, max_size); + lim->max_segment_size = max_size; /* Host can optionally specify the block size of the device */ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE, @@ -1381,38 +1295,37 @@ static int virtblk_probe(struct virtio_device *vdev) dev_err(&vdev->dev, "virtio_blk: invalid block size: 0x%x\n", blk_size); - goto out_cleanup_disk; + return err; } - blk_queue_logical_block_size(q, blk_size); + lim->logical_block_size = blk_size; } else - blk_size = queue_logical_block_size(q); + blk_size = lim->logical_block_size; /* Use topology information if available */ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, struct virtio_blk_config, physical_block_exp, &physical_block_exp); if (!err && physical_block_exp) - blk_queue_physical_block_size(q, - blk_size * (1 << physical_block_exp)); + lim->physical_block_size = blk_size * (1 << physical_block_exp); err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, struct virtio_blk_config, alignment_offset, &alignment_offset); if (!err && alignment_offset) - blk_queue_alignment_offset(q, blk_size * alignment_offset); + lim->alignment_offset = blk_size * alignment_offset; err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, struct virtio_blk_config, min_io_size, &min_io_size); if (!err && min_io_size) - blk_queue_io_min(q, blk_size * min_io_size); + lim->io_min = blk_size * min_io_size; err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY, struct virtio_blk_config, opt_io_size, &opt_io_size); if (!err && opt_io_size) - blk_queue_io_opt(q, blk_size * opt_io_size); + lim->io_opt = blk_size * opt_io_size; if (virtio_has_feature(vdev, 
VIRTIO_BLK_F_DISCARD)) { virtio_cread(vdev, struct virtio_blk_config, @@ -1420,7 +1333,7 @@ static int virtblk_probe(struct virtio_device *vdev) virtio_cread(vdev, struct virtio_blk_config, max_discard_sectors, &v); - blk_queue_max_discard_sectors(q, v ? v : UINT_MAX); + lim->max_hw_discard_sectors = v ? v : UINT_MAX; virtio_cread(vdev, struct virtio_blk_config, max_discard_seg, &max_discard_segs); @@ -1429,7 +1342,7 @@ static int virtblk_probe(struct virtio_device *vdev) if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) { virtio_cread(vdev, struct virtio_blk_config, max_write_zeroes_sectors, &v); - blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX); + lim->max_write_zeroes_sectors = v ? v : UINT_MAX; } /* The discard and secure erase limits are combined since the Linux @@ -1455,8 +1368,7 @@ static int virtblk_probe(struct virtio_device *vdev) if (!v) { dev_err(&vdev->dev, "virtio_blk: secure_erase_sector_alignment can't be 0\n"); - err = -EINVAL; - goto out_cleanup_disk; + return -EINVAL; } discard_granularity = min_not_zero(discard_granularity, v); @@ -1470,11 +1382,10 @@ static int virtblk_probe(struct virtio_device *vdev) if (!v) { dev_err(&vdev->dev, "virtio_blk: max_secure_erase_sectors can't be 0\n"); - err = -EINVAL; - goto out_cleanup_disk; + return -EINVAL; } - blk_queue_max_secure_erase_sectors(q, v); + lim->max_secure_erase_sectors = v; virtio_cread(vdev, struct virtio_blk_config, max_secure_erase_seg, &v); @@ -1485,8 +1396,7 @@ static int virtblk_probe(struct virtio_device *vdev) if (!v) { dev_err(&vdev->dev, "virtio_blk: max_secure_erase_seg can't be 0\n"); - err = -EINVAL; - goto out_cleanup_disk; + return -EINVAL; } max_discard_segs = min_not_zero(max_discard_segs, v); @@ -1502,45 +1412,142 @@ static int virtblk_probe(struct virtio_device *vdev) if (!max_discard_segs) max_discard_segs = sg_elems; - blk_queue_max_discard_segments(q, - min(max_discard_segs, MAX_DISCARD_SEGMENTS)); + lim->max_discard_segments = + min(max_discard_segs, MAX_DISCARD_SEGMENTS); if (discard_granularity) - q->limits.discard_granularity = discard_granularity << SECTOR_SHIFT; + lim->discard_granularity = + discard_granularity << SECTOR_SHIFT; else - q->limits.discard_granularity = blk_size; + lim->discard_granularity = blk_size; } - virtblk_update_capacity(vblk, false); - virtio_device_ready(vdev); - - /* - * All steps that follow use the VQs therefore they need to be - * placed after the virtio_device_ready() call above. 
- */ if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) { u8 model; - virtio_cread(vdev, struct virtio_blk_config, zoned.model, - &model); + virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model); switch (model) { case VIRTIO_BLK_Z_NONE: case VIRTIO_BLK_Z_HA: - /* Present the host-aware device as non-zoned */ - break; + /* treat host-aware devices as non-zoned */ + return 0; case VIRTIO_BLK_Z_HM: - err = virtblk_probe_zoned_device(vdev, vblk, q); + err = virtblk_read_zoned_limits(vblk, lim); if (err) - goto out_cleanup_disk; + return err; break; default: - dev_err(&vdev->dev, "unsupported zone model %d\n", - model); - err = -EINVAL; - goto out_cleanup_disk; + dev_err(&vdev->dev, "unsupported zone model %d\n", model); + return -EINVAL; } } + return 0; +} + +static int virtblk_probe(struct virtio_device *vdev) +{ + struct virtio_blk *vblk; + struct queue_limits lim = { }; + int err, index; + unsigned int queue_depth; + + if (!vdev->config->get) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + + err = ida_alloc_range(&vd_index_ida, 0, + minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL); + if (err < 0) + goto out; + index = err; + + vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); + if (!vblk) { + err = -ENOMEM; + goto out_free_index; + } + + mutex_init(&vblk->vdev_mutex); + + vblk->vdev = vdev; + + INIT_WORK(&vblk->config_work, virtblk_config_changed_work); + + err = init_vq(vblk); + if (err) + goto out_free_vblk; + + /* Default queue sizing is to fill the ring. */ + if (!virtblk_queue_depth) { + queue_depth = vblk->vqs[0].vq->num_free; + /* ... but without indirect descs, we use 2 descs per req */ + if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) + queue_depth /= 2; + } else { + queue_depth = virtblk_queue_depth; + } + + memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); + vblk->tag_set.ops = &virtio_mq_ops; + vblk->tag_set.queue_depth = queue_depth; + vblk->tag_set.numa_node = NUMA_NO_NODE; + vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + vblk->tag_set.cmd_size = + sizeof(struct virtblk_req) + + sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT; + vblk->tag_set.driver_data = vblk; + vblk->tag_set.nr_hw_queues = vblk->num_vqs; + vblk->tag_set.nr_maps = 1; + if (vblk->io_queues[HCTX_TYPE_POLL]) + vblk->tag_set.nr_maps = 3; + + err = blk_mq_alloc_tag_set(&vblk->tag_set); + if (err) + goto out_free_vq; + + err = virtblk_read_limits(vblk, &lim); + if (err) + goto out_free_tags; + + vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk); + if (IS_ERR(vblk->disk)) { + err = PTR_ERR(vblk->disk); + goto out_free_tags; + } + + virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); + + vblk->disk->major = major; + vblk->disk->first_minor = index_to_minor(index); + vblk->disk->minors = 1 << PART_BITS; + vblk->disk->private_data = vblk; + vblk->disk->fops = &virtblk_fops; + vblk->index = index; + + /* configure queue flush support */ + virtblk_update_cache_mode(vdev); + + /* If disk is read-only in the host, the guest should obey */ + if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) + set_disk_ro(vblk->disk, 1); + + virtblk_update_capacity(vblk, false); + virtio_device_ready(vdev); + + /* + * All steps that follow use the VQs therefore they need to be + * placed after the virtio_device_ready() call above. 
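A design point worth noting in the virtio-blk rework above: reading and validating the device configuration is split into a helper that only fills a caller-provided struct queue_limits, so a bad configuration is rejected before any gendisk exists and the old out_cleanup_disk unwinding for those error paths goes away. A rough sketch of that split, with an invented device type and a hard-coded stand-in for the config-space read:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/log2.h>

struct my_hw {
	struct blk_mq_tag_set tag_set;
};

static int my_read_limits(struct my_hw *hw, struct queue_limits *lim)
{
	u32 blk_size = 4096;		/* stand-in for a config-space read */

	if (blk_size < 512 || !is_power_of_2(blk_size))
		return -EINVAL;		/* fail before the disk exists */
	lim->logical_block_size = blk_size;
	return 0;
}

static int my_probe(struct my_hw *hw)
{
	struct queue_limits lim = { };
	struct gendisk *disk;
	int err;

	err = my_read_limits(hw, &lim);
	if (err)
		return err;

	disk = blk_mq_alloc_disk(&hw->tag_set, &lim, hw);
	return PTR_ERR_OR_ZERO(disk);
}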
+ */ + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) { + blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue); + err = blk_revalidate_disk_zones(vblk->disk, NULL); + if (err) + goto out_cleanup_disk; + } + err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups); if (err) goto out_cleanup_disk; @@ -1593,14 +1600,15 @@ static int virtblk_freeze(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; + /* Ensure no requests in virtqueues before deleting vqs. */ + blk_mq_freeze_queue(vblk->disk->queue); + /* Ensure we don't receive any more interrupts */ virtio_reset_device(vdev); /* Make sure no work handler is accessing the device. */ flush_work(&vblk->config_work); - blk_mq_quiesce_queue(vblk->disk->queue); - vdev->config->del_vqs(vdev); kfree(vblk->vqs); @@ -1618,7 +1626,7 @@ static int virtblk_restore(struct virtio_device *vdev) virtio_device_ready(vdev); - blk_mq_unquiesce_queue(vblk->disk->queue); + blk_mq_unfreeze_queue(vblk->disk->queue); return 0; } #endif diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 4defd7f387c7..944576d582fb 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -465,7 +465,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif, } req->dev = vbd->pdevice; - req->bdev = vbd->bdev_handle->bdev; + req->bdev = file_bdev(vbd->bdev_file); rc = 0; out: @@ -969,7 +969,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring, int err = 0; int status = BLKIF_RSP_OKAY; struct xen_blkif *blkif = ring->blkif; - struct block_device *bdev = blkif->vbd.bdev_handle->bdev; + struct block_device *bdev = file_bdev(blkif->vbd.bdev_file); struct phys_req preq; xen_blkif_get(blkif); diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 1432c83183d0..b427d54bc120 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -221,7 +221,7 @@ struct xen_vbd { unsigned char type; /* phys device that this vbd maps to. */ u32 pdevice; - struct bdev_handle *bdev_handle; + struct file *bdev_file; /* Cached size parameter. */ sector_t size; unsigned int flush_support:1; @@ -360,7 +360,7 @@ struct pending_req { }; -#define vbd_sz(_v) bdev_nr_sectors((_v)->bdev_handle->bdev) +#define vbd_sz(_v) bdev_nr_sectors(file_bdev((_v)->bdev_file)) #define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt)) #define xen_blkif_put(_b) \ diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index e34219ea2b05..0621878940ae 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -81,7 +81,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif) int i; /* Not ready to connect? */ - if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_handle) + if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_file) return; /* Already connected? 
*/ @@ -99,13 +99,12 @@ static void xen_update_blkif_status(struct xen_blkif *blkif) return; } - err = sync_blockdev(blkif->vbd.bdev_handle->bdev); + err = sync_blockdev(file_bdev(blkif->vbd.bdev_file)); if (err) { xenbus_dev_error(blkif->be->dev, err, "block flush"); return; } - invalidate_inode_pages2( - blkif->vbd.bdev_handle->bdev->bd_inode->i_mapping); + invalidate_inode_pages2(blkif->vbd.bdev_file->f_mapping); for (i = 0; i < blkif->nr_rings; i++) { ring = &blkif->rings[i]; @@ -473,9 +472,9 @@ static void xenvbd_sysfs_delif(struct xenbus_device *dev) static void xen_vbd_free(struct xen_vbd *vbd) { - if (vbd->bdev_handle) - bdev_release(vbd->bdev_handle); - vbd->bdev_handle = NULL; + if (vbd->bdev_file) + fput(vbd->bdev_file); + vbd->bdev_file = NULL; } static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, @@ -483,7 +482,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, int cdrom) { struct xen_vbd *vbd; - struct bdev_handle *bdev_handle; + struct file *bdev_file; vbd = &blkif->vbd; vbd->handle = handle; @@ -492,17 +491,17 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, vbd->pdevice = MKDEV(major, minor); - bdev_handle = bdev_open_by_dev(vbd->pdevice, vbd->readonly ? + bdev_file = bdev_file_open_by_dev(vbd->pdevice, vbd->readonly ? BLK_OPEN_READ : BLK_OPEN_WRITE, NULL, NULL); - if (IS_ERR(bdev_handle)) { + if (IS_ERR(bdev_file)) { pr_warn("xen_vbd_create: device %08x could not be opened\n", vbd->pdevice); return -ENOENT; } - vbd->bdev_handle = bdev_handle; - if (vbd->bdev_handle->bdev->bd_disk == NULL) { + vbd->bdev_file = bdev_file; + if (file_bdev(vbd->bdev_file)->bd_disk == NULL) { pr_warn("xen_vbd_create: device %08x doesn't exist\n", vbd->pdevice); xen_vbd_free(vbd); @@ -510,14 +509,14 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, } vbd->size = vbd_sz(vbd); - if (cdrom || disk_to_cdi(vbd->bdev_handle->bdev->bd_disk)) + if (cdrom || disk_to_cdi(file_bdev(vbd->bdev_file)->bd_disk)) vbd->type |= VDISK_CDROM; - if (vbd->bdev_handle->bdev->bd_disk->flags & GENHD_FL_REMOVABLE) + if (file_bdev(vbd->bdev_file)->bd_disk->flags & GENHD_FL_REMOVABLE) vbd->type |= VDISK_REMOVABLE; - if (bdev_write_cache(bdev_handle->bdev)) + if (bdev_write_cache(file_bdev(bdev_file))) vbd->flush_support = true; - if (bdev_max_secure_erase_sectors(bdev_handle->bdev)) + if (bdev_max_secure_erase_sectors(file_bdev(bdev_file))) vbd->discard_secure = true; pr_debug("Successful creation of handle=%04x (dom=%u)\n", @@ -570,7 +569,7 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info struct xen_blkif *blkif = be->blkif; int err; int state = 0; - struct block_device *bdev = be->blkif->vbd.bdev_handle->bdev; + struct block_device *bdev = file_bdev(be->blkif->vbd.bdev_file); if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1)) return; @@ -932,7 +931,7 @@ again: } err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu", (unsigned long)bdev_logical_block_size( - be->blkif->vbd.bdev_handle->bdev)); + file_bdev(be->blkif->vbd.bdev_file))); if (err) { xenbus_dev_fatal(dev, err, "writing %s/sector-size", dev->nodename); @@ -940,7 +939,7 @@ again: } err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u", bdev_physical_block_size( - be->blkif->vbd.bdev_handle->bdev)); + file_bdev(be->blkif->vbd.bdev_file))); if (err) xenbus_dev_error(dev, err, "writing %s/physical-sector-size", dev->nodename); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c 
index 434fab306777..fd7c0ff2139c 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -941,39 +941,35 @@ static const struct blk_mq_ops blkfront_mq_ops = { .complete = blkif_complete_rq, }; -static void blkif_set_queue_limits(struct blkfront_info *info) +static void blkif_set_queue_limits(const struct blkfront_info *info, + struct queue_limits *lim) { - struct request_queue *rq = info->rq; - struct gendisk *gd = info->gd; unsigned int segments = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; - blk_queue_flag_set(QUEUE_FLAG_VIRT, rq); - if (info->feature_discard) { - blk_queue_max_discard_sectors(rq, get_capacity(gd)); - rq->limits.discard_granularity = info->discard_granularity ?: - info->physical_sector_size; - rq->limits.discard_alignment = info->discard_alignment; + lim->max_hw_discard_sectors = UINT_MAX; + if (info->discard_granularity) + lim->discard_granularity = info->discard_granularity; + lim->discard_alignment = info->discard_alignment; if (info->feature_secdiscard) - blk_queue_max_secure_erase_sectors(rq, - get_capacity(gd)); + lim->max_secure_erase_sectors = UINT_MAX; } /* Hard sector size and max sectors impersonate the equiv. hardware. */ - blk_queue_logical_block_size(rq, info->sector_size); - blk_queue_physical_block_size(rq, info->physical_sector_size); - blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512); + lim->logical_block_size = info->sector_size; + lim->physical_block_size = info->physical_sector_size; + lim->max_hw_sectors = (segments * XEN_PAGE_SIZE) / 512; /* Each segment in a request is up to an aligned page in size. */ - blk_queue_segment_boundary(rq, PAGE_SIZE - 1); - blk_queue_max_segment_size(rq, PAGE_SIZE); + lim->seg_boundary_mask = PAGE_SIZE - 1; + lim->max_segment_size = PAGE_SIZE; /* Ensure a merged request will fit in a single I/O ring slot. */ - blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG); + lim->max_segments = segments / GRANTS_PER_PSEG; /* Make sure buffer addresses are sector-aligned. */ - blk_queue_dma_alignment(rq, 511); + lim->dma_alignment = 511; } static const char *flush_info(struct blkfront_info *info) @@ -1070,6 +1066,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, struct blkfront_info *info, u16 sector_size, unsigned int physical_sector_size) { + struct queue_limits lim = {}; struct gendisk *gd; int nr_minors = 1; int err; @@ -1136,11 +1133,13 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, if (err) goto out_release_minors; - gd = blk_mq_alloc_disk(&info->tag_set, info); + blkif_set_queue_limits(info, &lim); + gd = blk_mq_alloc_disk(&info->tag_set, &lim, info); if (IS_ERR(gd)) { err = PTR_ERR(gd); goto out_free_tag_set; } + blk_queue_flag_set(QUEUE_FLAG_VIRT, gd->queue); strcpy(gd->disk_name, DEV_NAME); ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset); @@ -1162,7 +1161,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, info->gd = gd; info->sector_size = sector_size; info->physical_sector_size = physical_sector_size; - blkif_set_queue_limits(info); xlvbd_flush(info); @@ -2006,18 +2004,19 @@ static int blkfront_probe(struct xenbus_device *dev, static int blkif_recover(struct blkfront_info *info) { + struct queue_limits lim; unsigned int r_index; struct request *req, *n; int rc; struct bio *bio; - unsigned int segs; struct blkfront_ring_info *rinfo; + lim = queue_limits_start_update(info->rq); blkfront_gather_backend_features(info); - /* Reset limits changed by blk_mq_update_nr_hw_queues(). 
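For a queue that already exists, the series uses the start/commit pair instead of poking individual limits: blkif_recover() above snapshots the current limits, lets blkif_set_queue_limits() rewrite the copy, and commits the result in one validated step. In isolation the pattern looks roughly like this (the max_segments change is only an example):

#include <linux/blkdev.h>

static int my_update_limits(struct request_queue *q, unsigned int max_segs)
{
	struct queue_limits lim;

	lim = queue_limits_start_update(q);	/* snapshot current limits */
	lim.max_segments = max_segs;		/* example change */
	return queue_limits_commit_update(q, &lim);	/* validate and apply */
}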
*/ - blkif_set_queue_limits(info); - segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; - blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG); + blkif_set_queue_limits(info, &lim); + rc = queue_limits_commit_update(info->rq, &lim); + if (rc) + return rc; for_each_rinfo(info, rinfo, r_index) { rc = blkfront_setup_indirect(rinfo); @@ -2037,7 +2036,9 @@ static int blkif_recover(struct blkfront_info *info) list_for_each_entry_safe(req, n, &info->requests, queuelist) { /* Requeue pending requests (flush or discard) */ list_del_init(&req->queuelist); - BUG_ON(req->nr_phys_segments > segs); + BUG_ON(req->nr_phys_segments > + (info->max_indirect_segments ? : + BLKIF_MAX_SEGMENTS_PER_REQUEST)); blk_mq_requeue_request(req, false); } blk_mq_start_stopped_hw_queues(info->rq, true); diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 11493167b0a8..7c5f4e4d9b50 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -318,7 +318,7 @@ static int z2ram_register_disk(int minor) struct gendisk *disk; int err; - disk = blk_mq_alloc_disk(&tag_set, NULL); + disk = blk_mq_alloc_disk(&tag_set, NULL, NULL); if (IS_ERR(disk)) return PTR_ERR(disk); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 6772e0c654fa..da7a20fa6152 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -426,11 +426,11 @@ static void reset_bdev(struct zram *zram) if (!zram->backing_dev) return; - bdev_release(zram->bdev_handle); + fput(zram->bdev_file); /* hope filp_close flush all of IO */ filp_close(zram->backing_dev, NULL); zram->backing_dev = NULL; - zram->bdev_handle = NULL; + zram->bdev_file = NULL; zram->disk->fops = &zram_devops; kvfree(zram->bitmap); zram->bitmap = NULL; @@ -476,7 +476,7 @@ static ssize_t backing_dev_store(struct device *dev, struct address_space *mapping; unsigned int bitmap_sz; unsigned long nr_pages, *bitmap = NULL; - struct bdev_handle *bdev_handle = NULL; + struct file *bdev_file = NULL; int err; struct zram *zram = dev_to_zram(dev); @@ -513,11 +513,11 @@ static ssize_t backing_dev_store(struct device *dev, goto out; } - bdev_handle = bdev_open_by_dev(inode->i_rdev, + bdev_file = bdev_file_open_by_dev(inode->i_rdev, BLK_OPEN_READ | BLK_OPEN_WRITE, zram, NULL); - if (IS_ERR(bdev_handle)) { - err = PTR_ERR(bdev_handle); - bdev_handle = NULL; + if (IS_ERR(bdev_file)) { + err = PTR_ERR(bdev_file); + bdev_file = NULL; goto out; } @@ -531,7 +531,7 @@ static ssize_t backing_dev_store(struct device *dev, reset_bdev(zram); - zram->bdev_handle = bdev_handle; + zram->bdev_file = bdev_file; zram->backing_dev = backing_dev; zram->bitmap = bitmap; zram->nr_pages = nr_pages; @@ -544,8 +544,8 @@ static ssize_t backing_dev_store(struct device *dev, out: kvfree(bitmap); - if (bdev_handle) - bdev_release(bdev_handle); + if (bdev_file) + fput(bdev_file); if (backing_dev) filp_close(backing_dev, NULL); @@ -587,7 +587,7 @@ static void read_from_bdev_async(struct zram *zram, struct page *page, { struct bio *bio; - bio = bio_alloc(zram->bdev_handle->bdev, 1, parent->bi_opf, GFP_NOIO); + bio = bio_alloc(file_bdev(zram->bdev_file), 1, parent->bi_opf, GFP_NOIO); bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9); __bio_add_page(bio, page, PAGE_SIZE, 0); bio_chain(bio, parent); @@ -703,7 +703,7 @@ static ssize_t writeback_store(struct device *dev, continue; } - bio_init(&bio, zram->bdev_handle->bdev, &bio_vec, 1, + bio_init(&bio, file_bdev(zram->bdev_file), &bio_vec, 1, REQ_OP_WRITE | REQ_SYNC); bio.bi_iter.bi_sector = blk_idx * 
(PAGE_SIZE >> 9); __bio_add_page(&bio, page, PAGE_SIZE, 0); @@ -785,7 +785,7 @@ static void zram_sync_read(struct work_struct *work) struct bio_vec bv; struct bio bio; - bio_init(&bio, zw->zram->bdev_handle->bdev, &bv, 1, REQ_OP_READ); + bio_init(&bio, file_bdev(zw->zram->bdev_file), &bv, 1, REQ_OP_READ); bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9); __bio_add_page(&bio, zw->page, PAGE_SIZE, 0); zw->error = submit_bio_wait(&bio); @@ -2177,6 +2177,28 @@ ATTRIBUTE_GROUPS(zram_disk); */ static int zram_add(void) { + struct queue_limits lim = { + .logical_block_size = ZRAM_LOGICAL_BLOCK_SIZE, + /* + * To ensure that we always get PAGE_SIZE aligned and + * n*PAGE_SIZED sized I/O requests. + */ + .physical_block_size = PAGE_SIZE, + .io_min = PAGE_SIZE, + .io_opt = PAGE_SIZE, + .max_hw_discard_sectors = UINT_MAX, + /* + * zram_bio_discard() will clear all logical blocks if logical + * block size is identical with physical block size(PAGE_SIZE). + * But if it is different, we will skip discarding some parts of + * logical blocks in the part of the request range which isn't + * aligned to physical block size. So we can't ensure that all + * discarded logical blocks are zeroed. + */ +#if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE + .max_write_zeroes_sectors = UINT_MAX, +#endif + }; struct zram *zram; int ret, device_id; @@ -2195,11 +2217,11 @@ static int zram_add(void) #endif /* gendisk structure */ - zram->disk = blk_alloc_disk(NUMA_NO_NODE); - if (!zram->disk) { + zram->disk = blk_alloc_disk(&lim, NUMA_NO_NODE); + if (IS_ERR(zram->disk)) { pr_err("Error allocating disk structure for device %d\n", device_id); - ret = -ENOMEM; + ret = PTR_ERR(zram->disk); goto out_free_idr; } @@ -2216,29 +2238,6 @@ static int zram_add(void) /* zram devices sort of resembles non-rotational disks */ blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue); blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue); - - /* - * To ensure that we always get PAGE_SIZE aligned - * and n*PAGE_SIZED sized I/O requests. - */ - blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE); - blk_queue_logical_block_size(zram->disk->queue, - ZRAM_LOGICAL_BLOCK_SIZE); - blk_queue_io_min(zram->disk->queue, PAGE_SIZE); - blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); - blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); - - /* - * zram_bio_discard() will clear all logical blocks if logical block - * size is identical with physical block size(PAGE_SIZE). But if it is - * different, we will skip discarding some parts of logical blocks in - * the part of the request range which isn't aligned to physical block - * size. So we can't ensure that all discarded logical blocks are - * zeroed. 
- */ - if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE) - blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX); - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue); ret = device_add_disk(NULL, zram->disk, zram_disk_groups); if (ret) diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 3b94d12f41b4..37bf29f34d26 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -132,7 +132,7 @@ struct zram { spinlock_t wb_limit_lock; bool wb_limit_enable; u64 bd_wb_limit; - struct bdev_handle *bdev_handle; + struct file *bdev_file; unsigned long *bitmap; unsigned long nr_pages; #endif diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index fdb0fae88d1c..b40b32fa7f1c 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -152,7 +152,7 @@ static int qca_send_patch_config_cmd(struct hci_dev *hdev) bt_dev_dbg(hdev, "QCA Patch config"); skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, sizeof(cmd), - cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT); + cmd, 0, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "Sending QCA Patch config failed (%d)", err); diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c index a61757835695..9a7243d5db71 100644 --- a/drivers/bluetooth/hci_bcm4377.c +++ b/drivers/bluetooth/hci_bcm4377.c @@ -1417,7 +1417,7 @@ static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377) bda = (struct hci_rp_read_bd_addr *)skb->data; if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr)) - set_bit(HCI_QUIRK_INVALID_BDADDR, &bcm4377->hdev->quirks); + set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &bcm4377->hdev->quirks); kfree_skb(skb); return 0; @@ -2368,7 +2368,6 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id) hdev->set_bdaddr = bcm4377_hci_set_bdaddr; hdev->setup = bcm4377_hci_setup; - set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); if (bcm4377->hw->broken_mws_transport_config) set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks); if (bcm4377->hw->broken_ext_scan) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 94b8c406f0c0..edd2a81b4d5e 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -7,6 +7,7 @@ * * Copyright (C) 2007 Texas Instruments, Inc. * Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. * * Acknowledgements: * This file is based on hci_ll.c, which was... @@ -1806,13 +1807,12 @@ static int qca_power_on(struct hci_dev *hdev) static void hci_coredump_qca(struct hci_dev *hdev) { + int err; static const u8 param[] = { 0x26 }; - struct sk_buff *skb; - skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT); - if (IS_ERR(skb)) - bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb)); - kfree_skb(skb); + err = __hci_cmd_send(hdev, 0xfc0c, 1, param); + if (err < 0) + bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err); } static int qca_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id) @@ -1904,7 +1904,17 @@ retry: case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: - set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); + + /* Set BDA quirk bit for reading BDA value from fwnode property + * only if that property exist in DT. 
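The xen-blkfront and zram hunks above (and the gdrom conversion further below) move queue configuration from blk_queue_*() calls on an already-allocated request_queue to a queue_limits structure handed to blk_mq_alloc_disk()/blk_alloc_disk(), with later runtime changes going through a queue_limits_start_update()/queue_limits_commit_update() pair. The following is a minimal, hypothetical sketch of that pattern; the mydrv_* names and the specific limit values are illustrative only, while the block-layer calls are the ones used by the conversions in these hunks.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Hypothetical driver: declare limits up front, apply them at allocation. */
static struct gendisk *mydrv_alloc_disk(struct blk_mq_tag_set *set, void *drvdata)
{
	struct queue_limits lim = {
		.logical_block_size	= 512,
		.max_hw_sectors		= 256,
		.max_segments		= 16,
	};

	/* Limits are validated and applied when the disk/queue is created. */
	return blk_mq_alloc_disk(set, &lim, drvdata);
}

/* Later adjustments use a start/commit pair instead of blk_queue_*() helpers. */
static int mydrv_grow_segments(struct request_queue *q, unsigned int segs)
{
	struct queue_limits lim = queue_limits_start_update(q);

	lim.max_segments = segs;
	return queue_limits_commit_update(q, &lim);
}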
+ */ + if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) { + set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); + bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later"); + } else { + bt_dev_dbg(hdev, "local-bd-address` is not present in the devicetree so not setting quirk bit for BDA"); + } + hci_set_aosp_capable(hdev); ret = qca_read_soc_version(hdev, &ver, soc_type); diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index e6742998f372..d5e7fa9173a1 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -186,11 +186,12 @@ config SUNXI_RSB config TEGRA_ACONNECT tristate "Tegra ACONNECT Bus Driver" - depends on ARCH_TEGRA_210_SOC + depends on ARCH_TEGRA depends on OF && PM help Driver for the Tegra ACONNECT bus which is used to interface with - the devices inside the Audio Processing Engine (APE) for Tegra210. + the devices inside the Audio Processing Engine (APE) for + Tegra210 and later. config TEGRA_GMI tristate "Tegra Generic Memory Interface bus driver" diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c index 6b5da73c8541..837bf9d51c6e 100644 --- a/drivers/bus/imx-weim.c +++ b/drivers/bus/imx-weim.c @@ -120,7 +120,7 @@ static int imx_weim_gpr_setup(struct platform_device *pdev) i++; } - if (i == 0 || i % 4) + if (i == 0) goto err; for (i = 0; i < ARRAY_SIZE(gprvals); i++) { diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index fd3e9d82340a..1e29ba76615d 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -128,7 +128,7 @@ struct sunxi_rsb { }; /* bus / slave device related functions */ -static struct bus_type sunxi_rsb_bus; +static const struct bus_type sunxi_rsb_bus; static int sunxi_rsb_device_match(struct device *dev, struct device_driver *drv) { @@ -177,7 +177,7 @@ static int sunxi_rsb_device_modalias(const struct device *dev, struct kobj_ueven return of_device_uevent_modalias(dev, env); } -static struct bus_type sunxi_rsb_bus = { +static const struct bus_type sunxi_rsb_bus = { .name = RSB_CTRL_NAME, .match = sunxi_rsb_device_match, .probe = sunxi_rsb_device_probe, diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 245e5e827d0d..41d33f39efe5 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -2400,7 +2400,7 @@ static int sysc_child_add_clocks(struct sysc *ddata, return 0; } -static struct device_type sysc_device_type = { +static const struct device_type sysc_device_type = { }; static struct sysc *sysc_child_to_parent(struct device *dev) diff --git a/drivers/cache/ax45mp_cache.c b/drivers/cache/ax45mp_cache.c index 57186c58dc84..1d7dd3d2c101 100644 --- a/drivers/cache/ax45mp_cache.c +++ b/drivers/cache/ax45mp_cache.c @@ -129,8 +129,12 @@ static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size) unsigned long line_size; unsigned long flags; + if (unlikely(start == end)) + return; + line_size = ax45mp_priv.ax45mp_cache_line_size; start = start & (~(line_size - 1)); + end = ((end + line_size - 1) & (~(line_size - 1))); local_irq_save(flags); ax45mp_cpu_dcache_wb_range(start, end); local_irq_restore(flags); diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index d668b174ace9..eefdd422ad8e 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -724,11 +724,6 @@ static void probe_gdrom_setupdisk(void) static int probe_gdrom_setupqueue(void) { - blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR); - /* using DMA so memory will need to be contiguous */ - blk_queue_max_segments(gd.gdrom_rq, 1); - /* set a large max size to get 
most from DMA */ - blk_queue_max_segment_size(gd.gdrom_rq, 0x40000); gd.disk->queue = gd.gdrom_rq; return gdrom_init_dma_mode(); } @@ -743,6 +738,13 @@ static const struct blk_mq_ops gdrom_mq_ops = { */ static int probe_gdrom(struct platform_device *devptr) { + struct queue_limits lim = { + .logical_block_size = GDROM_HARD_SECTOR, + /* using DMA so memory will need to be contiguous */ + .max_segments = 1, + /* set a large max size to get most from DMA */ + .max_segment_size = 0x40000, + }; int err; /* @@ -778,7 +780,7 @@ static int probe_gdrom(struct platform_device *devptr) if (err) goto probe_fail_free_cd_info; - gd.disk = blk_mq_alloc_disk(&gd.tag_set, NULL); + gd.disk = blk_mq_alloc_disk(&gd.tag_set, &lim, NULL); if (IS_ERR(gd.disk)) { err = PTR_ERR(gd.disk); goto probe_fail_free_tag_set; @@ -829,7 +831,7 @@ probe_fail_no_mem: return err; } -static int remove_gdrom(struct platform_device *devptr) +static void remove_gdrom(struct platform_device *devptr) { blk_mq_free_tag_set(&gd.tag_set); free_irq(HW_EVENT_GDROM_CMD, &gd); @@ -840,13 +842,11 @@ static int remove_gdrom(struct platform_device *devptr) unregister_cdrom(gd.cd_info); kfree(gd.cd_info); kfree(gd.toc); - - return 0; } static struct platform_driver gdrom_driver = { .probe = probe_gdrom, - .remove = remove_gdrom, + .remove_new = remove_gdrom, .driver = { .name = GDROM_DEV_NAME, }, diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c index 6994165e0395..0b60ae78f9d8 100644 --- a/drivers/clk/rockchip/clk-rk3588.c +++ b/drivers/clk/rockchip/clk-rk3588.c @@ -2458,15 +2458,18 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = { static void __init rk3588_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; + unsigned long clk_nr_clks; void __iomem *reg_base; + clk_nr_clks = rockchip_clk_find_max_clk_id(rk3588_clk_branches, + ARRAY_SIZE(rk3588_clk_branches)) + 1; reg_base = of_iomap(np, 0); if (!reg_base) { pr_err("%s: could not map cru region\n", __func__); return; } - ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); + ctx = rockchip_clk_init(np, reg_base, clk_nr_clks); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); iounmap(reg_base); diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c index 4059d9365ae6..73d2cbdc716b 100644 --- a/drivers/clk/rockchip/clk.c +++ b/drivers/clk/rockchip/clk.c @@ -429,6 +429,23 @@ void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx, } EXPORT_SYMBOL_GPL(rockchip_clk_register_plls); +unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list, + unsigned int nr_clk) +{ + unsigned long max = 0; + unsigned int idx; + + for (idx = 0; idx < nr_clk; idx++, list++) { + if (list->id > max) + max = list->id; + if (list->child && list->child->id > max) + max = list->id; + } + + return max; +} +EXPORT_SYMBOL_GPL(rockchip_clk_find_max_clk_id); + void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, struct rockchip_clk_branch *list, unsigned int nr_clk) diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h index 758ebaf2236b..fd3b476dedda 100644 --- a/drivers/clk/rockchip/clk.h +++ b/drivers/clk/rockchip/clk.h @@ -973,6 +973,8 @@ struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np, void __iomem *base, unsigned long nr_clks); void rockchip_clk_of_add_provider(struct device_node *np, struct rockchip_clk_provider *ctx); +unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list, + unsigned int nr_clk); void 
rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, struct rockchip_clk_branch *list, unsigned int nr_clk); diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c index 0964bb11657f..782993951fff 100644 --- a/drivers/clk/samsung/clk-gs101.c +++ b/drivers/clk/samsung/clk-gs101.c @@ -2475,7 +2475,7 @@ static const struct samsung_cmu_info misc_cmu_info __initconst = { .nr_clk_ids = CLKS_NR_MISC, .clk_regs = misc_clk_regs, .nr_clk_regs = ARRAY_SIZE(misc_clk_regs), - .clk_name = "dout_cmu_misc_bus", + .clk_name = "bus", }; /* ---- platform_driver ----------------------------------------------------- */ diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index e054de92de91..8d4a52056684 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -1807,7 +1807,7 @@ TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init); #endif int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts, - struct clocksource **cs) + enum clocksource_ids *cs_id) { struct arm_smccc_res hvc_res; u32 ptp_counter; @@ -1831,8 +1831,8 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts, *ts = ktime_to_timespec64(ktime); if (cycle) *cycle = (u64)hvc_res.a2 << 32 | hvc_res.a3; - if (cs) - *cs = &clocksource_counter; + if (cs_id) + *cs_id = CSID_ARM_ARCH_COUNTER; return 0; } diff --git a/drivers/comedi/drivers/comedi_8255.c b/drivers/comedi/drivers/comedi_8255.c index e4974b508328..a933ef53845a 100644 --- a/drivers/comedi/drivers/comedi_8255.c +++ b/drivers/comedi/drivers/comedi_8255.c @@ -159,6 +159,7 @@ static int __subdev_8255_init(struct comedi_device *dev, return -ENOMEM; spriv->context = context; + spriv->io = io; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c index 30ea8b53ebf8..05ae9122823f 100644 --- a/drivers/comedi/drivers/comedi_test.c +++ b/drivers/comedi/drivers/comedi_test.c @@ -87,6 +87,8 @@ struct waveform_private { struct comedi_device *dev; /* parent comedi device */ u64 ao_last_scan_time; /* time of previous AO scan in usec */ unsigned int ao_scan_period; /* AO scan period in usec */ + bool ai_timer_enable:1; /* should AI timer be running? */ + bool ao_timer_enable:1; /* should AO timer be running? */ unsigned short ao_loopbacks[N_CHANS]; }; @@ -236,8 +238,12 @@ static void waveform_ai_timer(struct timer_list *t) time_increment = devpriv->ai_convert_time - now; else time_increment = 1; - mod_timer(&devpriv->ai_timer, - jiffies + usecs_to_jiffies(time_increment)); + spin_lock(&dev->spinlock); + if (devpriv->ai_timer_enable) { + mod_timer(&devpriv->ai_timer, + jiffies + usecs_to_jiffies(time_increment)); + } + spin_unlock(&dev->spinlock); } overrun: @@ -393,9 +399,12 @@ static int waveform_ai_cmd(struct comedi_device *dev, * Seem to need an extra jiffy here, otherwise timer expires slightly * early! */ + spin_lock_bh(&dev->spinlock); + devpriv->ai_timer_enable = true; devpriv->ai_timer.expires = jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1; add_timer(&devpriv->ai_timer); + spin_unlock_bh(&dev->spinlock); return 0; } @@ -404,6 +413,9 @@ static int waveform_ai_cancel(struct comedi_device *dev, { struct waveform_private *devpriv = dev->private; + spin_lock_bh(&dev->spinlock); + devpriv->ai_timer_enable = false; + spin_unlock_bh(&dev->spinlock); if (in_softirq()) { /* Assume we were called from the timer routine itself. 
*/ del_timer(&devpriv->ai_timer); @@ -495,8 +507,12 @@ static void waveform_ao_timer(struct timer_list *t) unsigned int time_inc = devpriv->ao_last_scan_time + devpriv->ao_scan_period - now; - mod_timer(&devpriv->ao_timer, - jiffies + usecs_to_jiffies(time_inc)); + spin_lock(&dev->spinlock); + if (devpriv->ao_timer_enable) { + mod_timer(&devpriv->ao_timer, + jiffies + usecs_to_jiffies(time_inc)); + } + spin_unlock(&dev->spinlock); } underrun: @@ -517,9 +533,12 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev, async->inttrig = NULL; devpriv->ao_last_scan_time = ktime_to_us(ktime_get()); + spin_lock_bh(&dev->spinlock); + devpriv->ao_timer_enable = true; devpriv->ao_timer.expires = jiffies + usecs_to_jiffies(devpriv->ao_scan_period); add_timer(&devpriv->ao_timer); + spin_unlock_bh(&dev->spinlock); return 1; } @@ -604,6 +623,9 @@ static int waveform_ao_cancel(struct comedi_device *dev, struct waveform_private *devpriv = dev->private; s->async->inttrig = NULL; + spin_lock_bh(&dev->spinlock); + devpriv->ao_timer_enable = false; + spin_unlock_bh(&dev->spinlock); if (in_softirq()) { /* Assume we were called from the timer routine itself. */ del_timer(&devpriv->ao_timer); diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 3d5e6d705fc6..44b19e696176 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -108,9 +108,8 @@ static inline void send_msg(struct cn_msg *msg) filter_data[1] = 0; } - if (cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT, - cn_filter, (void *)filter_data) == -ESRCH) - atomic_set(&proc_event_num_listeners, 0); + cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT, + cn_filter, (void *)filter_data); local_unlock(&local_event.lock); } diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c index 09c77afb33ca..3f24481fc04a 100644 --- a/drivers/counter/counter-core.c +++ b/drivers/counter/counter-core.c @@ -31,10 +31,11 @@ struct counter_device_allochelper { struct counter_device counter; /* - * This is cache line aligned to ensure private data behaves like if it - * were kmalloced separately. + * This ensures private data behaves like if it were kmalloced + * separately. Also ensures the minimum alignment for safe DMA + * operations (which may or may not mean cache alignment). 
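The comedi_test changes above guard every mod_timer()/add_timer() with an ai/ao_timer_enable flag taken under dev->spinlock, so a cancel racing with the timer handler cannot re-arm a timer that is being (or has just been) deleted. A condensed, hypothetical sketch of the same pattern follows; the mydrv_* names and the 10 ms period are illustrative, not taken from the driver.

#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct mydrv {
	spinlock_t lock;
	bool timer_enable;	/* should the timer keep re-arming itself? */
	struct timer_list timer;
};

static void mydrv_timer_fn(struct timer_list *t)
{
	struct mydrv *p = from_timer(p, t, timer);

	/* ... periodic work ... */

	spin_lock(&p->lock);
	if (p->timer_enable)		/* only re-arm while still enabled */
		mod_timer(&p->timer, jiffies + msecs_to_jiffies(10));
	spin_unlock(&p->lock);
}

static void mydrv_init(struct mydrv *p)
{
	spin_lock_init(&p->lock);
	timer_setup(&p->timer, mydrv_timer_fn, 0);
}

static void mydrv_start(struct mydrv *p)
{
	spin_lock_bh(&p->lock);
	p->timer_enable = true;
	p->timer.expires = jiffies + msecs_to_jiffies(10);
	add_timer(&p->timer);
	spin_unlock_bh(&p->lock);
}

static void mydrv_stop(struct mydrv *p)
{
	spin_lock_bh(&p->lock);
	p->timer_enable = false;	/* handler will no longer re-arm */
	spin_unlock_bh(&p->lock);
	del_timer_sync(&p->timer);
}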
*/ - unsigned long privdata[] ____cacheline_aligned; + unsigned long privdata[] __aligned(ARCH_DMA_MINALIGN); }; static void counter_device_release(struct device *dev) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index ca94e60e705a..79619227ea51 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2987,6 +2987,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum, if (min_pstate < cpu->min_perf_ratio) min_pstate = cpu->min_perf_ratio; + if (min_pstate > cpu->max_perf_ratio) + min_pstate = cpu->max_perf_ratio; + max_pstate = min(cap_pstate, cpu->max_perf_ratio); if (max_pstate < min_pstate) max_pstate = min_pstate; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 1262a7773ef3..de50c00ba218 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -299,22 +299,6 @@ theend: return err; } -static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq) -{ - struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq); - struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); - struct sun8i_ce_dev *ce = op->ce; - struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq); - int flow, err; - - flow = rctx->flow; - err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm)); - local_bh_disable(); - crypto_finalize_skcipher_request(engine, breq, err); - local_bh_enable(); -} - static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req) { @@ -360,6 +344,23 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine, dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE); } +static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq) +{ + struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ce_dev *ce = op->ce; + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq); + int flow, err; + + flow = rctx->flow; + err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm)); + sun8i_ce_cipher_unprepare(engine, areq); + local_bh_disable(); + crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); +} + int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq) { int err = sun8i_ce_cipher_prepare(engine, areq); @@ -368,7 +369,6 @@ int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq) return err; sun8i_ce_cipher_run(engine, areq); - sun8i_ce_cipher_unprepare(engine, areq); return 0; } diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index a148ff1f0872..a4f6884416a0 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -4545,6 +4545,7 @@ struct caam_hash_alg { struct list_head entry; struct device *dev; int alg_type; + bool is_hmac; struct ahash_alg ahash_alg; }; @@ -4571,7 +4572,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) ctx->dev = caam_hash->dev; - if (alg->setkey) { + if (caam_hash->is_hmac) { ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key, ARRAY_SIZE(ctx->key), DMA_TO_DEVICE, @@ -4611,7 +4612,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) * For keyed hash algorithms shared descriptors 
* will be created later in setkey() callback */ - return alg->setkey ? 0 : ahash_set_sh_desc(ahash); + return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash); } static void caam_hash_cra_exit(struct crypto_tfm *tfm) @@ -4646,12 +4647,14 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev, template->hmac_name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->hmac_driver_name); + t_alg->is_hmac = true; } else { snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->driver_name); t_alg->ahash_alg.setkey = NULL; + t_alg->is_hmac = false; } alg->cra_module = THIS_MODULE; alg->cra_init = caam_hash_cra_init; diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 290c8500c247..fdd724228c2f 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1753,6 +1753,7 @@ static struct caam_hash_template driver_hash[] = { struct caam_hash_alg { struct list_head entry; int alg_type; + bool is_hmac; struct ahash_engine_alg ahash_alg; }; @@ -1804,7 +1805,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) } else { if (priv->era >= 6) { ctx->dir = DMA_BIDIRECTIONAL; - ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE; + ctx->key_dir = caam_hash->is_hmac ? DMA_TO_DEVICE : DMA_NONE; } else { ctx->dir = DMA_TO_DEVICE; ctx->key_dir = DMA_NONE; @@ -1862,7 +1863,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) * For keyed hash algorithms shared descriptors * will be created later in setkey() callback */ - return alg->setkey ? 0 : ahash_set_sh_desc(ahash); + return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash); } static void caam_hash_cra_exit(struct crypto_tfm *tfm) @@ -1915,12 +1916,14 @@ caam_hash_alloc(struct caam_hash_template *template, template->hmac_name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->hmac_driver_name); + t_alg->is_hmac = true; } else { snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->driver_name); halg->setkey = NULL; + t_alg->is_hmac = false; } alg->cra_module = THIS_MODULE; alg->cra_init = caam_hash_cra_init; diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 32268e239bf1..f394e45e11ab 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -38,7 +38,7 @@ config CRYPTO_DEV_CCP_CRYPTO config CRYPTO_DEV_SP_PSP bool "Platform Security Processor (PSP) device" default y - depends on CRYPTO_DEV_CCP_DD && X86_64 + depends on CRYPTO_DEV_CCP_DD && X86_64 && AMD_IOMMU help Provide support for the AMD Platform Security Processor (PSP). 
The PSP is a dedicated processor that provides support for key diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index e4d3f45242f6..f44efbb89c34 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -21,14 +21,18 @@ #include <linux/hw_random.h> #include <linux/ccp.h> #include <linux/firmware.h> +#include <linux/panic_notifier.h> #include <linux/gfp.h> #include <linux/cpufeature.h> #include <linux/fs.h> #include <linux/fs_struct.h> #include <linux/psp.h> +#include <linux/amd-iommu.h> #include <asm/smp.h> #include <asm/cacheflush.h> +#include <asm/e820/types.h> +#include <asm/sev.h> #include "psp-dev.h" #include "sev-dev.h" @@ -37,6 +41,19 @@ #define SEV_FW_FILE "amd/sev.fw" #define SEV_FW_NAME_SIZE 64 +/* Minimum firmware version required for the SEV-SNP support */ +#define SNP_MIN_API_MAJOR 1 +#define SNP_MIN_API_MINOR 51 + +/* + * Maximum number of firmware-writable buffers that might be specified + * in the parameters of a legacy SEV command buffer. + */ +#define CMD_BUF_FW_WRITABLE_MAX 2 + +/* Leave room in the descriptor array for an end-of-list indicator. */ +#define CMD_BUF_DESC_MAX (CMD_BUF_FW_WRITABLE_MAX + 1) + static DEFINE_MUTEX(sev_cmd_mutex); static struct sev_misc_dev *misc_dev; @@ -68,9 +85,14 @@ static int psp_timeout; * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator * to allocate the memory, which will return aligned memory for the specified * allocation order. + * + * When SEV-SNP is enabled the TMR needs to be 2MB aligned and 2MB sized. */ -#define SEV_ES_TMR_SIZE (1024 * 1024) +#define SEV_TMR_SIZE (1024 * 1024) +#define SNP_TMR_SIZE (2 * 1024 * 1024) + static void *sev_es_tmr; +static size_t sev_es_tmr_size = SEV_TMR_SIZE; /* INIT_EX NV Storage: * The NV Storage is a 32Kb area and must be 4Kb page aligned. Use the page @@ -80,6 +102,13 @@ static void *sev_es_tmr; #define NV_LENGTH (32 * 1024) static void *sev_init_ex_buffer; +/* + * SEV_DATA_RANGE_LIST: + * Array containing range of pages that firmware transitions to HV-fixed + * page state. + */ +static struct sev_data_range_list *snp_range_list; + static inline bool sev_version_greater_or_equal(u8 maj, u8 min) { struct sev_device *sev = psp_master->sev_data; @@ -115,6 +144,25 @@ static int sev_wait_cmd_ioc(struct sev_device *sev, { int ret; + /* + * If invoked during panic handling, local interrupts are disabled, + * so the PSP command completion interrupt can't be used. Poll for + * PSP command completion instead. 
+ */ + if (irqs_disabled()) { + unsigned long timeout_usecs = (timeout * USEC_PER_SEC) / 10; + + /* Poll for SEV command completion: */ + while (timeout_usecs--) { + *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); + if (*reg & PSP_CMDRESP_RESP) + return 0; + + udelay(10); + } + return -ETIMEDOUT; + } + ret = wait_event_timeout(sev->int_queue, sev->int_rcvd, timeout * HZ); if (!ret) @@ -130,6 +178,8 @@ static int sev_cmd_buffer_len(int cmd) switch (cmd) { case SEV_CMD_INIT: return sizeof(struct sev_data_init); case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex); + case SEV_CMD_SNP_SHUTDOWN_EX: return sizeof(struct sev_data_snp_shutdown_ex); + case SEV_CMD_SNP_INIT_EX: return sizeof(struct sev_data_snp_init_ex); case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status); case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr); case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import); @@ -158,23 +208,27 @@ static int sev_cmd_buffer_len(int cmd) case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id); case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report); case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel); + case SEV_CMD_SNP_GCTX_CREATE: return sizeof(struct sev_data_snp_addr); + case SEV_CMD_SNP_LAUNCH_START: return sizeof(struct sev_data_snp_launch_start); + case SEV_CMD_SNP_LAUNCH_UPDATE: return sizeof(struct sev_data_snp_launch_update); + case SEV_CMD_SNP_ACTIVATE: return sizeof(struct sev_data_snp_activate); + case SEV_CMD_SNP_DECOMMISSION: return sizeof(struct sev_data_snp_addr); + case SEV_CMD_SNP_PAGE_RECLAIM: return sizeof(struct sev_data_snp_page_reclaim); + case SEV_CMD_SNP_GUEST_STATUS: return sizeof(struct sev_data_snp_guest_status); + case SEV_CMD_SNP_LAUNCH_FINISH: return sizeof(struct sev_data_snp_launch_finish); + case SEV_CMD_SNP_DBG_DECRYPT: return sizeof(struct sev_data_snp_dbg); + case SEV_CMD_SNP_DBG_ENCRYPT: return sizeof(struct sev_data_snp_dbg); + case SEV_CMD_SNP_PAGE_UNSMASH: return sizeof(struct sev_data_snp_page_unsmash); + case SEV_CMD_SNP_PLATFORM_STATUS: return sizeof(struct sev_data_snp_addr); + case SEV_CMD_SNP_GUEST_REQUEST: return sizeof(struct sev_data_snp_guest_request); + case SEV_CMD_SNP_CONFIG: return sizeof(struct sev_user_data_snp_config); + case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit); default: return 0; } return 0; } -static void *sev_fw_alloc(unsigned long len) -{ - struct page *page; - - page = alloc_pages(GFP_KERNEL, get_order(len)); - if (!page) - return NULL; - - return page_address(page); -} - static struct file *open_file_as_root(const char *filename, int flags, umode_t mode) { struct file *fp; @@ -305,13 +359,485 @@ static int sev_write_init_ex_file_if_required(int cmd_id) return sev_write_init_ex_file(); } +/* + * snp_reclaim_pages() needs __sev_do_cmd_locked(), and __sev_do_cmd_locked() + * needs snp_reclaim_pages(), so a forward declaration is needed. 
+ */ +static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret); + +static int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked) +{ + int ret, err, i; + + paddr = __sme_clr(ALIGN_DOWN(paddr, PAGE_SIZE)); + + for (i = 0; i < npages; i++, paddr += PAGE_SIZE) { + struct sev_data_snp_page_reclaim data = {0}; + + data.paddr = paddr; + + if (locked) + ret = __sev_do_cmd_locked(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err); + else + ret = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err); + + if (ret) + goto cleanup; + + ret = rmp_make_shared(__phys_to_pfn(paddr), PG_LEVEL_4K); + if (ret) + goto cleanup; + } + + return 0; + +cleanup: + /* + * If there was a failure reclaiming the page then it is no longer safe + * to release it back to the system; leak it instead. + */ + snp_leak_pages(__phys_to_pfn(paddr), npages - i); + return ret; +} + +static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked) +{ + unsigned long pfn = __sme_clr(paddr) >> PAGE_SHIFT; + int rc, i; + + for (i = 0; i < npages; i++, pfn++) { + rc = rmp_make_private(pfn, 0, PG_LEVEL_4K, 0, true); + if (rc) + goto cleanup; + } + + return 0; + +cleanup: + /* + * Try unrolling the firmware state changes by + * reclaiming the pages which were already changed to the + * firmware state. + */ + snp_reclaim_pages(paddr, i, locked); + + return rc; +} + +static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order) +{ + unsigned long npages = 1ul << order, paddr; + struct sev_device *sev; + struct page *page; + + if (!psp_master || !psp_master->sev_data) + return NULL; + + page = alloc_pages(gfp_mask, order); + if (!page) + return NULL; + + /* If SEV-SNP is initialized then add the page in RMP table. */ + sev = psp_master->sev_data; + if (!sev->snp_initialized) + return page; + + paddr = __pa((unsigned long)page_address(page)); + if (rmp_mark_pages_firmware(paddr, npages, false)) + return NULL; + + return page; +} + +void *snp_alloc_firmware_page(gfp_t gfp_mask) +{ + struct page *page; + + page = __snp_alloc_firmware_pages(gfp_mask, 0); + + return page ? page_address(page) : NULL; +} +EXPORT_SYMBOL_GPL(snp_alloc_firmware_page); + +static void __snp_free_firmware_pages(struct page *page, int order, bool locked) +{ + struct sev_device *sev = psp_master->sev_data; + unsigned long paddr, npages = 1ul << order; + + if (!page) + return; + + paddr = __pa((unsigned long)page_address(page)); + if (sev->snp_initialized && + snp_reclaim_pages(paddr, npages, locked)) + return; + + __free_pages(page, order); +} + +void snp_free_firmware_page(void *addr) +{ + if (!addr) + return; + + __snp_free_firmware_pages(virt_to_page(addr), 0, false); +} +EXPORT_SYMBOL_GPL(snp_free_firmware_page); + +static void *sev_fw_alloc(unsigned long len) +{ + struct page *page; + + page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len)); + if (!page) + return NULL; + + return page_address(page); +} + +/** + * struct cmd_buf_desc - descriptors for managing legacy SEV command address + * parameters corresponding to buffers that may be written to by firmware. + * + * @paddr_ptr: pointer to the address parameter in the command buffer which may + * need to be saved/restored depending on whether a bounce buffer + * is used. In the case of a bounce buffer, the command buffer + * needs to be updated with the address of the new bounce buffer + * snp_map_cmd_buf_desc() has allocated specifically for it. Must + * be NULL if this descriptor is only an end-of-list indicator. 
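snp_alloc_firmware_page() and snp_free_firmware_page() above are exported so code outside this file can obtain pages that are already in the correct RMP state for the firmware (they fall back to ordinary page allocation when SNP is not initialized). A hypothetical caller might look like the sketch below; the mydrv_* wrappers are illustrative only.

#include <linux/psp-sev.h>
#include <linux/gfp.h>

/* Illustrative only: get a page the PSP firmware may write to. */
static void *mydrv_get_fw_buffer(void)
{
	/*
	 * Returns a firmware-owned page when SNP is initialized, a normal
	 * page otherwise, or NULL on failure (including a failed RMP
	 * state transition).
	 */
	return snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT);
}

static void mydrv_put_fw_buffer(void *buf)
{
	/* Reclaims the page from the firmware before freeing it. */
	snp_free_firmware_page(buf);
}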
+ * + * @paddr_orig: storage for the original address parameter, which can be used to + * restore the original value in @paddr_ptr in cases where it is + * replaced with the address of a bounce buffer. + * + * @len: length of buffer located at the address originally stored at @paddr_ptr + * + * @guest_owned: true if the address corresponds to guest-owned pages, in which + * case bounce buffers are not needed. + */ +struct cmd_buf_desc { + u64 *paddr_ptr; + u64 paddr_orig; + u32 len; + bool guest_owned; +}; + +/* + * If a legacy SEV command parameter is a memory address, those pages in + * turn need to be transitioned to/from firmware-owned before/after + * executing the firmware command. + * + * Additionally, in cases where those pages are not guest-owned, a bounce + * buffer is needed in place of the original memory address parameter. + * + * A set of descriptors are used to keep track of this handling, and + * initialized here based on the specific commands being executed. + */ +static void snp_populate_cmd_buf_desc_list(int cmd, void *cmd_buf, + struct cmd_buf_desc *desc_list) +{ + switch (cmd) { + case SEV_CMD_PDH_CERT_EXPORT: { + struct sev_data_pdh_cert_export *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->pdh_cert_address; + desc_list[0].len = data->pdh_cert_len; + desc_list[1].paddr_ptr = &data->cert_chain_address; + desc_list[1].len = data->cert_chain_len; + break; + } + case SEV_CMD_GET_ID: { + struct sev_data_get_id *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->address; + desc_list[0].len = data->len; + break; + } + case SEV_CMD_PEK_CSR: { + struct sev_data_pek_csr *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->address; + desc_list[0].len = data->len; + break; + } + case SEV_CMD_LAUNCH_UPDATE_DATA: { + struct sev_data_launch_update_data *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->address; + desc_list[0].len = data->len; + desc_list[0].guest_owned = true; + break; + } + case SEV_CMD_LAUNCH_UPDATE_VMSA: { + struct sev_data_launch_update_vmsa *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->address; + desc_list[0].len = data->len; + desc_list[0].guest_owned = true; + break; + } + case SEV_CMD_LAUNCH_MEASURE: { + struct sev_data_launch_measure *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->address; + desc_list[0].len = data->len; + break; + } + case SEV_CMD_LAUNCH_UPDATE_SECRET: { + struct sev_data_launch_secret *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->guest_address; + desc_list[0].len = data->guest_len; + desc_list[0].guest_owned = true; + break; + } + case SEV_CMD_DBG_DECRYPT: { + struct sev_data_dbg *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->dst_addr; + desc_list[0].len = data->len; + desc_list[0].guest_owned = true; + break; + } + case SEV_CMD_DBG_ENCRYPT: { + struct sev_data_dbg *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->dst_addr; + desc_list[0].len = data->len; + desc_list[0].guest_owned = true; + break; + } + case SEV_CMD_ATTESTATION_REPORT: { + struct sev_data_attestation_report *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->address; + desc_list[0].len = data->len; + break; + } + case SEV_CMD_SEND_START: { + struct sev_data_send_start *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->session_address; + desc_list[0].len = data->session_len; + break; + } + case SEV_CMD_SEND_UPDATE_DATA: { + struct sev_data_send_update_data *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->hdr_address; + desc_list[0].len = data->hdr_len; + desc_list[1].paddr_ptr = &data->trans_address; + desc_list[1].len = 
data->trans_len; + break; + } + case SEV_CMD_SEND_UPDATE_VMSA: { + struct sev_data_send_update_vmsa *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->hdr_address; + desc_list[0].len = data->hdr_len; + desc_list[1].paddr_ptr = &data->trans_address; + desc_list[1].len = data->trans_len; + break; + } + case SEV_CMD_RECEIVE_UPDATE_DATA: { + struct sev_data_receive_update_data *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->guest_address; + desc_list[0].len = data->guest_len; + desc_list[0].guest_owned = true; + break; + } + case SEV_CMD_RECEIVE_UPDATE_VMSA: { + struct sev_data_receive_update_vmsa *data = cmd_buf; + + desc_list[0].paddr_ptr = &data->guest_address; + desc_list[0].len = data->guest_len; + desc_list[0].guest_owned = true; + break; + } + default: + break; + } +} + +static int snp_map_cmd_buf_desc(struct cmd_buf_desc *desc) +{ + unsigned int npages; + + if (!desc->len) + return 0; + + /* Allocate a bounce buffer if this isn't a guest owned page. */ + if (!desc->guest_owned) { + struct page *page; + + page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(desc->len)); + if (!page) { + pr_warn("Failed to allocate bounce buffer for SEV legacy command.\n"); + return -ENOMEM; + } + + desc->paddr_orig = *desc->paddr_ptr; + *desc->paddr_ptr = __psp_pa(page_to_virt(page)); + } + + npages = PAGE_ALIGN(desc->len) >> PAGE_SHIFT; + + /* Transition the buffer to firmware-owned. */ + if (rmp_mark_pages_firmware(*desc->paddr_ptr, npages, true)) { + pr_warn("Error moving pages to firmware-owned state for SEV legacy command.\n"); + return -EFAULT; + } + + return 0; +} + +static int snp_unmap_cmd_buf_desc(struct cmd_buf_desc *desc) +{ + unsigned int npages; + + if (!desc->len) + return 0; + + npages = PAGE_ALIGN(desc->len) >> PAGE_SHIFT; + + /* Transition the buffers back to hypervisor-owned. */ + if (snp_reclaim_pages(*desc->paddr_ptr, npages, true)) { + pr_warn("Failed to reclaim firmware-owned pages while issuing SEV legacy command.\n"); + return -EFAULT; + } + + /* Copy data from bounce buffer and then free it. */ + if (!desc->guest_owned) { + void *bounce_buf = __va(__sme_clr(*desc->paddr_ptr)); + void *dst_buf = __va(__sme_clr(desc->paddr_orig)); + + memcpy(dst_buf, bounce_buf, desc->len); + __free_pages(virt_to_page(bounce_buf), get_order(desc->len)); + + /* Restore the original address in the command buffer. 
*/ + *desc->paddr_ptr = desc->paddr_orig; + } + + return 0; +} + +static int snp_map_cmd_buf_desc_list(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list) +{ + int i; + + snp_populate_cmd_buf_desc_list(cmd, cmd_buf, desc_list); + + for (i = 0; i < CMD_BUF_DESC_MAX; i++) { + struct cmd_buf_desc *desc = &desc_list[i]; + + if (!desc->paddr_ptr) + break; + + if (snp_map_cmd_buf_desc(desc)) + goto err_unmap; + } + + return 0; + +err_unmap: + for (i--; i >= 0; i--) + snp_unmap_cmd_buf_desc(&desc_list[i]); + + return -EFAULT; +} + +static int snp_unmap_cmd_buf_desc_list(struct cmd_buf_desc *desc_list) +{ + int i, ret = 0; + + for (i = 0; i < CMD_BUF_DESC_MAX; i++) { + struct cmd_buf_desc *desc = &desc_list[i]; + + if (!desc->paddr_ptr) + break; + + if (snp_unmap_cmd_buf_desc(&desc_list[i])) + ret = -EFAULT; + } + + return ret; +} + +static bool sev_cmd_buf_writable(int cmd) +{ + switch (cmd) { + case SEV_CMD_PLATFORM_STATUS: + case SEV_CMD_GUEST_STATUS: + case SEV_CMD_LAUNCH_START: + case SEV_CMD_RECEIVE_START: + case SEV_CMD_LAUNCH_MEASURE: + case SEV_CMD_SEND_START: + case SEV_CMD_SEND_UPDATE_DATA: + case SEV_CMD_SEND_UPDATE_VMSA: + case SEV_CMD_PEK_CSR: + case SEV_CMD_PDH_CERT_EXPORT: + case SEV_CMD_GET_ID: + case SEV_CMD_ATTESTATION_REPORT: + return true; + default: + return false; + } +} + +/* After SNP is INIT'ed, the behavior of legacy SEV commands is changed. */ +static bool snp_legacy_handling_needed(int cmd) +{ + struct sev_device *sev = psp_master->sev_data; + + return cmd < SEV_CMD_SNP_INIT && sev->snp_initialized; +} + +static int snp_prep_cmd_buf(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list) +{ + if (!snp_legacy_handling_needed(cmd)) + return 0; + + if (snp_map_cmd_buf_desc_list(cmd, cmd_buf, desc_list)) + return -EFAULT; + + /* + * Before command execution, the command buffer needs to be put into + * the firmware-owned state. + */ + if (sev_cmd_buf_writable(cmd)) { + if (rmp_mark_pages_firmware(__pa(cmd_buf), 1, true)) + return -EFAULT; + } + + return 0; +} + +static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf) +{ + if (!snp_legacy_handling_needed(cmd)) + return 0; + + /* + * After command completion, the command buffer needs to be put back + * into the hypervisor-owned state. + */ + if (sev_cmd_buf_writable(cmd)) + if (snp_reclaim_pages(__pa(cmd_buf), 1, true)) + return -EFAULT; + + return 0; +} + static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) { + struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0}; struct psp_device *psp = psp_master; struct sev_device *sev; unsigned int cmdbuff_hi, cmdbuff_lo; unsigned int phys_lsb, phys_msb; unsigned int reg, ret = 0; + void *cmd_buf; int buf_len; if (!psp || !psp->sev_data) @@ -331,12 +857,47 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) * work for some memory, e.g. vmalloc'd addresses, and @data may not be * physically contiguous. */ - if (data) - memcpy(sev->cmd_buf, data, buf_len); + if (data) { + /* + * Commands are generally issued one at a time and require the + * sev_cmd_mutex, but there could be recursive firmware requests + * due to SEV_CMD_SNP_PAGE_RECLAIM needing to be issued while + * preparing buffers for another command. This is the only known + * case of nesting in the current code, so exactly one + * additional command buffer is available for that purpose. 
+ */ + if (!sev->cmd_buf_active) { + cmd_buf = sev->cmd_buf; + sev->cmd_buf_active = true; + } else if (!sev->cmd_buf_backup_active) { + cmd_buf = sev->cmd_buf_backup; + sev->cmd_buf_backup_active = true; + } else { + dev_err(sev->dev, + "SEV: too many firmware commands in progress, no command buffers available.\n"); + return -EBUSY; + } + + memcpy(cmd_buf, data, buf_len); + + /* + * The behavior of the SEV-legacy commands is altered when the + * SNP firmware is in the INIT state. + */ + ret = snp_prep_cmd_buf(cmd, cmd_buf, desc_list); + if (ret) { + dev_err(sev->dev, + "SEV: failed to prepare buffer for legacy command 0x%x. Error: %d\n", + cmd, ret); + return ret; + } + } else { + cmd_buf = sev->cmd_buf; + } /* Get the physical address of the command buffer */ - phys_lsb = data ? lower_32_bits(__psp_pa(sev->cmd_buf)) : 0; - phys_msb = data ? upper_32_bits(__psp_pa(sev->cmd_buf)) : 0; + phys_lsb = data ? lower_32_bits(__psp_pa(cmd_buf)) : 0; + phys_msb = data ? upper_32_bits(__psp_pa(cmd_buf)) : 0; dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", cmd, phys_msb, phys_lsb, psp_timeout); @@ -390,20 +951,41 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) ret = sev_write_init_ex_file_if_required(cmd); } - print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, - buf_len, false); - /* * Copy potential output from the PSP back to data. Do this even on * failure in case the caller wants to glean something from the error. */ - if (data) - memcpy(data, sev->cmd_buf, buf_len); + if (data) { + int ret_reclaim; + /* + * Restore the page state after the command completes. + */ + ret_reclaim = snp_reclaim_cmd_buf(cmd, cmd_buf); + if (ret_reclaim) { + dev_err(sev->dev, + "SEV: failed to reclaim buffer for legacy command %#x. 
Error: %d\n", + cmd, ret_reclaim); + return ret_reclaim; + } + + memcpy(data, cmd_buf, buf_len); + + if (sev->cmd_buf_backup_active) + sev->cmd_buf_backup_active = false; + else + sev->cmd_buf_active = false; + + if (snp_unmap_cmd_buf_desc_list(desc_list)) + return -EFAULT; + } + + print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, + buf_len, false); return ret; } -static int sev_do_cmd(int cmd, void *data, int *psp_ret) +int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; @@ -413,6 +995,7 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret) return rc; } +EXPORT_SYMBOL_GPL(sev_do_cmd); static int __sev_init_locked(int *error) { @@ -427,7 +1010,7 @@ static int __sev_init_locked(int *error) data.tmr_address = __pa(sev_es_tmr); data.flags |= SEV_INIT_FLAGS_SEV_ES; - data.tmr_len = SEV_ES_TMR_SIZE; + data.tmr_len = sev_es_tmr_size; } return __sev_do_cmd_locked(SEV_CMD_INIT, &data, error); @@ -450,7 +1033,7 @@ static int __sev_init_ex_locked(int *error) data.tmr_address = __pa(sev_es_tmr); data.flags |= SEV_INIT_FLAGS_SEV_ES; - data.tmr_len = SEV_ES_TMR_SIZE; + data.tmr_len = sev_es_tmr_size; } return __sev_do_cmd_locked(SEV_CMD_INIT_EX, &data, error); @@ -464,25 +1047,217 @@ static inline int __sev_do_init_locked(int *psp_ret) return __sev_init_locked(psp_ret); } -static int __sev_platform_init_locked(int *error) +static void snp_set_hsave_pa(void *arg) +{ + wrmsrl(MSR_VM_HSAVE_PA, 0); +} + +static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg) +{ + struct sev_data_range_list *range_list = arg; + struct sev_data_range *range = &range_list->ranges[range_list->num_elements]; + size_t size; + + /* + * Ensure the list of HV_FIXED pages that will be passed to firmware + * do not exceed the page-sized argument buffer. + */ + if ((range_list->num_elements * sizeof(struct sev_data_range) + + sizeof(struct sev_data_range_list)) > PAGE_SIZE) + return -E2BIG; + + switch (rs->desc) { + case E820_TYPE_RESERVED: + case E820_TYPE_PMEM: + case E820_TYPE_ACPI: + range->base = rs->start & PAGE_MASK; + size = PAGE_ALIGN((rs->end + 1) - rs->start); + range->page_count = size >> PAGE_SHIFT; + range_list->num_elements++; + break; + default: + break; + } + + return 0; +} + +static int __sev_snp_init_locked(int *error) { - int rc = 0, psp_ret = SEV_RET_NO_FW_CALL; struct psp_device *psp = psp_master; + struct sev_data_snp_init_ex data; struct sev_device *sev; + void *arg = &data; + int cmd, rc = 0; - if (!psp || !psp->sev_data) + if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP)) return -ENODEV; sev = psp->sev_data; - if (sev->state == SEV_STATE_INIT) + if (sev->snp_initialized) return 0; - if (sev_init_ex_buffer) { - rc = sev_read_init_ex_file(); - if (rc) + if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) { + dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n", + SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR); + return 0; + } + + /* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */ + on_each_cpu(snp_set_hsave_pa, NULL, 1); + + /* + * Starting in SNP firmware v1.52, the SNP_INIT_EX command takes a list + * of system physical address ranges to convert into HV-fixed page + * states during the RMP initialization. For instance, the memory that + * UEFI reserves should be included in the that list. This allows system + * components that occasionally write to memory (e.g. logging to UEFI + * reserved regions) to not fail due to RMP initialization and SNP + * enablement. 
+ * + */ + if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) { + /* + * Firmware checks that the pages containing the ranges enumerated + * in the RANGES structure are either in the default page state or in the + * firmware page state. + */ + snp_range_list = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!snp_range_list) { + dev_err(sev->dev, + "SEV: SNP_INIT_EX range list memory allocation failed\n"); + return -ENOMEM; + } + + /* + * Retrieve all reserved memory regions from the e820 memory map + * to be setup as HV-fixed pages. + */ + rc = walk_iomem_res_desc(IORES_DESC_NONE, IORESOURCE_MEM, 0, ~0, + snp_range_list, snp_filter_reserved_mem_regions); + if (rc) { + dev_err(sev->dev, + "SEV: SNP_INIT_EX walk_iomem_res_desc failed rc = %d\n", rc); return rc; + } + + memset(&data, 0, sizeof(data)); + data.init_rmp = 1; + data.list_paddr_en = 1; + data.list_paddr = __psp_pa(snp_range_list); + cmd = SEV_CMD_SNP_INIT_EX; + } else { + cmd = SEV_CMD_SNP_INIT; + arg = NULL; + } + + /* + * The following sequence must be issued before launching the first SNP + * guest to ensure all dirty cache lines are flushed, including from + * updates to the RMP table itself via the RMPUPDATE instruction: + * + * - WBINVD on all running CPUs + * - SEV_CMD_SNP_INIT[_EX] firmware command + * - WBINVD on all running CPUs + * - SEV_CMD_SNP_DF_FLUSH firmware command + */ + wbinvd_on_all_cpus(); + + rc = __sev_do_cmd_locked(cmd, arg, error); + if (rc) + return rc; + + /* Prepare for first SNP guest launch after INIT. */ + wbinvd_on_all_cpus(); + rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error); + if (rc) + return rc; + + sev->snp_initialized = true; + dev_dbg(sev->dev, "SEV-SNP firmware initialized\n"); + + sev_es_tmr_size = SNP_TMR_SIZE; + + return rc; +} + +static void __sev_platform_init_handle_tmr(struct sev_device *sev) +{ + if (sev_es_tmr) + return; + + /* Obtain the TMR memory area for SEV-ES use */ + sev_es_tmr = sev_fw_alloc(sev_es_tmr_size); + if (sev_es_tmr) { + /* Must flush the cache before giving it to the firmware */ + if (!sev->snp_initialized) + clflush_cache_range(sev_es_tmr, sev_es_tmr_size); + } else { + dev_warn(sev->dev, "SEV: TMR allocation failed, SEV-ES support unavailable\n"); } +} + +/* + * If an init_ex_path is provided allocate a buffer for the file and + * read in the contents. Additionally, if SNP is initialized, convert + * the buffer pages to firmware pages. + */ +static int __sev_platform_init_handle_init_ex_path(struct sev_device *sev) +{ + struct page *page; + int rc; + + if (!init_ex_path) + return 0; + + if (sev_init_ex_buffer) + return 0; + + page = alloc_pages(GFP_KERNEL, get_order(NV_LENGTH)); + if (!page) { + dev_err(sev->dev, "SEV: INIT_EX NV memory allocation failed\n"); + return -ENOMEM; + } + + sev_init_ex_buffer = page_address(page); + + rc = sev_read_init_ex_file(); + if (rc) + return rc; + + /* If SEV-SNP is initialized, transition to firmware page. 
*/ + if (sev->snp_initialized) { + unsigned long npages; + + npages = 1UL << get_order(NV_LENGTH); + if (rmp_mark_pages_firmware(__pa(sev_init_ex_buffer), npages, false)) { + dev_err(sev->dev, "SEV: INIT_EX NV memory page state change failed.\n"); + return -ENOMEM; + } + } + + return 0; +} + +static int __sev_platform_init_locked(int *error) +{ + int rc, psp_ret = SEV_RET_NO_FW_CALL; + struct sev_device *sev; + + if (!psp_master || !psp_master->sev_data) + return -ENODEV; + + sev = psp_master->sev_data; + + if (sev->state == SEV_STATE_INIT) + return 0; + + __sev_platform_init_handle_tmr(sev); + + rc = __sev_platform_init_handle_init_ex_path(sev); + if (rc) + return rc; rc = __sev_do_init_locked(&psp_ret); if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) { @@ -520,12 +1295,46 @@ static int __sev_platform_init_locked(int *error) return 0; } -int sev_platform_init(int *error) +static int _sev_platform_init_locked(struct sev_platform_init_args *args) +{ + struct sev_device *sev; + int rc; + + if (!psp_master || !psp_master->sev_data) + return -ENODEV; + + sev = psp_master->sev_data; + + if (sev->state == SEV_STATE_INIT) + return 0; + + /* + * Legacy guests cannot be running while SNP_INIT(_EX) is executing, + * so perform SEV-SNP initialization at probe time. + */ + rc = __sev_snp_init_locked(&args->error); + if (rc && rc != -ENODEV) { + /* + * Don't abort the probe if SNP INIT failed, + * continue to initialize the legacy SEV firmware. + */ + dev_err(sev->dev, "SEV-SNP: failed to INIT rc %d, error %#x\n", + rc, args->error); + } + + /* Defer legacy SEV/SEV-ES support if allowed by caller/module. */ + if (args->probe && !psp_init_on_probe) + return 0; + + return __sev_platform_init_locked(&args->error); +} + +int sev_platform_init(struct sev_platform_init_args *args) { int rc; mutex_lock(&sev_cmd_mutex); - rc = __sev_platform_init_locked(error); + rc = _sev_platform_init_locked(args); mutex_unlock(&sev_cmd_mutex); return rc; @@ -534,10 +1343,16 @@ EXPORT_SYMBOL_GPL(sev_platform_init); static int __sev_platform_shutdown_locked(int *error) { - struct sev_device *sev = psp_master->sev_data; + struct psp_device *psp = psp_master; + struct sev_device *sev; int ret; - if (!sev || sev->state == SEV_STATE_UNINIT) + if (!psp || !psp->sev_data) + return 0; + + sev = psp->sev_data; + + if (sev->state == SEV_STATE_UNINIT) return 0; ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); @@ -550,17 +1365,6 @@ static int __sev_platform_shutdown_locked(int *error) return ret; } -static int sev_platform_shutdown(int *error) -{ - int rc; - - mutex_lock(&sev_cmd_mutex); - rc = __sev_platform_shutdown_locked(NULL); - mutex_unlock(&sev_cmd_mutex); - - return rc; -} - static int sev_get_platform_state(int *state, int *error) { struct sev_user_data_status data; @@ -836,6 +1640,72 @@ fw_err: return ret; } +static int __sev_snp_shutdown_locked(int *error, bool panic) +{ + struct sev_device *sev = psp_master->sev_data; + struct sev_data_snp_shutdown_ex data; + int ret; + + if (!sev->snp_initialized) + return 0; + + memset(&data, 0, sizeof(data)); + data.len = sizeof(data); + data.iommu_snp_shutdown = 1; + + /* + * If invoked during panic handling, local interrupts are disabled + * and all CPUs are stopped, so wbinvd_on_all_cpus() can't be called. + * In that case, a wbinvd() is done on remote CPUs via the NMI + * callback, so only a local wbinvd() is needed here. 
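With this rework sev_platform_init() no longer takes a bare "int *error"; callers pass a struct sev_platform_init_args and read the firmware error code back from it, and probe-time callers set args.probe so legacy SEV/SEV-ES INIT can be deferred. A minimal sketch of the new calling convention, matching the sev_pci_init() change further below; the mydrv_init_sev() wrapper is illustrative only.

#include <linux/device.h>
#include <linux/psp-sev.h>

static int mydrv_init_sev(struct device *dev)
{
	struct sev_platform_init_args args = {0};
	int rc;

	args.probe = false;		/* not called from device probe */
	rc = sev_platform_init(&args);
	if (rc)
		dev_err(dev, "SEV init failed: rc=%d fw_error=%#x\n",
			rc, args.error);
	return rc;
}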
+ */ + if (!panic) + wbinvd_on_all_cpus(); + else + wbinvd(); + + ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error); + /* SHUTDOWN may require DF_FLUSH */ + if (*error == SEV_RET_DFFLUSH_REQUIRED) { + ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, NULL); + if (ret) { + dev_err(sev->dev, "SEV-SNP DF_FLUSH failed\n"); + return ret; + } + /* reissue the shutdown command */ + ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, + error); + } + if (ret) { + dev_err(sev->dev, "SEV-SNP firmware shutdown failed\n"); + return ret; + } + + /* + * SNP_SHUTDOWN_EX with IOMMU_SNP_SHUTDOWN set to 1 disables SNP + * enforcement by the IOMMU and also transitions all pages + * associated with the IOMMU to the Reclaim state. + * Firmware was transitioning the IOMMU pages to Hypervisor state + * before version 1.53. But, accounting for the number of assigned + * 4kB pages in a 2M page was done incorrectly by not transitioning + * to the Reclaim state. This resulted in RMP #PF when later accessing + * the 2M page containing those pages during kexec boot. Hence, the + * firmware now transitions these pages to Reclaim state and hypervisor + * needs to transition these pages to shared state. SNP Firmware + * version 1.53 and above are needed for kexec boot. + */ + ret = amd_iommu_snp_disable(); + if (ret) { + dev_err(sev->dev, "SNP IOMMU shutdown failed\n"); + return ret; + } + + sev->snp_initialized = false; + dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n"); + + return ret; +} + static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; @@ -1078,6 +1948,85 @@ e_free_pdh: return ret; } +static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp) +{ + struct sev_device *sev = psp_master->sev_data; + struct sev_data_snp_addr buf; + struct page *status_page; + void *data; + int ret; + + if (!sev->snp_initialized || !argp->data) + return -EINVAL; + + status_page = alloc_page(GFP_KERNEL_ACCOUNT); + if (!status_page) + return -ENOMEM; + + data = page_address(status_page); + + /* + * Firmware expects status page to be in firmware-owned state, otherwise + * it will report firmware error code INVALID_PAGE_STATE (0x1A). + */ + if (rmp_mark_pages_firmware(__pa(data), 1, true)) { + ret = -EFAULT; + goto cleanup; + } + + buf.address = __psp_pa(data); + ret = __sev_do_cmd_locked(SEV_CMD_SNP_PLATFORM_STATUS, &buf, &argp->error); + + /* + * Status page will be transitioned to Reclaim state upon success, or + * left in Firmware state in failure. Use snp_reclaim_pages() to + * transition either case back to Hypervisor-owned state. 
+ */ + if (snp_reclaim_pages(__pa(data), 1, true)) + return -EFAULT; + + if (ret) + goto cleanup; + + if (copy_to_user((void __user *)argp->data, data, + sizeof(struct sev_user_data_snp_status))) + ret = -EFAULT; + +cleanup: + __free_pages(status_page, 0); + return ret; +} + +static int sev_ioctl_do_snp_commit(struct sev_issue_cmd *argp) +{ + struct sev_device *sev = psp_master->sev_data; + struct sev_data_snp_commit buf; + + if (!sev->snp_initialized) + return -EINVAL; + + buf.len = sizeof(buf); + + return __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error); +} + +static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable) +{ + struct sev_device *sev = psp_master->sev_data; + struct sev_user_data_snp_config config; + + if (!sev->snp_initialized || !argp->data) + return -EINVAL; + + if (!writable) + return -EPERM; + + if (copy_from_user(&config, (void __user *)argp->data, sizeof(config))) + return -EFAULT; + + return __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error); +} + static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; @@ -1129,6 +2078,15 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) case SEV_GET_ID2: ret = sev_ioctl_do_get_id2(&input); break; + case SNP_PLATFORM_STATUS: + ret = sev_ioctl_do_snp_platform_status(&input); + break; + case SNP_COMMIT: + ret = sev_ioctl_do_snp_commit(&input); + break; + case SNP_SET_CONFIG: + ret = sev_ioctl_do_snp_set_config(&input, writable); + break; default: ret = -EINVAL; goto out; @@ -1239,10 +2197,12 @@ int sev_dev_init(struct psp_device *psp) if (!sev) goto e_err; - sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0); + sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 1); if (!sev->cmd_buf) goto e_sev; + sev->cmd_buf_backup = (uint8_t *)sev->cmd_buf + PAGE_SIZE; + psp->sev_data = sev; sev->dev = dev; @@ -1281,24 +2241,51 @@ e_err: return ret; } -static void sev_firmware_shutdown(struct sev_device *sev) +static void __sev_firmware_shutdown(struct sev_device *sev, bool panic) { - sev_platform_shutdown(NULL); + int error; + + __sev_platform_shutdown_locked(NULL); if (sev_es_tmr) { - /* The TMR area was encrypted, flush it from the cache */ - wbinvd_on_all_cpus(); + /* + * The TMR area was encrypted, flush it from the cache. + * + * If invoked during panic handling, local interrupts are + * disabled and all CPUs are stopped, so wbinvd_on_all_cpus() + * can't be used. In that case, wbinvd() is done on remote CPUs + * via the NMI callback, and done for this CPU later during + * SNP shutdown, so wbinvd_on_all_cpus() can be skipped. 
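A small detail from the sev_dev_init() hunk above: the command buffer becomes an order-1 (two page) allocation, with the second page carved out as cmd_buf_backup. The carve-up is plain pointer arithmetic on one devres-managed allocation, roughly as in this sketch (names shortened, error handling trimmed):

    #include <linux/device.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/types.h>

    static int alloc_cmd_buffers(struct device *dev, void **cmd_buf,
                                 void **cmd_buf_backup)
    {
        /* order 1 => two contiguous pages, freed automatically with the device */
        void *pages = (void *)devm_get_free_pages(dev, GFP_KERNEL, 1);

        if (!pages)
            return -ENOMEM;

        *cmd_buf        = pages;                        /* first page  */
        *cmd_buf_backup = (u8 *)pages + PAGE_SIZE;      /* second page */

        return 0;
    }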
+ */ + if (!panic) + wbinvd_on_all_cpus(); - free_pages((unsigned long)sev_es_tmr, - get_order(SEV_ES_TMR_SIZE)); + __snp_free_firmware_pages(virt_to_page(sev_es_tmr), + get_order(sev_es_tmr_size), + true); sev_es_tmr = NULL; } if (sev_init_ex_buffer) { - free_pages((unsigned long)sev_init_ex_buffer, - get_order(NV_LENGTH)); + __snp_free_firmware_pages(virt_to_page(sev_init_ex_buffer), + get_order(NV_LENGTH), + true); sev_init_ex_buffer = NULL; } + + if (snp_range_list) { + kfree(snp_range_list); + snp_range_list = NULL; + } + + __sev_snp_shutdown_locked(&error, panic); +} + +static void sev_firmware_shutdown(struct sev_device *sev) +{ + mutex_lock(&sev_cmd_mutex); + __sev_firmware_shutdown(sev, false); + mutex_unlock(&sev_cmd_mutex); } void sev_dev_destroy(struct psp_device *psp) @@ -1316,6 +2303,29 @@ void sev_dev_destroy(struct psp_device *psp) psp_clear_sev_irq_handler(psp); } +static int snp_shutdown_on_panic(struct notifier_block *nb, + unsigned long reason, void *arg) +{ + struct sev_device *sev = psp_master->sev_data; + + /* + * If sev_cmd_mutex is already acquired, then it's likely + * another PSP command is in flight and issuing a shutdown + * would fail in unexpected ways. Rather than create even + * more confusion during a panic, just bail out here. + */ + if (mutex_is_locked(&sev_cmd_mutex)) + return NOTIFY_DONE; + + __sev_firmware_shutdown(sev, true); + + return NOTIFY_DONE; +} + +static struct notifier_block snp_panic_notifier = { + .notifier_call = snp_shutdown_on_panic, +}; + int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd, void *data, int *error) { @@ -1329,7 +2339,8 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); void sev_pci_init(void) { struct sev_device *sev = psp_master->sev_data; - int error, rc; + struct sev_platform_init_args args = {0}; + int rc; if (!sev) return; @@ -1342,36 +2353,18 @@ void sev_pci_init(void) if (sev_update_firmware(sev->dev) == 0) sev_get_api_version(); - /* If an init_ex_path is provided rely on INIT_EX for PSP initialization - * instead of INIT. - */ - if (init_ex_path) { - sev_init_ex_buffer = sev_fw_alloc(NV_LENGTH); - if (!sev_init_ex_buffer) { - dev_err(sev->dev, - "SEV: INIT_EX NV memory allocation failed\n"); - goto err; - } - } - - /* Obtain the TMR memory area for SEV-ES use */ - sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE); - if (sev_es_tmr) - /* Must flush the cache before giving it to the firmware */ - clflush_cache_range(sev_es_tmr, SEV_ES_TMR_SIZE); - else - dev_warn(sev->dev, - "SEV: TMR allocation failed, SEV-ES support unavailable\n"); - - if (!psp_init_on_probe) - return; - /* Initialize the platform */ - rc = sev_platform_init(&error); + args.probe = true; + rc = sev_platform_init(&args); if (rc) dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n", - error, rc); + args.error, rc); + dev_info(sev->dev, "SEV%s API:%d.%d build:%d\n", sev->snp_initialized ? 
+ "-SNP" : "", sev->api_major, sev->api_minor, sev->build); + + atomic_notifier_chain_register(&panic_notifier_list, + &snp_panic_notifier); return; err: @@ -1386,4 +2379,7 @@ void sev_pci_exit(void) return; sev_firmware_shutdown(sev); + + atomic_notifier_chain_unregister(&panic_notifier_list, + &snp_panic_notifier); } diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h index 778c95155e74..3e4e5574e88a 100644 --- a/drivers/crypto/ccp/sev-dev.h +++ b/drivers/crypto/ccp/sev-dev.h @@ -52,6 +52,11 @@ struct sev_device { u8 build; void *cmd_buf; + void *cmd_buf_backup; + bool cmd_buf_active; + bool cmd_buf_backup_active; + + bool snp_initialized; }; int sev_dev_init(struct psp_device *psp); diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 479062aa5e6b..94a0ebb03d8c 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -463,6 +463,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->fw_name = ADF_402XX_FW; hw_data->fw_mmp_name = ADF_402XX_MMP; hw_data->uof_get_name = uof_get_name_402xx; + hw_data->get_ena_thd_mask = get_ena_thd_mask; break; case ADF_401XX_PCI_DEVICE_ID: hw_data->fw_name = ADF_4XXX_FW; diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index 1b13b4aa16ec..a235e6c300f1 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -332,12 +332,12 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq) theend: pm_runtime_put_autosuspend(rkc->dev); + rk_hash_unprepare(engine, breq); + local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); local_bh_enable(); - rk_hash_unprepare(engine, breq); - return 0; } diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c index 2621ff8a9376..de53eddf6796 100644 --- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c @@ -104,7 +104,8 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request * } static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx, - struct virtio_crypto_ctrl_header *header, void *para, + struct virtio_crypto_ctrl_header *header, + struct virtio_crypto_akcipher_session_para *para, const uint8_t *key, unsigned int keylen) { struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3]; @@ -128,7 +129,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher ctrl = &vc_ctrl_req->ctrl; memcpy(&ctrl->header, header, sizeof(ctrl->header)); - memcpy(&ctrl->u, para, sizeof(ctrl->u)); + memcpy(&ctrl->u.akcipher_create_session.para, para, sizeof(*para)); input = &vc_ctrl_req->input; input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR); diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index dcf2b39e1048..1a3e6aafbdcc 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -316,31 +316,27 @@ static const struct cxl_root_ops acpi_root_ops = { .qos_class = cxl_acpi_qos_class, }; -static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg, - const unsigned long end) +static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws, + struct cxl_cfmws_context *ctx) { int target_map[CXL_DECODER_MAX_INTERLEAVE]; - struct cxl_cfmws_context *ctx = arg; struct cxl_port *root_port = ctx->root_port; 
struct resource *cxl_res = ctx->cxl_res; struct cxl_cxims_context cxims_ctx; struct cxl_root_decoder *cxlrd; struct device *dev = ctx->dev; - struct acpi_cedt_cfmws *cfmws; cxl_calc_hb_fn cxl_calc_hb; struct cxl_decoder *cxld; unsigned int ways, i, ig; struct resource *res; int rc; - cfmws = (struct acpi_cedt_cfmws *) header; - rc = cxl_acpi_cfmws_verify(dev, cfmws); if (rc) { dev_err(dev, "CFMWS range %#llx-%#llx not registered\n", cfmws->base_hpa, cfmws->base_hpa + cfmws->window_size - 1); - return 0; + return rc; } rc = eiw_to_ways(cfmws->interleave_ways, &ways); @@ -376,7 +372,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg, cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb); if (IS_ERR(cxlrd)) - return 0; + return PTR_ERR(cxlrd); cxld = &cxlrd->cxlsd.cxld; cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions); @@ -420,16 +416,7 @@ err_xormap: put_device(&cxld->dev); else rc = cxl_decoder_autoremove(dev, cxld); - if (rc) { - dev_err(dev, "Failed to add decode range: %pr", res); - return rc; - } - dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n", - dev_name(&cxld->dev), - phys_to_target_node(cxld->hpa_range.start), - cxld->hpa_range.start, cxld->hpa_range.end); - - return 0; + return rc; err_insert: kfree(res->name); @@ -438,6 +425,29 @@ err_name: return -ENOMEM; } +static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg, + const unsigned long end) +{ + struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header; + struct cxl_cfmws_context *ctx = arg; + struct device *dev = ctx->dev; + int rc; + + rc = __cxl_parse_cfmws(cfmws, ctx); + if (rc) + dev_err(dev, + "Failed to add decode range: [%#llx - %#llx] (%d)\n", + cfmws->base_hpa, + cfmws->base_hpa + cfmws->window_size - 1, rc); + else + dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n", + phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa, + cfmws->base_hpa + cfmws->window_size - 1); + + /* never fail cxl_acpi load for a single window failure */ + return 0; +} + __mock struct acpi_device *to_cxl_host_bridge(struct device *host, struct device *dev) { diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c index 6fe11546889f..08fd0baea7a0 100644 --- a/drivers/cxl/core/cdat.c +++ b/drivers/cxl/core/cdat.c @@ -210,19 +210,12 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port, return 0; } -static void add_perf_entry(struct device *dev, struct dsmas_entry *dent, - struct list_head *list) +static void update_perf_entry(struct device *dev, struct dsmas_entry *dent, + struct cxl_dpa_perf *dpa_perf) { - struct cxl_dpa_perf *dpa_perf; - - dpa_perf = kzalloc(sizeof(*dpa_perf), GFP_KERNEL); - if (!dpa_perf) - return; - dpa_perf->dpa_range = dent->dpa_range; dpa_perf->coord = dent->coord; dpa_perf->qos_class = dent->qos_class; - list_add_tail(&dpa_perf->list, list); dev_dbg(dev, "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n", dent->dpa_range.start, dpa_perf->qos_class, @@ -230,20 +223,6 @@ static void add_perf_entry(struct device *dev, struct dsmas_entry *dent, dent->coord.read_latency, dent->coord.write_latency); } -static void free_perf_ents(void *data) -{ - struct cxl_memdev_state *mds = data; - struct cxl_dpa_perf *dpa_perf, *n; - LIST_HEAD(discard); - - list_splice_tail_init(&mds->ram_perf_list, &discard); - list_splice_tail_init(&mds->pmem_perf_list, &discard); - list_for_each_entry_safe(dpa_perf, n, &discard, list) { - list_del(&dpa_perf->list); - kfree(dpa_perf); - } -} - static void 
cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds, struct xarray *dsmas_xa) { @@ -263,16 +242,14 @@ static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds, xa_for_each(dsmas_xa, index, dent) { if (resource_size(&cxlds->ram_res) && range_contains(&ram_range, &dent->dpa_range)) - add_perf_entry(dev, dent, &mds->ram_perf_list); + update_perf_entry(dev, dent, &mds->ram_perf); else if (resource_size(&cxlds->pmem_res) && range_contains(&pmem_range, &dent->dpa_range)) - add_perf_entry(dev, dent, &mds->pmem_perf_list); + update_perf_entry(dev, dent, &mds->pmem_perf); else dev_dbg(dev, "no partition for dsmas dpa: %#llx\n", dent->dpa_range.start); } - - devm_add_action_or_reset(&cxlds->cxlmd->dev, free_perf_ents, mds); } static int match_cxlrd_qos_class(struct device *dev, void *data) @@ -293,24 +270,24 @@ static int match_cxlrd_qos_class(struct device *dev, void *data) return 0; } -static void cxl_qos_match(struct cxl_port *root_port, - struct list_head *work_list, - struct list_head *discard_list) +static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf) { - struct cxl_dpa_perf *dpa_perf, *n; + *dpa_perf = (struct cxl_dpa_perf) { + .qos_class = CXL_QOS_CLASS_INVALID, + }; +} - list_for_each_entry_safe(dpa_perf, n, work_list, list) { - int rc; +static bool cxl_qos_match(struct cxl_port *root_port, + struct cxl_dpa_perf *dpa_perf) +{ + if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID) + return false; - if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID) - return; + if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class, + match_cxlrd_qos_class)) + return false; - rc = device_for_each_child(&root_port->dev, - (void *)&dpa_perf->qos_class, - match_cxlrd_qos_class); - if (!rc) - list_move_tail(&dpa_perf->list, discard_list); - } + return true; } static int match_cxlrd_hb(struct device *dev, void *data) @@ -334,23 +311,10 @@ static int match_cxlrd_hb(struct device *dev, void *data) return 0; } -static void discard_dpa_perf(struct list_head *list) -{ - struct cxl_dpa_perf *dpa_perf, *n; - - list_for_each_entry_safe(dpa_perf, n, list, list) { - list_del(&dpa_perf->list); - kfree(dpa_perf); - } -} -DEFINE_FREE(dpa_perf, struct list_head *, if (!list_empty(_T)) discard_dpa_perf(_T)) - static int cxl_qos_class_verify(struct cxl_memdev *cxlmd) { struct cxl_dev_state *cxlds = cxlmd->cxlds; struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); - LIST_HEAD(__discard); - struct list_head *discard __free(dpa_perf) = &__discard; struct cxl_port *root_port; int rc; @@ -363,16 +327,17 @@ static int cxl_qos_class_verify(struct cxl_memdev *cxlmd) root_port = &cxl_root->port; /* Check that the QTG IDs are all sane between end device and root decoders */ - cxl_qos_match(root_port, &mds->ram_perf_list, discard); - cxl_qos_match(root_port, &mds->pmem_perf_list, discard); + if (!cxl_qos_match(root_port, &mds->ram_perf)) + reset_dpa_perf(&mds->ram_perf); + if (!cxl_qos_match(root_port, &mds->pmem_perf)) + reset_dpa_perf(&mds->pmem_perf); /* Check to make sure that the device's host bridge is under a root decoder */ rc = device_for_each_child(&root_port->dev, - (void *)cxlmd->endpoint->host_bridge, - match_cxlrd_hb); + cxlmd->endpoint->host_bridge, match_cxlrd_hb); if (!rc) { - list_splice_tail_init(&mds->ram_perf_list, discard); - list_splice_tail_init(&mds->pmem_perf_list, discard); + reset_dpa_perf(&mds->ram_perf); + reset_dpa_perf(&mds->pmem_perf); } return rc; @@ -417,6 +382,7 @@ void cxl_endpoint_parse_cdat(struct cxl_port *port) cxl_memdev_set_qos_class(cxlds, dsmas_xa); 
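The cdat.c changes above replace the dynamically allocated per-partition perf lists with a single embedded entry per partition, marked "empty" via CXL_QOS_CLASS_INVALID and wiped with a compound-literal assignment. The idiom reduced to its core, with illustrative types:

    #define QOS_CLASS_INVALID   -1

    struct dpa_perf {
        int qos_class;
        /* ... other cached CDAT data ... */
    };

    /* Reset: overwrite the whole struct, leaving only the sentinel set. */
    static void reset_perf(struct dpa_perf *p)
    {
        *p = (struct dpa_perf) {
            .qos_class = QOS_CLASS_INVALID,
        };
    }

    /* A "present" check then replaces the old list_empty() test. */
    static bool perf_valid(const struct dpa_perf *p)
    {
        return p->qos_class != QOS_CLASS_INVALID;
    }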
cxl_qos_class_verify(cxlmd); + cxl_memdev_update_perf(cxlmd); } EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL); diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 27166a411705..9adda4795eb7 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -1391,8 +1391,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev) mds->cxlds.reg_map.host = dev; mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE; mds->cxlds.type = CXL_DEVTYPE_CLASSMEM; - INIT_LIST_HEAD(&mds->ram_perf_list); - INIT_LIST_HEAD(&mds->pmem_perf_list); + mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID; + mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID; return mds; } diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index dae8802ecdb0..d4e259f3a7e9 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -447,13 +447,41 @@ static struct attribute *cxl_memdev_attributes[] = { NULL, }; +static ssize_t pmem_qos_class_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); + + return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class); +} + +static struct device_attribute dev_attr_pmem_qos_class = + __ATTR(qos_class, 0444, pmem_qos_class_show, NULL); + static struct attribute *cxl_memdev_pmem_attributes[] = { &dev_attr_pmem_size.attr, + &dev_attr_pmem_qos_class.attr, NULL, }; +static ssize_t ram_qos_class_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); + + return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class); +} + +static struct device_attribute dev_attr_ram_qos_class = + __ATTR(qos_class, 0444, ram_qos_class_show, NULL); + static struct attribute *cxl_memdev_ram_attributes[] = { &dev_attr_ram_size.attr, + &dev_attr_ram_qos_class.attr, NULL, }; @@ -477,14 +505,42 @@ static struct attribute_group cxl_memdev_attribute_group = { .is_visible = cxl_memdev_visible, }; +static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + + if (a == &dev_attr_ram_qos_class.attr) + if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID) + return 0; + + return a->mode; +} + static struct attribute_group cxl_memdev_ram_attribute_group = { .name = "ram", .attrs = cxl_memdev_ram_attributes, + .is_visible = cxl_ram_visible, }; +static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + + if (a == &dev_attr_pmem_qos_class.attr) + if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID) + return 0; + + return a->mode; +} + static struct attribute_group cxl_memdev_pmem_attribute_group = { .name = "pmem", .attrs = cxl_memdev_pmem_attributes, + .is_visible = cxl_pmem_visible, }; static umode_t cxl_memdev_security_visible(struct kobject *kobj, @@ -519,6 +575,13 @@ static const struct attribute_group *cxl_memdev_attribute_groups[] = { NULL, }; +void cxl_memdev_update_perf(struct cxl_memdev *cxlmd) +{ + sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group); 
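cxl_memdev_update_perf() relies on a standard sysfs pattern visible in the memdev.c hunks above: the attributes are registered up front, an .is_visible() callback hides them while the backing data is invalid, and sysfs_update_group() re-evaluates visibility once the CDAT data lands. In outline, with hypothetical driver state:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    struct my_state {
        int qos_class;              /* -1 until CDAT parsing fills it in */
    };

    static ssize_t qos_class_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
    {
        struct my_state *st = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", st->qos_class);
    }
    static DEVICE_ATTR_RO(qos_class);

    static umode_t my_visible(struct kobject *kobj, struct attribute *a, int n)
    {
        struct my_state *st = dev_get_drvdata(kobj_to_dev(kobj));

        return st->qos_class < 0 ? 0 : a->mode;     /* hide until valid */
    }

    static struct attribute *my_attrs[] = { &dev_attr_qos_class.attr, NULL };
    static struct attribute_group my_group = {
        .attrs = my_attrs,
        .is_visible = my_visible,
    };

    /* After the data becomes valid: sysfs_update_group(&dev->kobj, &my_group); */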
+ sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group); +} +EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, CXL); + static const struct device_type cxl_memdev_type = { .name = "cxl_memdev", .release = cxl_memdev_release, diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 6c9c8d92f8f7..e9e6c81ce034 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -477,9 +477,9 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, allowed++; } - if (!allowed) { - cxl_set_mem_enable(cxlds, 0); - info->mem_enabled = 0; + if (!allowed && info->mem_enabled) { + dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n"); + return -ENXIO; } /* @@ -932,11 +932,21 @@ static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { } void cxl_cor_error_detected(struct pci_dev *pdev) { struct cxl_dev_state *cxlds = pci_get_drvdata(pdev); + struct device *dev = &cxlds->cxlmd->dev; + + scoped_guard(device, dev) { + if (!dev->driver) { + dev_warn(&pdev->dev, + "%s: memdev disabled, abort error handling\n", + dev_name(dev)); + return; + } - if (cxlds->rcd) - cxl_handle_rdport_errors(cxlds); + if (cxlds->rcd) + cxl_handle_rdport_errors(cxlds); - cxl_handle_endpoint_cor_ras(cxlds); + cxl_handle_endpoint_cor_ras(cxlds); + } } EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL); @@ -948,16 +958,25 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev, struct device *dev = &cxlmd->dev; bool ue; - if (cxlds->rcd) - cxl_handle_rdport_errors(cxlds); + scoped_guard(device, dev) { + if (!dev->driver) { + dev_warn(&pdev->dev, + "%s: memdev disabled, abort error handling\n", + dev_name(dev)); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (cxlds->rcd) + cxl_handle_rdport_errors(cxlds); + /* + * A frozen channel indicates an impending reset which is fatal to + * CXL.mem operation, and will likely crash the system. On the off + * chance the situation is recoverable dump the status of the RAS + * capability registers and bounce the active state of the memdev. + */ + ue = cxl_handle_endpoint_ras(cxlds); + } - /* - * A frozen channel indicates an impending reset which is fatal to - * CXL.mem operation, and will likely crash the system. On the off - * chance the situation is recoverable dump the status of the RAS - * capability registers and bounce the active state of the memdev. 
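The pci.c error handlers above are reworked to run under the device lock via scoped_guard(device, ...), bailing out early when the memdev has already been unbound (dev->driver == NULL). The cleanup.h guard gives the lock/unlock pairing without a goto ladder; a minimal sketch of the guarded shape, with made-up names:

    #include <linux/cleanup.h>
    #include <linux/device.h>

    static void my_error_handler(struct device *dev)
    {
        scoped_guard(device, dev) {     /* device_lock(dev) ... device_unlock(dev) */
            if (!dev->driver) {
                dev_warn(dev, "device disabled, abort error handling\n");
                return;                 /* unlock happens automatically */
            }

            /* ... inspect and clear RAS status while the driver is pinned ... */
        }
    }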
- */ - ue = cxl_handle_endpoint_ras(cxlds); switch (state) { case pci_channel_io_normal: diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index ce0e2d82bb2b..4c7fd2d5cccb 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -730,12 +730,17 @@ static int match_auto_decoder(struct device *dev, void *data) return 0; } -static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port, - struct cxl_region *cxlr) +static struct cxl_decoder * +cxl_region_find_decoder(struct cxl_port *port, + struct cxl_endpoint_decoder *cxled, + struct cxl_region *cxlr) { struct device *dev; int id = 0; + if (port == cxled_to_port(cxled)) + return &cxled->cxld; + if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) dev = device_find_child(&port->dev, &cxlr->params, match_auto_decoder); @@ -753,8 +758,31 @@ static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port, return to_cxl_decoder(dev); } -static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port, - struct cxl_region *cxlr) +static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter, + struct cxl_decoder *cxld) +{ + struct cxl_region_ref *rr = cxl_rr_load(port, cxlr_iter); + struct cxl_decoder *cxld_iter = rr->decoder; + + /* + * Allow the out of order assembly of auto-discovered regions. + * Per CXL Spec 3.1 8.2.4.20.12 software must commit decoders + * in HPA order. Confirm that the decoder with the lesser HPA + * starting address has the lesser id. + */ + dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n", + dev_name(&cxld->dev), cxld->id, + dev_name(&cxld_iter->dev), cxld_iter->id); + + if (cxld_iter->id > cxld->id) + return true; + + return false; +} + +static struct cxl_region_ref * +alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled) { struct cxl_region_params *p = &cxlr->params; struct cxl_region_ref *cxl_rr, *iter; @@ -764,16 +792,21 @@ static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port, xa_for_each(&port->regions, index, iter) { struct cxl_region_params *ip = &iter->region->params; - if (!ip->res) + if (!ip->res || ip->res->start < p->res->start) continue; - if (ip->res->start > p->res->start) { - dev_dbg(&cxlr->dev, - "%s: HPA order violation %s:%pr vs %pr\n", - dev_name(&port->dev), - dev_name(&iter->region->dev), ip->res, p->res); - return ERR_PTR(-EBUSY); + if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { + struct cxl_decoder *cxld; + + cxld = cxl_region_find_decoder(port, cxled, cxlr); + if (auto_order_ok(port, iter->region, cxld)) + continue; } + dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n", + dev_name(&port->dev), + dev_name(&iter->region->dev), ip->res, p->res); + + return ERR_PTR(-EBUSY); } cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL); @@ -853,10 +886,7 @@ static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr, { struct cxl_decoder *cxld; - if (port == cxled_to_port(cxled)) - cxld = &cxled->cxld; - else - cxld = cxl_region_find_decoder(port, cxlr); + cxld = cxl_region_find_decoder(port, cxled, cxlr); if (!cxld) { dev_dbg(&cxlr->dev, "%s: no decoder available\n", dev_name(&port->dev)); @@ -953,7 +983,7 @@ static int cxl_port_attach_region(struct cxl_port *port, nr_targets_inc = true; } } else { - cxl_rr = alloc_region_ref(port, cxlr); + cxl_rr = alloc_region_ref(port, cxlr, cxled); if (IS_ERR(cxl_rr)) { dev_dbg(&cxlr->dev, "%s: failed to allocate region reference\n", diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h index 
89445435303a..bdf117a33744 100644 --- a/drivers/cxl/core/trace.h +++ b/drivers/cxl/core/trace.h @@ -338,7 +338,7 @@ TRACE_EVENT(cxl_general_media, TP_fast_assign( CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr); - memcpy(&__entry->hdr_uuid, &CXL_EVENT_GEN_MEDIA_UUID, sizeof(uuid_t)); + __entry->hdr_uuid = CXL_EVENT_GEN_MEDIA_UUID; /* General Media */ __entry->dpa = le64_to_cpu(rec->phys_addr); @@ -425,7 +425,7 @@ TRACE_EVENT(cxl_dram, TP_fast_assign( CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr); - memcpy(&__entry->hdr_uuid, &CXL_EVENT_DRAM_UUID, sizeof(uuid_t)); + __entry->hdr_uuid = CXL_EVENT_DRAM_UUID; /* DRAM */ __entry->dpa = le64_to_cpu(rec->phys_addr); @@ -573,7 +573,7 @@ TRACE_EVENT(cxl_memory_module, TP_fast_assign( CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr); - memcpy(&__entry->hdr_uuid, &CXL_EVENT_MEM_MODULE_UUID, sizeof(uuid_t)); + __entry->hdr_uuid = CXL_EVENT_MEM_MODULE_UUID; /* Memory Module Event */ __entry->event_type = rec->event_type; diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index b6017c0c57b4..003feebab79b 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -880,6 +880,8 @@ void cxl_switch_parse_cdat(struct cxl_port *port); int cxl_endpoint_get_perf_coordinates(struct cxl_port *port, struct access_coordinate *coord); +void cxl_memdev_update_perf(struct cxl_memdev *cxlmd); + /* * Unit test builds overrides this to __weak, find the 'strong' version * of these symbols in tools/testing/cxl/. diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 5303d6942b88..20fb3b35e89e 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -395,13 +395,11 @@ enum cxl_devtype { /** * struct cxl_dpa_perf - DPA performance property entry - * @list - list entry * @dpa_range - range for DPA address * @coord - QoS performance data (i.e. latency, bandwidth) * @qos_class - QoS Class cookies */ struct cxl_dpa_perf { - struct list_head list; struct range dpa_range; struct access_coordinate coord; int qos_class; @@ -471,8 +469,8 @@ struct cxl_dev_state { * @security: security driver state info * @fw: firmware upload / activation state * @mbox_send: @dev specific transport for transmitting mailbox commands - * @ram_perf_list: performance data entries matched to RAM - * @pmem_perf_list: performance data entries matched to PMEM + * @ram_perf: performance data entry matched to RAM partition + * @pmem_perf: performance data entry matched to PMEM partition * * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for * details on capacity parameters. 
@@ -494,8 +492,8 @@ struct cxl_memdev_state { u64 next_volatile_bytes; u64 next_persistent_bytes; - struct list_head ram_perf_list; - struct list_head pmem_perf_list; + struct cxl_dpa_perf ram_perf; + struct cxl_dpa_perf pmem_perf; struct cxl_event_state event; struct cxl_poison_state poison; diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c index c5c9d8e0d88d..0c79d9ce877c 100644 --- a/drivers/cxl/mem.c +++ b/drivers/cxl/mem.c @@ -215,52 +215,6 @@ static ssize_t trigger_poison_list_store(struct device *dev, } static DEVICE_ATTR_WO(trigger_poison_list); -static ssize_t ram_qos_class_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); - struct cxl_dpa_perf *dpa_perf; - - if (!dev->driver) - return -ENOENT; - - if (list_empty(&mds->ram_perf_list)) - return -ENOENT; - - dpa_perf = list_first_entry(&mds->ram_perf_list, struct cxl_dpa_perf, - list); - - return sysfs_emit(buf, "%d\n", dpa_perf->qos_class); -} - -static struct device_attribute dev_attr_ram_qos_class = - __ATTR(qos_class, 0444, ram_qos_class_show, NULL); - -static ssize_t pmem_qos_class_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); - struct cxl_dpa_perf *dpa_perf; - - if (!dev->driver) - return -ENOENT; - - if (list_empty(&mds->pmem_perf_list)) - return -ENOENT; - - dpa_perf = list_first_entry(&mds->pmem_perf_list, struct cxl_dpa_perf, - list); - - return sysfs_emit(buf, "%d\n", dpa_perf->qos_class); -} - -static struct device_attribute dev_attr_pmem_qos_class = - __ATTR(qos_class, 0444, pmem_qos_class_show, NULL); - static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = kobj_to_dev(kobj); @@ -272,21 +226,11 @@ static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n) mds->poison.enabled_cmds)) return 0; - if (a == &dev_attr_pmem_qos_class.attr) - if (list_empty(&mds->pmem_perf_list)) - return 0; - - if (a == &dev_attr_ram_qos_class.attr) - if (list_empty(&mds->ram_perf_list)) - return 0; - return a->mode; } static struct attribute *cxl_mem_attrs[] = { &dev_attr_trigger_poison_list.attr, - &dev_attr_ram_qos_class.attr, - &dev_attr_pmem_qos_class.attr, NULL }; diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 233e7c42c161..2ff361e756d6 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -974,61 +974,6 @@ static struct pci_driver cxl_pci_driver = { }, }; -#define CXL_EVENT_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0) -static void cxl_cper_event_call(enum cxl_event_type ev_type, - struct cxl_cper_event_rec *rec) -{ - struct cper_cxl_event_devid *device_id = &rec->hdr.device_id; - struct pci_dev *pdev __free(pci_dev_put) = NULL; - enum cxl_event_log_type log_type; - struct cxl_dev_state *cxlds; - unsigned int devfn; - u32 hdr_flags; - - devfn = PCI_DEVFN(device_id->device_num, device_id->func_num); - pdev = pci_get_domain_bus_and_slot(device_id->segment_num, - device_id->bus_num, devfn); - if (!pdev) - return; - - guard(pci_dev)(pdev); - if (pdev->driver != &cxl_pci_driver) - return; - - cxlds = pci_get_drvdata(pdev); - if (!cxlds) - return; - - /* Fabricate a log type */ - hdr_flags = get_unaligned_le24(rec->event.generic.hdr.flags); - log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags); - - 
cxl_event_trace_record(cxlds->cxlmd, log_type, ev_type, - &uuid_null, &rec->event); -} - -static int __init cxl_pci_driver_init(void) -{ - int rc; - - rc = cxl_cper_register_callback(cxl_cper_event_call); - if (rc) - return rc; - - rc = pci_register_driver(&cxl_pci_driver); - if (rc) - cxl_cper_unregister_callback(cxl_cper_event_call); - - return rc; -} - -static void __exit cxl_pci_driver_exit(void) -{ - pci_unregister_driver(&cxl_pci_driver); - cxl_cper_unregister_callback(cxl_cper_event_call); -} - -module_init(cxl_pci_driver_init); -module_exit(cxl_pci_driver_exit); +module_pci_driver(cxl_pci_driver); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS(CXL); diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index ee899f8e6721..4a63567e93ba 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -168,10 +168,7 @@ static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf) if (vmf->pgoff > buffer->pagecount) return VM_FAULT_SIGBUS; - vmf->page = buffer->pages[vmf->pgoff]; - get_page(vmf->page); - - return 0; + return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff])); } static const struct vm_operations_struct dma_heap_vm_ops = { @@ -185,6 +182,8 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) return -EINVAL; + vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); + vma->vm_ops = &dma_heap_vm_ops; vma->vm_private_data = buffer; diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index fb89ecbf0cc5..40052d1bd0b5 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -222,8 +222,14 @@ struct atdma_sg { * @vd: pointer to the virtual dma descriptor. * @atchan: pointer to the atmel dma channel. * @total_len: total transaction byte count - * @sg_len: number of sg entries. + * @sglen: number of sg entries. * @sg: array of sgs. + * @boundary: number of transfers to perform before the automatic address increment operation + * @dst_hole: value to add to the destination address when the boundary has been reached + * @src_hole: value to add to the source address when the boundary has been reached + * @memset_buffer: buffer used for the memset operation + * @memset_paddr: physical address of the buffer used for the memset operation + * @memset_vaddr: virtual address of the buffer used for the memset operation */ struct at_desc { struct virt_dma_desc vd; @@ -245,7 +251,10 @@ struct at_desc { /*-- Channels --------------------------------------------------------*/ /** - * atc_status - information bits stored in channel status flag + * enum atc_status - information bits stored in channel status flag + * + * @ATC_IS_PAUSED: If channel is pauses + * @ATC_IS_CYCLIC: If channel is cyclic * * Manipulated with atomic operations. 
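In the cma_heap hunk above, the fault handler stops taking page references and instead inserts a raw PFN, which in turn requires marking the VMA as a PFN map at mmap time. The two halves of that pattern, simplified and with a hypothetical buffer type:

    #include <linux/mm.h>

    struct my_buffer {
        struct page **pages;
        unsigned long pagecount;
    };

    static vm_fault_t my_vm_fault(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;
        struct my_buffer *buf = vma->vm_private_data;

        if (vmf->pgoff >= buf->pagecount)
            return VM_FAULT_SIGBUS;

        /* No get_page(): the mapping is PFN-based, not refcounted pages. */
        return vmf_insert_pfn(vma, vmf->address,
                              page_to_pfn(buf->pages[vmf->pgoff]));
    }

    static int my_mmap(struct my_buffer *buf, struct vm_area_struct *vma)
    {
        /* Must be declared as a PFN map for vmf_insert_pfn() to be legal. */
        vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_private_data = buf;
        /* vma->vm_ops = &my_vm_ops; */
        return 0;
    }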
*/ @@ -282,7 +291,6 @@ struct at_dma_chan { u32 save_cfg; u32 save_dscr; struct dma_slave_config dma_sconfig; - bool cyclic; struct at_desc *desc; }; @@ -328,12 +336,12 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width) /** * struct at_dma - internal representation of an Atmel HDMA Controller * @dma_device: dmaengine dma_device object members - * @atdma_devtype: identifier of DMA controller compatibility - * @ch_regs: memory mapped register base + * @regs: memory mapped register base * @clk: dma controller clock * @save_imr: interrupt mask register that is saved on suspend/resume cycle * @all_chan_mask: all channels availlable in a mask * @lli_pool: hw lli table + * @memset_pool: hw memset pool * @chan: channels table to store at_dma_chan structures */ struct at_dma { @@ -626,6 +634,9 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla) /** * atc_get_llis_residue - Get residue for a hardware linked list transfer + * @atchan: pointer to an atmel hdmac channel. + * @desc: pointer to the descriptor for which the residue is calculated. + * @residue: residue to be set to dma_tx_state. * * Calculate the residue by removing the length of the Linked List Item (LLI) * already transferred from the total length. To get the current LLI we can use @@ -661,10 +672,8 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla) * two DSCR values are different, we read again the CTRLA then the DSCR till two * consecutive read values from DSCR are equal or till the maximum trials is * reach. This algorithm is very unlikely not to find a stable value for DSCR. - * @atchan: pointer to an atmel hdmac channel. - * @desc: pointer to the descriptor for which the residue is calculated. - * @residue: residue to be set to dma_tx_state. - * Returns 0 on success, -errno otherwise. + * + * Returns: %0 on success, -errno otherwise. */ static int atc_get_llis_residue(struct at_dma_chan *atchan, struct at_desc *desc, u32 *residue) @@ -731,7 +740,8 @@ static int atc_get_llis_residue(struct at_dma_chan *atchan, * @chan: DMA channel * @cookie: transaction identifier to check status of * @residue: residue to be updated. - * Return 0 on success, -errono otherwise. + * + * Return: %0 on success, -errno otherwise. */ static int atc_get_residue(struct dma_chan *chan, dma_cookie_t cookie, u32 *residue) @@ -1710,7 +1720,7 @@ static void atc_issue_pending(struct dma_chan *chan) * atc_alloc_chan_resources - allocate resources for DMA channel * @chan: allocate descriptor resources for this channel * - * return - the number of allocated descriptors + * Return: the number of allocated descriptors */ static int atc_alloc_chan_resources(struct dma_chan *chan) { diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c index b38786f0ad79..b75fdaffad9a 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-core.c +++ b/drivers/dma/dw-edma/dw-edma-v0-core.c @@ -346,6 +346,20 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr); } +static void dw_edma_v0_sync_ll_data(struct dw_edma_chunk *chunk) +{ + /* + * In case of remote eDMA engine setup, the DW PCIe RP/EP internal + * configuration registers and application memory are normally accessed + * over different buses. 
Ensure LL-data reaches the memory before the + * doorbell register is toggled by issuing the dummy-read from the remote + * LL memory in a hope that the MRd TLP will return only after the + * last MWr TLP is completed + */ + if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL)) + readl(chunk->ll_region.vaddr.io); +} + static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) { struct dw_edma_chan *chan = chunk->chan; @@ -412,6 +426,9 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) SET_CH_32(dw, chan->dir, chan->id, llp.msb, upper_32_bits(chunk->ll_region.paddr)); } + + dw_edma_v0_sync_ll_data(chunk); + /* Doorbell */ SET_RW_32(dw, chan->dir, doorbell, FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id)); diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c index 00b735a0202a..10e8f0715114 100644 --- a/drivers/dma/dw-edma/dw-hdma-v0-core.c +++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c @@ -65,18 +65,12 @@ static void dw_hdma_v0_core_off(struct dw_edma *dw) static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) { - u32 num_ch = 0; - int id; - - for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) { - if (GET_CH_32(dw, id, dir, ch_en) & BIT(0)) - num_ch++; - } - - if (num_ch > HDMA_V0_MAX_NR_CH) - num_ch = HDMA_V0_MAX_NR_CH; - - return (u16)num_ch; + /* + * The HDMA IP have no way to know the number of hardware channels + * available, we set it to maximum channels and let the platform + * set the right number of channels. + */ + return HDMA_V0_MAX_NR_CH; } static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan) @@ -228,6 +222,20 @@ static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk) dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr); } +static void dw_hdma_v0_sync_ll_data(struct dw_edma_chunk *chunk) +{ + /* + * In case of remote HDMA engine setup, the DW PCIe RP/EP internal + * configuration registers and application memory are normally accessed + * over different buses. 
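Both the eDMA core above and the HDMA core continuing below gain the same ordering trick: when the linked-list memory sits on the far side of the PCIe link from the DMA engine, a dummy read of that memory is issued before ringing the doorbell, so the non-posted read completion implies the earlier posted writes have landed. Schematically, with invented register and parameter names:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Sketch: ll_base is the remote linked-list buffer, regs the engine CSRs. */
    static void start_remote_dma(void __iomem *regs, void __iomem *ll_base,
                                 u32 doorbell_off, u32 doorbell_val)
    {
        /* ... linked-list entries have already been written through ll_base ... */

        /*
         * Dummy read from the LL region: the read completion can only
         * return after the earlier posted writes have completed.
         */
        readl(ll_base);

        /* Now it is safe to ring the doorbell. */
        writel(doorbell_val, regs + doorbell_off);
    }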
Ensure LL-data reaches the memory before the + * doorbell register is toggled by issuing the dummy-read from the remote + * LL memory in a hope that the MRd TLP will return only after the + * last MWr TLP is completed + */ + if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL)) + readl(chunk->ll_region.vaddr.io); +} + static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first) { struct dw_edma_chan *chan = chunk->chan; @@ -242,7 +250,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first) /* Interrupt enable&unmask - done, abort */ tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) | HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK | - HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_STOP_INT_EN; + HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN; + if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL)) + tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN; SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp); /* Channel control */ SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN); @@ -256,6 +266,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first) /* Set consumer cycle */ SET_CH_32(dw, chan->dir, chan->id, cycle_sync, HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT); + + dw_hdma_v0_sync_ll_data(chunk); + /* Doorbell */ SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START); } diff --git a/drivers/dma/dw-edma/dw-hdma-v0-regs.h b/drivers/dma/dw-edma/dw-hdma-v0-regs.h index a974abdf8aaf..eab5fd7177e5 100644 --- a/drivers/dma/dw-edma/dw-hdma-v0-regs.h +++ b/drivers/dma/dw-edma/dw-hdma-v0-regs.h @@ -15,7 +15,7 @@ #define HDMA_V0_LOCAL_ABORT_INT_EN BIT(6) #define HDMA_V0_REMOTE_ABORT_INT_EN BIT(5) #define HDMA_V0_LOCAL_STOP_INT_EN BIT(4) -#define HDMA_V0_REMOTEL_STOP_INT_EN BIT(3) +#define HDMA_V0_REMOTE_STOP_INT_EN BIT(3) #define HDMA_V0_ABORT_INT_MASK BIT(2) #define HDMA_V0_STOP_INT_MASK BIT(0) #define HDMA_V0_LINKLIST_EN BIT(0) diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c index 7958ac33e36c..5a8061a307cd 100644 --- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c +++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c @@ -38,15 +38,17 @@ static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan) if (!dpaa2_chan->fd_pool) goto err; - dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev, - sizeof(struct dpaa2_fl_entry), - sizeof(struct dpaa2_fl_entry), 0); + dpaa2_chan->fl_pool = + dma_pool_create("fl_pool", dev, + sizeof(struct dpaa2_fl_entry) * 3, + sizeof(struct dpaa2_fl_entry), 0); + if (!dpaa2_chan->fl_pool) goto err_fd; dpaa2_chan->sdd_pool = dma_pool_create("sdd_pool", dev, - sizeof(struct dpaa2_qdma_sd_d), + sizeof(struct dpaa2_qdma_sd_d) * 2, sizeof(struct dpaa2_qdma_sd_d), 0); if (!dpaa2_chan->sdd_pool) goto err_fl; diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index b53f46245c37..793f1a7ad5e3 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -503,7 +503,7 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan, if (fsl_chan->is_multi_fifo) { /* set mloff to support multiple fifo */ burst = cfg->direction == DMA_DEV_TO_MEM ? 
- cfg->src_addr_width : cfg->dst_addr_width; + cfg->src_maxburst : cfg->dst_maxburst; nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4)); /* enable DMLOE/SMLOE */ if (cfg->direction == DMA_MEM_TO_DEV) { diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index bb5221158a77..f5e216b157c7 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -30,8 +30,9 @@ #define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8) #define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11) -#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0)) -#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0)) +#define EDMA_TCD_ITER_MASK GENMASK(14, 0) +#define EDMA_TCD_CITER_CITER(x) ((x) & EDMA_TCD_ITER_MASK) +#define EDMA_TCD_BITER_BITER(x) ((x) & EDMA_TCD_ITER_MASK) #define EDMA_TCD_CSR_START BIT(0) #define EDMA_TCD_CSR_INT_MAJOR BIT(1) diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index 45cc419b1b4a..d36e28b9c767 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -10,6 +10,7 @@ */ #include <dt-bindings/dma/fsl-edma.h> +#include <linux/bitfield.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/clk.h> @@ -582,7 +583,8 @@ static int fsl_edma_probe(struct platform_device *pdev) DMAENGINE_ALIGN_32_BYTES; /* Per worst case 'nbytes = 1' take CITER as the max_seg_size */ - dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff); + dma_set_max_seg_size(fsl_edma->dma_dev.dev, + FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK)); fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index a1d0aa63142a..5005e138fc23 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -109,6 +109,7 @@ #define FSL_QDMA_CMD_WTHROTL_OFFSET 20 #define FSL_QDMA_CMD_DSEN_OFFSET 19 #define FSL_QDMA_CMD_LWC_OFFSET 16 +#define FSL_QDMA_CMD_PF BIT(17) /* Field definition for Descriptor status */ #define QDMA_CCDF_STATUS_RTE BIT(5) @@ -160,6 +161,10 @@ struct fsl_qdma_format { u8 __reserved1[2]; u8 cfg8b_w1; } __packed; + struct { + __le32 __reserved2; + __le32 cmd; + } __packed; __le64 data; }; } __packed; @@ -354,7 +359,6 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan) static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp, dma_addr_t dst, dma_addr_t src, u32 len) { - u32 cmd; struct fsl_qdma_format *sdf, *ddf; struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest; @@ -383,14 +387,11 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp, /* This entry is the last entry. 
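The fsl-edma-main.c hunk above derives the maximum segment size from the CITER/BITER field mask itself: FIELD_GET(mask, mask) shifts the mask down to bit 0 and yields the largest value the field can encode, so the open-coded 0x3fff constant is replaced by a value derived from the field definition (0x7fff for GENMASK(14, 0)). For example:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define TCD_ITER_MASK   GENMASK(14, 0)

    /*
     * FIELD_GET(m, m) masks m with itself and shifts it to bit 0, i.e. it
     * returns the maximum value the field can hold. For GENMASK(14, 0)
     * that is 0x7fff.
     */
    static inline unsigned long tcd_iter_max(void)
    {
        return FIELD_GET(TCD_ITER_MASK, TCD_ITER_MASK);
    }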
*/ qdma_csgf_set_f(csgf_dest, len); /* Descriptor Buffer */ - cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE << - FSL_QDMA_CMD_RWTTYPE_OFFSET); - sdf->data = QDMA_SDDF_CMD(cmd); - - cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE << - FSL_QDMA_CMD_RWTTYPE_OFFSET); - cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET); - ddf->data = QDMA_SDDF_CMD(cmd); + sdf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) | + FSL_QDMA_CMD_PF); + + ddf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) | + (FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET)); } /* @@ -514,11 +515,11 @@ static struct fsl_qdma_queue queue_temp = queue_head + i + (j * queue_num); queue_temp->cq = - dma_alloc_coherent(&pdev->dev, - sizeof(struct fsl_qdma_format) * - queue_size[i], - &queue_temp->bus_addr, - GFP_KERNEL); + dmam_alloc_coherent(&pdev->dev, + sizeof(struct fsl_qdma_format) * + queue_size[i], + &queue_temp->bus_addr, + GFP_KERNEL); if (!queue_temp->cq) return NULL; queue_temp->block_base = fsl_qdma->block_base + @@ -563,15 +564,14 @@ static struct fsl_qdma_queue /* * Buffer for queue command */ - status_head->cq = dma_alloc_coherent(&pdev->dev, - sizeof(struct fsl_qdma_format) * - status_size, - &status_head->bus_addr, - GFP_KERNEL); - if (!status_head->cq) { - devm_kfree(&pdev->dev, status_head); + status_head->cq = dmam_alloc_coherent(&pdev->dev, + sizeof(struct fsl_qdma_format) * + status_size, + &status_head->bus_addr, + GFP_KERNEL); + if (!status_head->cq) return NULL; - } + status_head->n_cq = status_size; status_head->virt_head = status_head->cq; status_head->virt_tail = status_head->cq; @@ -625,7 +625,7 @@ static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma) static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma, - void *block, + __iomem void *block, int id) { bool duplicate; @@ -1197,10 +1197,6 @@ static int fsl_qdma_probe(struct platform_device *pdev) if (!fsl_qdma->queue) return -ENOMEM; - ret = fsl_qdma_irq_init(pdev, fsl_qdma); - if (ret) - return ret; - fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0"); if (fsl_qdma->irq_base < 0) return fsl_qdma->irq_base; @@ -1239,16 +1235,19 @@ static int fsl_qdma_probe(struct platform_device *pdev) platform_set_drvdata(pdev, fsl_qdma); - ret = dma_async_device_register(&fsl_qdma->dma_dev); + ret = fsl_qdma_reg_init(fsl_qdma); if (ret) { - dev_err(&pdev->dev, - "Can't register NXP Layerscape qDMA engine.\n"); + dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n"); return ret; } - ret = fsl_qdma_reg_init(fsl_qdma); + ret = fsl_qdma_irq_init(pdev, fsl_qdma); + if (ret) + return ret; + + ret = dma_async_device_register(&fsl_qdma->dma_dev); if (ret) { - dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n"); + dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n"); return ret; } @@ -1268,8 +1267,6 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev) static void fsl_qdma_remove(struct platform_device *pdev) { - int i; - struct fsl_qdma_queue *status; struct device_node *np = pdev->dev.of_node; struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev); @@ -1277,12 +1274,6 @@ static void fsl_qdma_remove(struct platform_device *pdev) fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev); of_dma_controller_free(np); dma_async_device_unregister(&fsl_qdma->dma_dev); - - for (i = 0; i < fsl_qdma->block_number; i++) { - status = fsl_qdma->status[i]; - dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) * - status->n_cq, status->cq, status->bus_addr); - } } static 
const struct of_device_id fsl_qdma_dt_ids[] = { diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 77f8885cf407..e5a94a93a3cc 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -345,7 +345,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid) spin_lock(&evl->lock); status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET); t = status.tail; - h = evl->head; + h = status.head; size = evl->size; while (h != t) { diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c index 9cfbd9b14c4c..f3f25ee676f3 100644 --- a/drivers/dma/idxd/debugfs.c +++ b/drivers/dma/idxd/debugfs.c @@ -68,9 +68,9 @@ static int debugfs_evl_show(struct seq_file *s, void *d) spin_lock(&evl->lock); - h = evl->head; evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET); t = evl_status.tail; + h = evl_status.head; evl_size = evl->size; seq_printf(s, "Event Log head %u tail %u interrupt pending %u\n\n", diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 47de3f93ff1e..d0f5db6cf1ed 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -300,7 +300,6 @@ struct idxd_evl { unsigned int log_size; /* The number of entries in the event log. */ u16 size; - u16 head; unsigned long *bmap; bool batch_fail[IDXD_MAX_BATCH_IDENT]; }; diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 14df1f1347a8..4954adc6bb60 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -343,7 +343,9 @@ static void idxd_cleanup_internals(struct idxd_device *idxd) static int idxd_init_evl(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; + unsigned int evl_cache_size; struct idxd_evl *evl; + const char *idxd_name; if (idxd->hw.gen_cap.evl_support == 0) return 0; @@ -355,9 +357,16 @@ static int idxd_init_evl(struct idxd_device *idxd) spin_lock_init(&evl->lock); evl->size = IDXD_EVL_SIZE_MIN; - idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)), - sizeof(struct idxd_evl_fault) + evl_ent_size(idxd), - 0, 0, NULL); + idxd_name = dev_name(idxd_confdev(idxd)); + evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd); + /* + * Since completion record in evl_cache will be copied to user + * when handling completion record page fault, need to create + * the cache suitable for user copy. 
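The idxd hunk here (continuing just below) switches the event-log cache to kmem_cache_create_usercopy(), because completion records allocated from it are later copied to user space and hardened usercopy rejects copies from slab caches that have no whitelisted region. A sketch of the call, whitelisting the whole object as the driver does; the struct name is invented:

    #include <linux/slab.h>
    #include <linux/errno.h>

    struct evl_record {
        /* ... fault bookkeeping plus the completion record ... */
        char data[128];
    };

    static struct kmem_cache *evl_cache;

    static int create_evl_cache(void)
    {
        /*
         * Arguments 5 and 6 (useroffset, usersize) whitelist the byte range
         * that copy_to_user()/copy_from_user() may touch; here the whole
         * object is allowed.
         */
        evl_cache = kmem_cache_create_usercopy("evl_cache",
                                               sizeof(struct evl_record),
                                               0, 0,
                                               0, sizeof(struct evl_record),
                                               NULL);
        return evl_cache ? 0 : -ENOMEM;
    }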
+ */ + idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size, + 0, 0, 0, evl_cache_size, + NULL); if (!idxd->evl_cache) { kfree(evl); return -ENOMEM; diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index c8a0aa874b11..348aa21389a9 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -367,9 +367,9 @@ static void process_evl_entries(struct idxd_device *idxd) /* Clear interrupt pending bit */ iowrite32(evl_status.bits_upper32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32)); - h = evl->head; evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET); t = evl_status.tail; + h = evl_status.head; size = idxd->evl->size; while (h != t) { @@ -378,7 +378,6 @@ static void process_evl_entries(struct idxd_device *idxd) h = (h + 1) % size; } - evl->head = h; evl_status.head = h; iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET); spin_unlock(&evl->lock); diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index 1ebfbe88e733..97ebc791a30b 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -747,8 +747,8 @@ static int mv_xor_v2_probe(struct platform_device *pdev) if (IS_ERR(xor_dev->clk)) return PTR_ERR(xor_dev->clk); - ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1, - mv_xor_v2_set_msi_msg); + ret = platform_device_msi_init_and_alloc_irqs(&pdev->dev, 1, + mv_xor_v2_set_msi_msg); if (ret) return ret; @@ -851,7 +851,7 @@ free_hw_desq: xor_dev->desc_size * MV_XOR_V2_DESC_NUM, xor_dev->hw_desq_virt, xor_dev->hw_desq); free_msi_irqs: - platform_msi_domain_free_irqs(&pdev->dev); + platform_device_msi_free_irqs_all(&pdev->dev); return ret; } @@ -867,7 +867,7 @@ static void mv_xor_v2_remove(struct platform_device *pdev) devm_free_irq(&pdev->dev, xor_dev->irq, xor_dev); - platform_msi_domain_free_irqs(&pdev->dev); + platform_device_msi_free_irqs_all(&pdev->dev); tasklet_kill(&xor_dev->irq_tasklet); } diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c index 1aa65e5de0f3..f79240734807 100644 --- a/drivers/dma/ptdma/ptdma-dmaengine.c +++ b/drivers/dma/ptdma/ptdma-dmaengine.c @@ -385,8 +385,6 @@ int pt_dmaengine_register(struct pt_device *pt) chan->vc.desc_free = pt_do_cleanup; vchan_init(&chan->vc, dma_dev); - dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64)); - ret = dma_async_device_register(dma_dev); if (ret) goto err_reg; diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index d63b93dc7047..202ac95227cb 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -696,7 +696,7 @@ static void hidma_free_msis(struct hidma_dev *dmadev) devm_free_irq(dev, virq, &dmadev->lldev); } - platform_msi_domain_free_irqs(dev); + platform_device_msi_free_irqs_all(dev); #endif } @@ -706,8 +706,8 @@ static int hidma_request_msi(struct hidma_dev *dmadev, #ifdef CONFIG_GENERIC_MSI_IRQ int rc, i, virq; - rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS, - hidma_write_msi_msg); + rc = platform_device_msi_init_and_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS, + hidma_write_msi_msg); if (rc) return rc; diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index f1f920861fa9..5f8d2e93ff3f 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -2404,6 +2404,11 @@ static int edma_probe(struct platform_device *pdev) if (irq > 0) { irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint", dev_name(dev)); + if (!irq_name) { + ret = -ENOMEM; + goto err_disable_pm; + } + ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name, ecc); if 
(ret) { @@ -2420,6 +2425,11 @@ static int edma_probe(struct platform_device *pdev) if (irq > 0) { irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint", dev_name(dev)); + if (!irq_name) { + ret = -ENOMEM; + goto err_disable_pm; + } + ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name, ecc); if (ret) { diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 2841a539c264..6400d06588a2 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -3968,6 +3968,7 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc, { struct udma_chan *uc = to_udma_chan(&vc->chan); struct udma_desc *d; + u8 status; if (!vd) return; @@ -3977,12 +3978,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc, if (d->metadata_size) udma_fetch_epib(uc, d); - /* Provide residue information for the client */ if (result) { void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); if (cppi5_desc_get_type(desc_vaddr) == CPPI5_INFO0_DESC_TYPE_VAL_HOST) { + /* Provide residue information for the client */ result->residue = d->residue - cppi5_hdesc_get_pktlen(desc_vaddr); if (result->residue) @@ -3991,7 +3992,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc, result->result = DMA_TRANS_NOERROR; } else { result->residue = 0; - result->result = DMA_TRANS_NOERROR; + /* Propagate TR Response errors to the client */ + status = d->hwdesc[0].tr_resp_base->status; + if (status) + result->result = DMA_TRANS_ABORTED; + else + result->result = DMA_TRANS_NOERROR; } } } diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c index 5152bd1b0daf..7f686d179fc9 100644 --- a/drivers/dpll/dpll_core.c +++ b/drivers/dpll/dpll_core.c @@ -508,6 +508,26 @@ err_pin_prop: return ERR_PTR(ret); } +static void dpll_netdev_pin_assign(struct net_device *dev, struct dpll_pin *dpll_pin) +{ + rtnl_lock(); + rcu_assign_pointer(dev->dpll_pin, dpll_pin); + rtnl_unlock(); +} + +void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin) +{ + WARN_ON(!dpll_pin); + dpll_netdev_pin_assign(dev, dpll_pin); +} +EXPORT_SYMBOL(dpll_netdev_pin_set); + +void dpll_netdev_pin_clear(struct net_device *dev) +{ + dpll_netdev_pin_assign(dev, NULL); +} +EXPORT_SYMBOL(dpll_netdev_pin_clear); + /** * dpll_pin_get - find existing or create new dpll pin * @clock_id: clock_id of creator @@ -564,7 +584,7 @@ void dpll_pin_put(struct dpll_pin *pin) xa_destroy(&pin->parent_refs); xa_erase(&dpll_pin_xa, pin->id); dpll_pin_prop_free(&pin->prop); - kfree(pin); + kfree_rcu(pin, rcu); } mutex_unlock(&dpll_lock); } diff --git a/drivers/dpll/dpll_core.h b/drivers/dpll/dpll_core.h index 717f715015c7..2b6d8ef1cdf3 100644 --- a/drivers/dpll/dpll_core.h +++ b/drivers/dpll/dpll_core.h @@ -47,6 +47,7 @@ struct dpll_device { * @prop: pin properties copied from the registerer * @rclk_dev_name: holds name of device when pin can recover clock from it * @refcount: refcount + * @rcu: rcu_head for kfree_rcu() **/ struct dpll_pin { u32 id; @@ -57,6 +58,7 @@ struct dpll_pin { struct xarray parent_refs; struct dpll_pin_properties prop; refcount_t refcount; + struct rcu_head rcu; }; /** diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c index 314bb3775465..b57355e0c214 100644 --- a/drivers/dpll/dpll_netlink.c +++ b/drivers/dpll/dpll_netlink.c @@ -8,6 +8,7 @@ */ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/netdevice.h> #include <net/genetlink.h> #include "dpll_core.h" #include "dpll_netlink.h" @@ -48,18 +49,6 @@ dpll_msg_add_dev_parent_handle(struct sk_buff *msg, 
u32 id) } /** - * dpll_msg_pin_handle_size - get size of pin handle attribute for given pin - * @pin: pin pointer - * - * Return: byte size of pin handle attribute for given pin. - */ -size_t dpll_msg_pin_handle_size(struct dpll_pin *pin) -{ - return pin ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */ -} -EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size); - -/** * dpll_msg_add_pin_handle - attach pin handle attribute to a given message * @msg: pointer to sk_buff message to attach a pin handle * @pin: pin pointer @@ -68,7 +57,7 @@ EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size); * * 0 - success * * -EMSGSIZE - no space in message to attach pin handle */ -int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin) +static int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin) { if (!pin) return 0; @@ -76,7 +65,28 @@ int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin) return -EMSGSIZE; return 0; } -EXPORT_SYMBOL_GPL(dpll_msg_add_pin_handle); + +static struct dpll_pin *dpll_netdev_pin(const struct net_device *dev) +{ + return rcu_dereference_rtnl(dev->dpll_pin); +} + +/** + * dpll_netdev_pin_handle_size - get size of pin handle attribute of a netdev + * @dev: netdev from which to get the pin + * + * Return: byte size of pin handle attribute, or 0 if @dev has no pin. + */ +size_t dpll_netdev_pin_handle_size(const struct net_device *dev) +{ + return dpll_netdev_pin(dev) ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */ +} + +int dpll_netdev_add_pin_handle(struct sk_buff *msg, + const struct net_device *dev) +{ + return dpll_msg_add_pin_handle(msg, dpll_netdev_pin(dev)); +} static int dpll_msg_add_mode(struct sk_buff *msg, struct dpll_device *dpll, @@ -1199,6 +1209,7 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) unsigned long i; int ret = 0; + mutex_lock(&dpll_lock); xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED, ctx->idx) { if (!dpll_pin_available(pin)) @@ -1218,6 +1229,8 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) } genlmsg_end(skb, hdr); } + mutex_unlock(&dpll_lock); + if (ret == -EMSGSIZE) { ctx->idx = i; return skb->len; @@ -1373,6 +1386,7 @@ int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) unsigned long i; int ret = 0; + mutex_lock(&dpll_lock); xa_for_each_marked_start(&dpll_device_xa, i, dpll, DPLL_REGISTERED, ctx->idx) { hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, @@ -1389,6 +1403,8 @@ int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) } genlmsg_end(skb, hdr); } + mutex_unlock(&dpll_lock); + if (ret == -EMSGSIZE) { ctx->idx = i; return skb->len; @@ -1439,20 +1455,6 @@ dpll_unlock_doit(const struct genl_split_ops *ops, struct sk_buff *skb, mutex_unlock(&dpll_lock); } -int dpll_lock_dumpit(struct netlink_callback *cb) -{ - mutex_lock(&dpll_lock); - - return 0; -} - -int dpll_unlock_dumpit(struct netlink_callback *cb) -{ - mutex_unlock(&dpll_lock); - - return 0; -} - int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info) { diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c index eaee5be7aa64..1e95f5397cfc 100644 --- a/drivers/dpll/dpll_nl.c +++ b/drivers/dpll/dpll_nl.c @@ -95,9 +95,7 @@ static const struct genl_split_ops dpll_nl_ops[] = { }, { .cmd = DPLL_CMD_DEVICE_GET, - .start = dpll_lock_dumpit, .dumpit = dpll_nl_device_get_dumpit, - .done = dpll_unlock_dumpit, .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP, }, { @@ -129,9 +127,7 @@ static const struct 
genl_split_ops dpll_nl_ops[] = { }, { .cmd = DPLL_CMD_PIN_GET, - .start = dpll_lock_dumpit, .dumpit = dpll_nl_pin_get_dumpit, - .done = dpll_unlock_dumpit, .policy = dpll_pin_get_dump_nl_policy, .maxattr = DPLL_A_PIN_ID, .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP, diff --git a/drivers/dpll/dpll_nl.h b/drivers/dpll/dpll_nl.h index 92d4c9c4f788..f491262bee4f 100644 --- a/drivers/dpll/dpll_nl.h +++ b/drivers/dpll/dpll_nl.h @@ -30,8 +30,6 @@ dpll_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, void dpll_pin_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info); -int dpll_lock_dumpit(struct netlink_callback *cb); -int dpll_unlock_dumpit(struct netlink_callback *cb); int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info); int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info); diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 5a7f3fabee22..16c8de5050e5 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -78,6 +78,7 @@ config EDAC_GHES config EDAC_AMD64 tristate "AMD64 (Opteron, Athlon64)" depends on AMD_NB && EDAC_DECODE_MCE + imply AMD_ATL help Support for error detection and correction of DRAM ECC errors on the AMD64 families (>= K8) of memory controllers. diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 537b9987a431..1f3520d76861 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/ras.h> #include "amd64_edac.h" #include <asm/amd_nb.h> @@ -1051,281 +1052,6 @@ static int fixup_node_id(int node_id, struct mce *m) return nid - gpu_node_map.base_node_id + 1; } -/* Protect the PCI config register pairs used for DF indirect access. */ -static DEFINE_MUTEX(df_indirect_mutex); - -/* - * Data Fabric Indirect Access uses FICAA/FICAD. - * - * Fabric Indirect Configuration Access Address (FICAA): Constructed based - * on the device's Instance Id and the PCI function and register offset of - * the desired register. - * - * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO - * and FICAD HI registers but so far we only need the LO register. - * - * Use Instance Id 0xFF to indicate a broadcast read. - */ -#define DF_BROADCAST 0xFF -static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo) -{ - struct pci_dev *F4; - u32 ficaa; - int err = -ENODEV; - - if (node >= amd_nb_num()) - goto out; - - F4 = node_to_amd_nb(node)->link; - if (!F4) - goto out; - - ficaa = (instance_id == DF_BROADCAST) ? 
0 : 1; - ficaa |= reg & 0x3FC; - ficaa |= (func & 0x7) << 11; - ficaa |= instance_id << 16; - - mutex_lock(&df_indirect_mutex); - - err = pci_write_config_dword(F4, 0x5C, ficaa); - if (err) { - pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa); - goto out_unlock; - } - - err = pci_read_config_dword(F4, 0x98, lo); - if (err) - pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa); - -out_unlock: - mutex_unlock(&df_indirect_mutex); - -out: - return err; -} - -static int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo) -{ - return __df_indirect_read(node, func, reg, instance_id, lo); -} - -static int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo) -{ - return __df_indirect_read(node, func, reg, DF_BROADCAST, lo); -} - -struct addr_ctx { - u64 ret_addr; - u32 tmp; - u16 nid; - u8 inst_id; -}; - -static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) -{ - u64 dram_base_addr, dram_limit_addr, dram_hole_base; - - u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask; - u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets; - u8 intlv_addr_sel, intlv_addr_bit; - u8 num_intlv_bits, hashed_bit; - u8 lgcy_mmio_hole_en, base = 0; - u8 cs_mask, cs_id = 0; - bool hash_enabled = false; - - struct addr_ctx ctx; - - memset(&ctx, 0, sizeof(ctx)); - - /* Start from the normalized address */ - ctx.ret_addr = norm_addr; - - ctx.nid = nid; - ctx.inst_id = umc; - - /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */ - if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp)) - goto out_err; - - /* Remove HiAddrOffset from normalized address, if enabled: */ - if (ctx.tmp & BIT(0)) { - u64 hi_addr_offset = (ctx.tmp & GENMASK_ULL(31, 20)) << 8; - - if (norm_addr >= hi_addr_offset) { - ctx.ret_addr -= hi_addr_offset; - base = 1; - } - } - - /* Read D18F0x110 (DramBaseAddress). */ - if (df_indirect_read_instance(nid, 0, 0x110 + (8 * base), umc, &ctx.tmp)) - goto out_err; - - /* Check if address range is valid. */ - if (!(ctx.tmp & BIT(0))) { - pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n", - __func__, ctx.tmp); - goto out_err; - } - - lgcy_mmio_hole_en = ctx.tmp & BIT(1); - intlv_num_chan = (ctx.tmp >> 4) & 0xF; - intlv_addr_sel = (ctx.tmp >> 8) & 0x7; - dram_base_addr = (ctx.tmp & GENMASK_ULL(31, 12)) << 16; - - /* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */ - if (intlv_addr_sel > 3) { - pr_err("%s: Invalid interleave address select %d.\n", - __func__, intlv_addr_sel); - goto out_err; - } - - /* Read D18F0x114 (DramLimitAddress). 
*/ - if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp)) - goto out_err; - - intlv_num_sockets = (ctx.tmp >> 8) & 0x1; - intlv_num_dies = (ctx.tmp >> 10) & 0x3; - dram_limit_addr = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0); - - intlv_addr_bit = intlv_addr_sel + 8; - - /* Re-use intlv_num_chan by setting it equal to log2(#channels) */ - switch (intlv_num_chan) { - case 0: intlv_num_chan = 0; break; - case 1: intlv_num_chan = 1; break; - case 3: intlv_num_chan = 2; break; - case 5: intlv_num_chan = 3; break; - case 7: intlv_num_chan = 4; break; - - case 8: intlv_num_chan = 1; - hash_enabled = true; - break; - default: - pr_err("%s: Invalid number of interleaved channels %d.\n", - __func__, intlv_num_chan); - goto out_err; - } - - num_intlv_bits = intlv_num_chan; - - if (intlv_num_dies > 2) { - pr_err("%s: Invalid number of interleaved nodes/dies %d.\n", - __func__, intlv_num_dies); - goto out_err; - } - - num_intlv_bits += intlv_num_dies; - - /* Add a bit if sockets are interleaved. */ - num_intlv_bits += intlv_num_sockets; - - /* Assert num_intlv_bits <= 4 */ - if (num_intlv_bits > 4) { - pr_err("%s: Invalid interleave bits %d.\n", - __func__, num_intlv_bits); - goto out_err; - } - - if (num_intlv_bits > 0) { - u64 temp_addr_x, temp_addr_i, temp_addr_y; - u8 die_id_bit, sock_id_bit, cs_fabric_id; - - /* - * Read FabricBlockInstanceInformation3_CS[BlockFabricID]. - * This is the fabric id for this coherent slave. Use - * umc/channel# as instance id of the coherent slave - * for FICAA. - */ - if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp)) - goto out_err; - - cs_fabric_id = (ctx.tmp >> 8) & 0xFF; - die_id_bit = 0; - - /* If interleaved over more than 1 channel: */ - if (intlv_num_chan) { - die_id_bit = intlv_num_chan; - cs_mask = (1 << die_id_bit) - 1; - cs_id = cs_fabric_id & cs_mask; - } - - sock_id_bit = die_id_bit; - - /* Read D18F1x208 (SystemFabricIdMask). */ - if (intlv_num_dies || intlv_num_sockets) - if (df_indirect_read_broadcast(nid, 1, 0x208, &ctx.tmp)) - goto out_err; - - /* If interleaved over more than 1 die. */ - if (intlv_num_dies) { - sock_id_bit = die_id_bit + intlv_num_dies; - die_id_shift = (ctx.tmp >> 24) & 0xF; - die_id_mask = (ctx.tmp >> 8) & 0xFF; - - cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit; - } - - /* If interleaved over more than 1 socket. */ - if (intlv_num_sockets) { - socket_id_shift = (ctx.tmp >> 28) & 0xF; - socket_id_mask = (ctx.tmp >> 16) & 0xFF; - - cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit; - } - - /* - * The pre-interleaved address consists of XXXXXXIIIYYYYY - * where III is the ID for this CS, and XXXXXXYYYYY are the - * address bits from the post-interleaved address. - * "num_intlv_bits" has been calculated to tell us how many "I" - * bits there are. "intlv_addr_bit" tells us how many "Y" bits - * there are (where "I" starts). 
- */ - temp_addr_y = ctx.ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0); - temp_addr_i = (cs_id << intlv_addr_bit); - temp_addr_x = (ctx.ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits; - ctx.ret_addr = temp_addr_x | temp_addr_i | temp_addr_y; - } - - /* Add dram base address */ - ctx.ret_addr += dram_base_addr; - - /* If legacy MMIO hole enabled */ - if (lgcy_mmio_hole_en) { - if (df_indirect_read_broadcast(nid, 0, 0x104, &ctx.tmp)) - goto out_err; - - dram_hole_base = ctx.tmp & GENMASK(31, 24); - if (ctx.ret_addr >= dram_hole_base) - ctx.ret_addr += (BIT_ULL(32) - dram_hole_base); - } - - if (hash_enabled) { - /* Save some parentheses and grab ls-bit at the end. */ - hashed_bit = (ctx.ret_addr >> 12) ^ - (ctx.ret_addr >> 18) ^ - (ctx.ret_addr >> 21) ^ - (ctx.ret_addr >> 30) ^ - cs_id; - - hashed_bit &= BIT(0); - - if (hashed_bit != ((ctx.ret_addr >> intlv_addr_bit) & BIT(0))) - ctx.ret_addr ^= BIT(intlv_addr_bit); - } - - /* Is calculated system address is above DRAM limit address? */ - if (ctx.ret_addr > dram_limit_addr) - goto out_err; - - *sys_addr = ctx.ret_addr; - return 0; - -out_err: - return -EINVAL; -} - static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); /* @@ -1915,7 +1641,7 @@ ddr3: /* On F10h and later ErrAddr is MC4_ADDR[47:1] */ static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m) { - u16 mce_nid = topology_die_id(m->extcpu); + u16 mce_nid = topology_amd_node_id(m->extcpu); struct mem_ctl_info *mci; u8 start_bit = 1; u8 end_bit = 47; @@ -3073,9 +2799,10 @@ static void decode_umc_error(int node_id, struct mce *m) { u8 ecc_type = (m->status >> 45) & 0x3; struct mem_ctl_info *mci; + unsigned long sys_addr; struct amd64_pvt *pvt; + struct atl_err a_err; struct err_info err; - u64 sys_addr; node_id = fixup_node_id(node_id, m); @@ -3106,7 +2833,12 @@ static void decode_umc_error(int node_id, struct mce *m) pvt->ops->get_err_info(m, &err); - if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) { + a_err.addr = m->addr; + a_err.ipid = m->ipid; + a_err.cpu = m->extcpu; + + sys_addr = amd_convert_umc_mca_addr_to_sys_addr(&a_err); + if (IS_ERR_VALUE(sys_addr)) { err.err_code = ERR_NORM_ADDR; goto log_error; } @@ -3446,7 +3178,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid) int cpu; for_each_online_cpu(cpu) - if (topology_die_id(cpu) == nid) + if (topology_amd_node_id(cpu) == nid) cpumask_set_cpu(cpu, mask); } diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index 2b83d6de9352..3fd22a1eb1a9 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -951,6 +951,7 @@ static const struct x86_cpu_id i10nm_cpuids[] = { X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg), X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg), X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg), + X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg), {} }; MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids); diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c index 2b0ecdeba5cd..cdd8480e7368 100644 --- a/drivers/edac/igen6_edac.c +++ b/drivers/edac/igen6_edac.c @@ -238,6 +238,7 @@ static struct work_struct ecclog_work; #define DID_ADL_N_SKU9 0x4678 #define DID_ADL_N_SKU10 0x4679 #define DID_ADL_N_SKU11 0x467c +#define DID_ADL_N_SKU12 0x4632 /* Compute die IDs for Raptor Lake-P with IBECC */ #define DID_RPL_P_SKU1 0xa706 @@ -583,6 
+584,7 @@ static const struct pci_device_id igen6_pci_tbl[] = { { PCI_VDEVICE(INTEL, DID_ADL_N_SKU9), (kernel_ulong_t)&adl_n_cfg }, { PCI_VDEVICE(INTEL, DID_ADL_N_SKU10), (kernel_ulong_t)&adl_n_cfg }, { PCI_VDEVICE(INTEL, DID_ADL_N_SKU11), (kernel_ulong_t)&adl_n_cfg }, + { PCI_VDEVICE(INTEL, DID_ADL_N_SKU12), (kernel_ulong_t)&adl_n_cfg }, { PCI_VDEVICE(INTEL, DID_RPL_P_SKU1), (kernel_ulong_t)&rpl_p_cfg }, { PCI_VDEVICE(INTEL, DID_RPL_P_SKU2), (kernel_ulong_t)&rpl_p_cfg }, { PCI_VDEVICE(INTEL, DID_RPL_P_SKU3), (kernel_ulong_t)&rpl_p_cfg }, diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index ec8b6c9fedfd..8130c3dc64da 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -584,7 +584,7 @@ static void decode_mc3_mce(struct mce *m) static void decode_mc4_mce(struct mce *m) { unsigned int fam = x86_family(m->cpuid); - int node_id = topology_die_id(m->extcpu); + int node_id = topology_amd_node_id(m->extcpu); u16 ec = EC(m->status); u8 xec = XEC(m->status, 0x1f); u8 offset = 0; @@ -746,7 +746,7 @@ static void decode_smca_error(struct mce *m) if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) && xec == 0 && decode_dram_ecc) - decode_dram_ecc(topology_die_id(m->extcpu), m); + decode_dram_ecc(topology_amd_node_id(m->extcpu), m); } static inline void amd_decode_err_code(u16 ec) diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index 709babce43ba..5527055b0964 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -1324,11 +1324,9 @@ static int mc_probe(struct platform_device *pdev) struct synps_edac_priv *priv; struct mem_ctl_info *mci; void __iomem *baseaddr; - struct resource *res; int rc; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - baseaddr = devm_ioremap_resource(&pdev->dev, res); + baseaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(baseaddr)) return PTR_ERR(baseaddr); diff --git a/drivers/edac/versal_edac.c b/drivers/edac/versal_edac.c index 62caf454b567..1688a5050f63 100644 --- a/drivers/edac/versal_edac.c +++ b/drivers/edac/versal_edac.c @@ -42,8 +42,11 @@ #define ECCW0_FLIP_CTRL 0x109C #define ECCW0_FLIP0_OFFSET 0x10A0 +#define ECCW0_FLIP0_BITS 31 +#define ECCW0_FLIP1_OFFSET 0x10A4 #define ECCW1_FLIP_CTRL 0x10AC #define ECCW1_FLIP0_OFFSET 0x10B0 +#define ECCW1_FLIP1_OFFSET 0x10B4 #define ECCR0_CERR_STAT_OFFSET 0x10BC #define ECCR0_CE_ADDR_LO_OFFSET 0x10C0 #define ECCR0_CE_ADDR_HI_OFFSET 0x10C4 @@ -116,9 +119,6 @@ #define XDDR_BUS_WIDTH_32 1 #define XDDR_BUS_WIDTH_16 2 -#define ECC_CEPOISON_MASK 0x1 -#define ECC_UEPOISON_MASK 0x3 - #define XDDR_MAX_ROW_CNT 18 #define XDDR_MAX_COL_CNT 10 #define XDDR_MAX_RANK_CNT 2 @@ -133,6 +133,7 @@ * https://docs.xilinx.com/r/en-US/am012-versal-register-reference/PCSR_LOCK-XRAM_SLCR-Register */ #define PCSR_UNLOCK_VAL 0xF9E8D7C6 +#define PCSR_LOCK_VAL 1 #define XDDR_ERR_TYPE_CE 0 #define XDDR_ERR_TYPE_UE 1 @@ -142,6 +143,7 @@ #define XILINX_DRAM_SIZE_12G 3 #define XILINX_DRAM_SIZE_16G 4 #define XILINX_DRAM_SIZE_32G 5 +#define NUM_UE_BITPOS 2 /** * struct ecc_error_info - ECC error log information. 
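The drivers/edac/versal_edac.c hunks above and below all follow the same convention: every DDRMC register update is bracketed by writing PCSR_UNLOCK_VAL to XDDR_PCSR_OFFSET beforehand and PCSR_LOCK_VAL (now a named constant instead of a bare 1) afterwards. As a minimal illustrative sketch only, assuming nothing beyond the fields and constants visible in this diff, the unlock/modify/lock sequence could be factored as below; xddr_locked_write() is a hypothetical helper name, not part of the driver:

    /*
     * Illustrative sketch, not driver code: mirrors the PCSR
     * unlock/modify/lock pattern used by err_callback(), enable_intr(),
     * disable_intr() and the error-injection paths in this diff.
     */
    static void xddr_locked_write(struct edac_priv *priv, u32 val, u32 reg)
    {
    	/* Unlock the PCSR registers */
    	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);

    	/* Update the target DDRMC register while unlocked */
    	writel(val, priv->ddrmc_baseaddr + reg);

    	/* Lock the PCSR registers again */
    	writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
    }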
@@ -479,7 +481,7 @@ static void err_callback(const u32 *payload, void *data) writel(regval, priv->ddrmc_baseaddr + XDDR_ISR_OFFSET); /* Lock the PCSR registers */ - writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); + writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); edac_dbg(3, "Total error count CE %d UE %d\n", priv->ce_cnt, priv->ue_cnt); } @@ -650,7 +652,7 @@ static void enable_intr(struct edac_priv *priv) writel(XDDR_IRQ_UE_MASK, priv->ddrmc_baseaddr + XDDR_IRQ1_EN_OFFSET); /* Lock the PCSR registers */ - writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); + writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); } static void disable_intr(struct edac_priv *priv) @@ -663,7 +665,7 @@ static void disable_intr(struct edac_priv *priv) priv->ddrmc_baseaddr + XDDR_IRQ_DIS_OFFSET); /* Lock the PCSR registers */ - writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); + writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); } #define to_mci(k) container_of(k, struct mem_ctl_info, dev) @@ -734,38 +736,63 @@ static void poison_setup(struct edac_priv *priv) writel(regval, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC15_OFFSET); } -static ssize_t xddr_inject_data_poison_store(struct mem_ctl_info *mci, - const char __user *data) +static void xddr_inject_data_ce_store(struct mem_ctl_info *mci, u8 ce_bitpos) { + u32 ecc0_flip0, ecc1_flip0, ecc0_flip1, ecc1_flip1; struct edac_priv *priv = mci->pvt_info; - writel(0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET); - writel(0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET); - - if (strncmp(data, "CE", 2) == 0) { - writel(ECC_CEPOISON_MASK, priv->ddrmc_baseaddr + - ECCW0_FLIP0_OFFSET); - writel(ECC_CEPOISON_MASK, priv->ddrmc_baseaddr + - ECCW1_FLIP0_OFFSET); + if (ce_bitpos < ECCW0_FLIP0_BITS) { + ecc0_flip0 = BIT(ce_bitpos); + ecc1_flip0 = BIT(ce_bitpos); + ecc0_flip1 = 0; + ecc1_flip1 = 0; } else { - writel(ECC_UEPOISON_MASK, priv->ddrmc_baseaddr + - ECCW0_FLIP0_OFFSET); - writel(ECC_UEPOISON_MASK, priv->ddrmc_baseaddr + - ECCW1_FLIP0_OFFSET); + ce_bitpos = ce_bitpos - ECCW0_FLIP0_BITS; + ecc0_flip1 = BIT(ce_bitpos); + ecc1_flip1 = BIT(ce_bitpos); + ecc0_flip0 = 0; + ecc1_flip0 = 0; } - /* Lock the PCSR registers */ - writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); - - return 0; + writel(ecc0_flip0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET); + writel(ecc1_flip0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET); + writel(ecc0_flip1, priv->ddrmc_baseaddr + ECCW0_FLIP1_OFFSET); + writel(ecc1_flip1, priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET); } -static ssize_t inject_data_poison_store(struct file *file, const char __user *data, - size_t count, loff_t *ppos) +/* + * To inject a correctable error, the following steps are needed: + * + * - Write the correctable error bit position value: + * echo <bit_pos val> > /sys/kernel/debug/edac/<controller instance>/inject_ce + * + * poison_setup() derives the row, column, bank, group and rank and + * writes to the ADEC registers based on the address given by the user. + * + * The ADEC12 and ADEC13 are mask registers; write 0 to make sure default + * configuration is there and no addresses are masked. + * + * The row, column, bank, group and rank registers are written to the + * match ADEC bit to generate errors at the particular address. ADEC14 + * and ADEC15 have the match bits. + * + * xddr_inject_data_ce_store() updates the ECC FLIP registers with the + * bits to be corrupted based on the bit position given by the user. + * + * Upon doing a read to the address the errors are injected. 
+ */ +static ssize_t inject_data_ce_store(struct file *file, const char __user *data, + size_t count, loff_t *ppos) { struct device *dev = file->private_data; struct mem_ctl_info *mci = to_mci(dev); struct edac_priv *priv = mci->pvt_info; + u8 ce_bitpos; + int ret; + + ret = kstrtou8_from_user(data, count, 0, &ce_bitpos); + if (ret) + return ret; /* Unlock the PCSR registers */ writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); @@ -773,17 +800,110 @@ static ssize_t inject_data_poison_store(struct file *file, const char __user *da poison_setup(priv); + xddr_inject_data_ce_store(mci, ce_bitpos); + ret = count; + /* Lock the PCSR registers */ - writel(1, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET); + writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); + writel(PCSR_LOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET); + + return ret; +} + +static const struct file_operations xddr_inject_ce_fops = { + .open = simple_open, + .write = inject_data_ce_store, + .llseek = generic_file_llseek, +}; + +static void xddr_inject_data_ue_store(struct mem_ctl_info *mci, u32 val0, u32 val1) +{ + struct edac_priv *priv = mci->pvt_info; + + writel(val0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET); + writel(val0, priv->ddrmc_baseaddr + ECCW0_FLIP1_OFFSET); + writel(val1, priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET); + writel(val1, priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET); +} + +/* + * To inject an uncorrectable error, the following steps are needed: + * echo <bit_pos val> > /sys/kernel/debug/edac/<controller instance>/inject_ue + * + * poison_setup() derives the row, column, bank, group and rank and + * writes to the ADEC registers based on the address given by the user. + * + * The ADEC12 and ADEC13 are mask registers; write 0 so that none of the + * addresses are masked. The row, column, bank, group and rank registers + * are written to the match ADEC bit to generate errors at the + * particular address. ADEC14 and ADEC15 have the match bits. + * + * xddr_inject_data_ue_store() updates the ECC FLIP registers with the + * bits to be corrupted based on the bit position given by the user. For + * uncorrectable errors + * 2 bit errors are injected. + * + * Upon doing a read to the address the errors are injected. 
+ */ +static ssize_t inject_data_ue_store(struct file *file, const char __user *data, + size_t count, loff_t *ppos) +{ + struct device *dev = file->private_data; + struct mem_ctl_info *mci = to_mci(dev); + struct edac_priv *priv = mci->pvt_info; + char buf[6], *pbuf, *token[2]; + u32 val0 = 0, val1 = 0; + u8 len, ue0, ue1; + int i, ret; + + len = min_t(size_t, count, sizeof(buf)); + if (copy_from_user(buf, data, len)) + return -EFAULT; + + buf[len] = '\0'; + pbuf = &buf[0]; + for (i = 0; i < NUM_UE_BITPOS; i++) + token[i] = strsep(&pbuf, ","); + + ret = kstrtou8(token[0], 0, &ue0); + if (ret) + return ret; + + ret = kstrtou8(token[1], 0, &ue1); + if (ret) + return ret; + + if (ue0 < ECCW0_FLIP0_BITS) { + val0 = BIT(ue0); + } else { + ue0 = ue0 - ECCW0_FLIP0_BITS; + val1 = BIT(ue0); + } + + if (ue1 < ECCW0_FLIP0_BITS) { + val0 |= BIT(ue1); + } else { + ue1 = ue1 - ECCW0_FLIP0_BITS; + val1 |= BIT(ue1); + } - xddr_inject_data_poison_store(mci, data); + /* Unlock the PCSR registers */ + writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); + writel(PCSR_UNLOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET); + poison_setup(priv); + + xddr_inject_data_ue_store(mci, val0, val1); + + /* Lock the PCSR registers */ + writel(PCSR_LOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET); + writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET); return count; } -static const struct file_operations xddr_inject_enable_fops = { +static const struct file_operations xddr_inject_ue_fops = { .open = simple_open, - .write = inject_data_poison_store, + .write = inject_data_ue_store, .llseek = generic_file_llseek, }; @@ -795,8 +915,17 @@ static void create_debugfs_attributes(struct mem_ctl_info *mci) if (!priv->debugfs) return; - edac_debugfs_create_file("inject_error", 0200, priv->debugfs, - &mci->dev, &xddr_inject_enable_fops); + if (!edac_debugfs_create_file("inject_ce", 0200, priv->debugfs, + &mci->dev, &xddr_inject_ce_fops)) { + debugfs_remove_recursive(priv->debugfs); + return; + } + + if (!edac_debugfs_create_file("inject_ue", 0200, priv->debugfs, + &mci->dev, &xddr_inject_ue_fops)) { + debugfs_remove_recursive(priv->debugfs); + return; + } debugfs_create_x64("address", 0600, priv->debugfs, &priv->err_inject_addr); mci->debugfs = priv->debugfs; @@ -1031,7 +1160,7 @@ free_edac_mc: return rc; } -static int mc_remove(struct platform_device *pdev) +static void mc_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); struct edac_priv *priv = mci->pvt_info; @@ -1049,8 +1178,6 @@ static int mc_remove(struct platform_device *pdev) XPM_EVENT_ERROR_MASK_DDRMC_NCR, err_callback, mci); edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); - - return 0; } static struct platform_driver xilinx_ddr_edac_mc_driver = { @@ -1059,7 +1186,7 @@ static struct platform_driver xilinx_ddr_edac_mc_driver = { .of_match_table = xlnx_edac_match, }, .probe = mc_probe, - .remove = mc_remove, + .remove_new = mc_remove, }; module_platform_driver(xilinx_ddr_edac_mc_driver); diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 6ac5ff20a2fe..401a77e3b5fa 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c @@ -429,7 +429,23 @@ static void bm_work(struct work_struct *work) */ card->bm_generation = generation; - if (root_device == NULL) { + if (card->gap_count == 0) { + /* + * If self IDs have inconsistent gap counts, do a + * bus reset ASAP. The config rom read might never + * complete, so don't wait for it. 
However, still + * send a PHY configuration packet prior to the + * bus reset. The PHY configuration packet might + * fail, but 1394-2008 8.4.5.2 explicitly permits + * it in this case, so it should be safe to try. + */ + new_root_id = local_id; + /* + * We must always send a bus reset if the gap count + * is inconsistent, so bypass the 5-reset limit. + */ + card->bm_retries = 0; + } else if (root_device == NULL) { /* * Either link_on is false, or we failed to read the * config rom. In either case, pick another root. @@ -484,7 +500,19 @@ static void bm_work(struct work_struct *work) fw_notice(card, "phy config: new root=%x, gap_count=%d\n", new_root_id, gap_count); fw_send_phy_config(card, new_root_id, generation, gap_count); - reset_bus(card, true); + /* + * Where possible, use a short bus reset to minimize + * disruption to isochronous transfers. But in the event + * of a gap count inconsistency, use a long bus reset. + * + * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus + * may set different gap counts after a bus reset. On a mixed + * 1394/1394a bus, a short bus reset can get doubled. Some + * nodes may treat the double reset as one bus reset and others + * may treat it as two, causing a gap count inconsistency + * again. Using a long bus reset prevents this. + */ + reset_bus(card, card->gap_count != 0); /* Will allocate broadcast channel after the reset. */ goto out; } diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 0547253d16fe..7d3346b3a2bf 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -118,10 +118,9 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size) * @buf: where to put the string * @size: size of @buf, in bytes * - * The string is taken from a minimal ASCII text descriptor leaf after - * the immediate entry with @key. The string is zero-terminated. - * An overlong string is silently truncated such that it and the - * zero byte fit into @size. + * The string is taken from a minimal ASCII text descriptor leaf just after the entry with the + * @key. The string is zero-terminated. An overlong string is silently truncated such that it + * and the zero byte fit into @size. * * Returns strlen(buf) or a negative error code. */ @@ -368,8 +367,17 @@ static ssize_t show_text_leaf(struct device *dev, for (i = 0; i < ARRAY_SIZE(directories) && !!directories[i]; ++i) { int result = fw_csr_string(directories[i], attr->key, buf, bufsize); // Detected. - if (result >= 0) + if (result >= 0) { ret = result; + } else if (i == 0 && attr->key == CSR_VENDOR) { + // Sony DVMC-DA1 has configuration ROM such that the descriptor leaf entry + // in the root directory follows to the directory entry for vendor ID + // instead of the immediate value for vendor ID. 
+ result = fw_csr_string(directories[i], CSR_DIRECTORY | attr->key, buf, + bufsize); + if (result >= 0) + ret = result; + } } if (ret >= 0) { diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 9db9290c3269..7bc71f4be64a 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -3773,6 +3773,7 @@ static int pci_probe(struct pci_dev *dev, return 0; fail_msi: + devm_free_irq(&dev->dev, dev->irq, ohci); pci_disable_msi(dev); return err; @@ -3800,6 +3801,7 @@ static void pci_remove(struct pci_dev *dev) software_reset(ohci); + devm_free_irq(&dev->dev, dev->irq, ohci); pci_disable_msi(dev); dev_notice(&dev->dev, "removing fw-ohci device\n"); diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c index 1c7940ba5539..2f557e90f2eb 100644 --- a/drivers/firmware/arm_ffa/bus.c +++ b/drivers/firmware/arm_ffa/bus.c @@ -105,7 +105,7 @@ static struct attribute *ffa_device_attributes_attrs[] = { }; ATTRIBUTE_GROUPS(ffa_device_attributes); -struct bus_type ffa_bus_type = { +const struct bus_type ffa_bus_type = { .name = "arm_ffa", .match = ffa_device_match, .probe = ffa_device_probe, diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index c15928b8c5cc..77c78be6e79c 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -141,6 +141,17 @@ out: return ret; } +static int scmi_protocol_table_register(const struct scmi_device_id *id_table) +{ + int ret = 0; + const struct scmi_device_id *entry; + + for (entry = id_table; entry->name && ret == 0; entry++) + ret = scmi_protocol_device_request(entry); + + return ret; +} + /** * scmi_protocol_device_unrequest - Helper to unrequest a device * @@ -186,6 +197,15 @@ static void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table mutex_unlock(&scmi_requested_devices_mtx); } +static void +scmi_protocol_table_unregister(const struct scmi_device_id *id_table) +{ + const struct scmi_device_id *entry; + + for (entry = id_table; entry->name; entry++) + scmi_protocol_device_unrequest(entry); +} + static const struct scmi_device_id * scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv) { @@ -263,7 +283,7 @@ static void scmi_dev_remove(struct device *dev) scmi_drv->remove(scmi_dev); } -struct bus_type scmi_bus_type = { +const struct bus_type scmi_bus_type = { .name = "scmi_protocol", .match = scmi_dev_match, .probe = scmi_dev_probe, @@ -279,7 +299,7 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner, if (!driver->probe) return -EINVAL; - retval = scmi_protocol_device_request(driver->id_table); + retval = scmi_protocol_table_register(driver->id_table); if (retval) return retval; @@ -299,7 +319,7 @@ EXPORT_SYMBOL_GPL(scmi_driver_register); void scmi_driver_unregister(struct scmi_driver *driver) { driver_unregister(&driver->driver); - scmi_protocol_device_unrequest(driver->id_table); + scmi_protocol_table_unregister(driver->id_table); } EXPORT_SYMBOL_GPL(scmi_driver_unregister); diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index e2050adbf85c..134019297d08 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -13,7 +13,7 @@ #include "notify.h" /* Updated only after ALL the mandatory features for that version are merged */ -#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000 +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000 enum scmi_clock_protocol_cmd { CLOCK_ATTRIBUTES = 0x3, @@ -28,8 +28,13 @@ enum scmi_clock_protocol_cmd { 
CLOCK_POSSIBLE_PARENTS_GET = 0xC, CLOCK_PARENT_SET = 0xD, CLOCK_PARENT_GET = 0xE, + CLOCK_GET_PERMISSIONS = 0xF, }; +#define CLOCK_STATE_CONTROL_ALLOWED BIT(31) +#define CLOCK_PARENT_CONTROL_ALLOWED BIT(30) +#define CLOCK_RATE_CONTROL_ALLOWED BIT(29) + enum clk_state { CLK_STATE_DISABLE, CLK_STATE_ENABLE, @@ -49,6 +54,8 @@ struct scmi_msg_resp_clock_attributes { #define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30)) #define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29)) #define SUPPORTS_PARENT_CLOCK(x) ((x) & BIT(28)) +#define SUPPORTS_EXTENDED_CONFIG(x) ((x) & BIT(27)) +#define SUPPORTS_GET_PERMISSIONS(x) ((x) & BIT(1)) u8 name[SCMI_SHORT_NAME_MAX_SIZE]; __le32 clock_enable_latency; }; @@ -152,14 +159,18 @@ struct clock_info { u32 version; int num_clocks; int max_async_req; + bool notify_rate_changed_cmd; + bool notify_rate_change_requested_cmd; atomic_t cur_async_req; struct scmi_clock_info *clk; int (*clock_config_set)(const struct scmi_protocol_handle *ph, u32 clk_id, enum clk_state state, - u8 oem_type, u32 oem_val, bool atomic); + enum scmi_clock_oem_config oem_type, + u32 oem_val, bool atomic); int (*clock_config_get)(const struct scmi_protocol_handle *ph, - u32 clk_id, u8 oem_type, u32 *attributes, - bool *enabled, u32 *oem_val, bool atomic); + u32 clk_id, enum scmi_clock_oem_config oem_type, + u32 *attributes, bool *enabled, u32 *oem_val, + bool atomic); }; static enum scmi_clock_protocol_cmd evt_2_cmd[] = { @@ -167,6 +178,15 @@ static enum scmi_clock_protocol_cmd evt_2_cmd[] = { CLOCK_RATE_CHANGE_REQUESTED_NOTIFY, }; +static inline struct scmi_clock_info * +scmi_clock_domain_lookup(struct clock_info *ci, u32 clk_id) +{ + if (clk_id >= ci->num_clocks) + return ERR_PTR(-EINVAL); + + return ci->clk + clk_id; +} + static int scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph, struct clock_info *ci) @@ -189,6 +209,17 @@ scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph, } ph->xops->xfer_put(ph, t); + + if (!ret) { + if (!ph->hops->protocol_msg_check(ph, CLOCK_RATE_NOTIFY, NULL)) + ci->notify_rate_changed_cmd = true; + + if (!ph->hops->protocol_msg_check(ph, + CLOCK_RATE_CHANGE_REQUESTED_NOTIFY, + NULL)) + ci->notify_rate_change_requested_cmd = true; + } + return ret; } @@ -284,14 +315,44 @@ static int scmi_clock_possible_parents(const struct scmi_protocol_handle *ph, u3 return ret; } +static int +scmi_clock_get_permissions(const struct scmi_protocol_handle *ph, u32 clk_id, + struct scmi_clock_info *clk) +{ + struct scmi_xfer *t; + u32 perm; + int ret; + + ret = ph->xops->xfer_get_init(ph, CLOCK_GET_PERMISSIONS, + sizeof(clk_id), sizeof(perm), &t); + if (ret) + return ret; + + put_unaligned_le32(clk_id, t->tx.buf); + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + perm = get_unaligned_le32(t->rx.buf); + + clk->state_ctrl_forbidden = !(perm & CLOCK_STATE_CONTROL_ALLOWED); + clk->rate_ctrl_forbidden = !(perm & CLOCK_RATE_CONTROL_ALLOWED); + clk->parent_ctrl_forbidden = !(perm & CLOCK_PARENT_CONTROL_ALLOWED); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph, - u32 clk_id, struct scmi_clock_info *clk, + u32 clk_id, struct clock_info *cinfo, u32 version) { int ret; u32 attributes; struct scmi_xfer *t; struct scmi_msg_resp_clock_attributes *attr; + struct scmi_clock_info *clk = cinfo->clk + clk_id; ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES, sizeof(clk_id), sizeof(*attr), &t); @@ -324,12 +385,20 @@ static int scmi_clock_attributes_get(const struct 
scmi_protocol_handle *ph, NULL, clk->name, SCMI_MAX_STR_SIZE); - if (SUPPORTS_RATE_CHANGED_NOTIF(attributes)) + if (cinfo->notify_rate_changed_cmd && + SUPPORTS_RATE_CHANGED_NOTIF(attributes)) clk->rate_changed_notifications = true; - if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes)) + if (cinfo->notify_rate_change_requested_cmd && + SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes)) clk->rate_change_requested_notifications = true; - if (SUPPORTS_PARENT_CLOCK(attributes)) - scmi_clock_possible_parents(ph, clk_id, clk); + if (PROTOCOL_REV_MAJOR(version) >= 0x3) { + if (SUPPORTS_PARENT_CLOCK(attributes)) + scmi_clock_possible_parents(ph, clk_id, clk); + if (SUPPORTS_GET_PERMISSIONS(attributes)) + scmi_clock_get_permissions(ph, clk_id, clk); + if (SUPPORTS_EXTENDED_CONFIG(attributes)) + clk->extended_config = true; + } } return ret; @@ -502,6 +571,14 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph, struct scmi_xfer *t; struct scmi_clock_set_rate *cfg; struct clock_info *ci = ph->get_priv(ph); + struct scmi_clock_info *clk; + + clk = scmi_clock_domain_lookup(ci, clk_id); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + if (clk->rate_ctrl_forbidden) + return -EACCES; ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t); if (ret) @@ -543,7 +620,8 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph, static int scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id, - enum clk_state state, u8 __unused0, u32 __unused1, + enum clk_state state, + enum scmi_clock_oem_config __unused0, u32 __unused1, bool atomic) { int ret; @@ -580,14 +658,16 @@ scmi_clock_set_parent(const struct scmi_protocol_handle *ph, u32 clk_id, struct clock_info *ci = ph->get_priv(ph); struct scmi_clock_info *clk; - if (clk_id >= ci->num_clocks) - return -EINVAL; - - clk = ci->clk + clk_id; + clk = scmi_clock_domain_lookup(ci, clk_id); + if (IS_ERR(clk)) + return PTR_ERR(clk); if (parent_id >= clk->num_parents) return -EINVAL; + if (clk->parent_ctrl_forbidden) + return -EACCES; + ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_SET, sizeof(*cfg), 0, &t); if (ret) @@ -628,10 +708,11 @@ scmi_clock_get_parent(const struct scmi_protocol_handle *ph, u32 clk_id, return ret; } -/* For SCMI clock v2.1 and onwards */ +/* For SCMI clock v3.0 and onwards */ static int scmi_clock_config_set_v2(const struct scmi_protocol_handle *ph, u32 clk_id, - enum clk_state state, u8 oem_type, u32 oem_val, + enum clk_state state, + enum scmi_clock_oem_config oem_type, u32 oem_val, bool atomic) { int ret; @@ -671,6 +752,14 @@ static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id, bool atomic) { struct clock_info *ci = ph->get_priv(ph); + struct scmi_clock_info *clk; + + clk = scmi_clock_domain_lookup(ci, clk_id); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + if (clk->state_ctrl_forbidden) + return -EACCES; return ci->clock_config_set(ph, clk_id, CLK_STATE_ENABLE, NULL_OEM_TYPE, 0, atomic); @@ -680,16 +769,24 @@ static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id, bool atomic) { struct clock_info *ci = ph->get_priv(ph); + struct scmi_clock_info *clk; + + clk = scmi_clock_domain_lookup(ci, clk_id); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + if (clk->state_ctrl_forbidden) + return -EACCES; return ci->clock_config_set(ph, clk_id, CLK_STATE_DISABLE, NULL_OEM_TYPE, 0, atomic); } -/* For SCMI clock v2.1 and onwards */ +/* For SCMI clock v3.0 and onwards */ static int scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, 
u32 clk_id, - u8 oem_type, u32 *attributes, bool *enabled, - u32 *oem_val, bool atomic) + enum scmi_clock_oem_config oem_type, u32 *attributes, + bool *enabled, u32 *oem_val, bool atomic) { int ret; u32 flags; @@ -730,8 +827,8 @@ scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, u32 clk_id, static int scmi_clock_config_get(const struct scmi_protocol_handle *ph, u32 clk_id, - u8 oem_type, u32 *attributes, bool *enabled, - u32 *oem_val, bool atomic) + enum scmi_clock_oem_config oem_type, u32 *attributes, + bool *enabled, u32 *oem_val, bool atomic) { int ret; struct scmi_xfer *t; @@ -768,20 +865,38 @@ static int scmi_clock_state_get(const struct scmi_protocol_handle *ph, } static int scmi_clock_config_oem_set(const struct scmi_protocol_handle *ph, - u32 clk_id, u8 oem_type, u32 oem_val, - bool atomic) + u32 clk_id, + enum scmi_clock_oem_config oem_type, + u32 oem_val, bool atomic) { struct clock_info *ci = ph->get_priv(ph); + struct scmi_clock_info *clk; + + clk = scmi_clock_domain_lookup(ci, clk_id); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + if (!clk->extended_config) + return -EOPNOTSUPP; return ci->clock_config_set(ph, clk_id, CLK_STATE_UNCHANGED, oem_type, oem_val, atomic); } static int scmi_clock_config_oem_get(const struct scmi_protocol_handle *ph, - u32 clk_id, u8 oem_type, u32 *oem_val, - u32 *attributes, bool atomic) + u32 clk_id, + enum scmi_clock_oem_config oem_type, + u32 *oem_val, u32 *attributes, bool atomic) { struct clock_info *ci = ph->get_priv(ph); + struct scmi_clock_info *clk; + + clk = scmi_clock_domain_lookup(ci, clk_id); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + if (!clk->extended_config) + return -EOPNOTSUPP; return ci->clock_config_get(ph, clk_id, oem_type, attributes, NULL, oem_val, atomic); @@ -800,10 +915,10 @@ scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id) struct scmi_clock_info *clk; struct clock_info *ci = ph->get_priv(ph); - if (clk_id >= ci->num_clocks) + clk = scmi_clock_domain_lookup(ci, clk_id); + if (IS_ERR(clk)) return NULL; - clk = ci->clk + clk_id; if (!clk->name[0]) return NULL; @@ -824,6 +939,28 @@ static const struct scmi_clk_proto_ops clk_proto_ops = { .parent_get = scmi_clock_get_parent, }; +static bool scmi_clk_notify_supported(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id) +{ + bool supported; + struct scmi_clock_info *clk; + struct clock_info *ci = ph->get_priv(ph); + + if (evt_id >= ARRAY_SIZE(evt_2_cmd)) + return false; + + clk = scmi_clock_domain_lookup(ci, src_id); + if (IS_ERR(clk)) + return false; + + if (evt_id == SCMI_EVENT_CLOCK_RATE_CHANGED) + supported = clk->rate_changed_notifications; + else + supported = clk->rate_change_requested_notifications; + + return supported; +} + static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph, u32 clk_id, int message_id, bool enable) { @@ -908,6 +1045,7 @@ static const struct scmi_event clk_events[] = { }; static const struct scmi_event_ops clk_event_ops = { + .is_notify_supported = scmi_clk_notify_supported, .get_num_sources = scmi_clk_get_num_sources, .set_notify_enabled = scmi_clk_set_notify_enabled, .fill_custom_report = scmi_clk_fill_custom_report, @@ -949,7 +1087,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph) for (clkid = 0; clkid < cinfo->num_clocks; clkid++) { struct scmi_clock_info *clk = cinfo->clk + clkid; - ret = scmi_clock_attributes_get(ph, clkid, clk, version); + ret = scmi_clock_attributes_get(ph, clkid, cinfo, version); if (!ret) scmi_clock_describe_rates_get(ph, clkid, 
clk); } diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 00b165d1f502..6affbfdd1dec 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -141,7 +141,7 @@ scmi_revision_area_get(const struct scmi_protocol_handle *ph); void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph, u8 *prot_imp); -extern struct bus_type scmi_bus_type; +extern const struct bus_type scmi_bus_type; #define SCMI_BUS_NOTIFY_DEVICE_REQUEST 0 #define SCMI_BUS_NOTIFY_DEVICE_UNREQUEST 1 diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 3ea64b22cf0d..34d77802c990 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -86,6 +86,12 @@ struct scmi_xfers_info { * @users: A refcount to track effective users of this protocol. * @priv: Reference for optional protocol private data. * @version: Protocol version supported by the platform as detected at runtime. + * @negotiated_version: When the platform supports a newer protocol version, + * the agent will try to negotiate with the platform the + * usage of the newest version known to it, since + * backward compatibility is NOT automatically assured. + * This field is NON-zero when a successful negotiation + * has completed. * @ph: An embedded protocol handle that will be passed down to protocol * initialization code to identify this instance. * @@ -99,6 +105,7 @@ struct scmi_protocol_instance { refcount_t users; void *priv; unsigned int version; + unsigned int negotiated_version; struct scmi_protocol_handle ph; }; @@ -1754,10 +1761,44 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db) #endif } +/** + * scmi_protocol_msg_check - Check protocol message attributes + * + * @ph: A reference to the protocol handle. + * @message_id: The ID of the message to check. + * @attributes: A parameter to optionally return the retrieved message + * attributes, in case of Success. + * + * An helper to check protocol message attributes for a specific protocol + * and message pair. + * + * Return: 0 on SUCCESS + */ +static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph, + u32 message_id, u32 *attributes) +{ + int ret; + struct scmi_xfer *t; + + ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES, + sizeof(__le32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(message_id, t->tx.buf); + ret = do_xfer(ph, t); + if (!ret && attributes) + *attributes = get_unaligned_le32(t->rx.buf); + xfer_put(ph, t); + + return ret; +} + static const struct scmi_proto_helpers_ops helpers_ops = { .extended_name_get = scmi_common_extended_name_get, .iter_response_init = scmi_iterator_init, .iter_response_run = scmi_iterator_run, + .protocol_msg_check = scmi_protocol_msg_check, .fastchannel_init = scmi_common_fastchannel_init, .fastchannel_db_ring = scmi_common_fastchannel_db_ring, }; @@ -1782,6 +1823,44 @@ scmi_revision_area_get(const struct scmi_protocol_handle *ph) } /** + * scmi_protocol_version_negotiate - Negotiate protocol version + * + * @ph: A reference to the protocol handle. + * + * An helper to negotiate a protocol version different from the latest + * advertised as supported from the platform: on Success backward + * compatibility is assured by the platform. 
+ * + * Return: 0 on Success + */ +static int scmi_protocol_version_negotiate(struct scmi_protocol_handle *ph) +{ + int ret; + struct scmi_xfer *t; + struct scmi_protocol_instance *pi = ph_to_pi(ph); + + /* At first check if NEGOTIATE_PROTOCOL_VERSION is supported ... */ + ret = scmi_protocol_msg_check(ph, NEGOTIATE_PROTOCOL_VERSION, NULL); + if (ret) + return ret; + + /* ... then attempt protocol version negotiation */ + ret = xfer_get_init(ph, NEGOTIATE_PROTOCOL_VERSION, + sizeof(__le32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(pi->proto->supported_version, t->tx.buf); + ret = do_xfer(ph, t); + if (!ret) + pi->negotiated_version = pi->proto->supported_version; + + xfer_put(ph, t); + + return ret; +} + +/** * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol * instance descriptor. * @info: The reference to the related SCMI instance. @@ -1853,11 +1932,21 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info, devres_close_group(handle->dev, pi->gid); dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id); - if (pi->version > proto->supported_version) - dev_warn(handle->dev, - "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X." - "Backward compatibility is NOT assured.\n", - pi->version, pi->proto->id); + if (pi->version > proto->supported_version) { + ret = scmi_protocol_version_negotiate(&pi->ph); + if (!ret) { + dev_info(handle->dev, + "Protocol 0x%X successfully negotiated version 0x%X\n", + proto->id, pi->negotiated_version); + } else { + dev_warn(handle->dev, + "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X.\n", + pi->version, pi->proto->id); + dev_warn(handle->dev, + "Trying version 0x%X. Backward compatibility is NOT assured.\n", + pi->proto->supported_version); + } + } return pi; diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c index 0efd20cd9d69..27c52531194d 100644 --- a/drivers/firmware/arm_scmi/notify.c +++ b/drivers/firmware/arm_scmi/notify.c @@ -99,6 +99,7 @@ #define PROTO_ID_MASK GENMASK(31, 24) #define EVT_ID_MASK GENMASK(23, 16) #define SRC_ID_MASK GENMASK(15, 0) +#define NOTIF_UNSUPP -1 /* * Builds an unsigned 32bit key from the given input tuple to be used @@ -788,6 +789,7 @@ int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id, pd->ph = ph; for (i = 0; i < ee->num_events; i++, evt++) { + int id; struct scmi_registered_event *r_evt; r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt), @@ -809,6 +811,11 @@ int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id, if (!r_evt->report) return -ENOMEM; + for (id = 0; id < r_evt->num_sources; id++) + if (ee->ops->is_notify_supported && + !ee->ops->is_notify_supported(ph, r_evt->evt->id, id)) + refcount_set(&r_evt->sources[id], NOTIF_UNSUPP); + pd->registered_events[i] = r_evt; /* Ensure events are updated */ smp_wmb(); @@ -1166,7 +1173,13 @@ static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt, int ret = 0; sid = &r_evt->sources[src_id]; - if (refcount_read(sid) == 0) { + if (refcount_read(sid) == NOTIF_UNSUPP) { + dev_dbg(r_evt->proto->ph->dev, + "Notification NOT supported - proto_id:%d evt_id:%d src_id:%d", + r_evt->proto->id, r_evt->evt->id, + src_id); + ret = -EOPNOTSUPP; + } else if (refcount_read(sid) == 0) { ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id, src_id); if (!ret) @@ -1179,6 +1192,8 @@ static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt, } else { for (; num_sources; src_id++, num_sources--) { sid = 
&r_evt->sources[src_id]; + if (refcount_read(sid) == NOTIF_UNSUPP) + continue; if (refcount_dec_and_test(sid)) REVT_NOTIFY_DISABLE(r_evt, r_evt->evt->id, src_id); diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h index 4e9b627edfef..76758a736cf4 100644 --- a/drivers/firmware/arm_scmi/notify.h +++ b/drivers/firmware/arm_scmi/notify.h @@ -35,6 +35,8 @@ struct scmi_protocol_handle; /** * struct scmi_event_ops - Protocol helpers called by the notification core. + * @is_notify_supported: Return 0 if the specified notification for the + * specified resource (src_id) is supported. * @get_num_sources: Returns the number of possible events' sources for this * protocol * @set_notify_enabled: Enable/disable the required evt_id/src_id notifications @@ -50,6 +52,8 @@ struct scmi_protocol_handle; * process context. */ struct scmi_event_ops { + bool (*is_notify_supported)(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id); int (*get_num_sources)(const struct scmi_protocol_handle *ph); int (*set_notify_enabled)(const struct scmi_protocol_handle *ph, u8 evt_id, u32 src_id, bool enabled); diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index 25bfb465484d..4e7944b91e38 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -109,8 +109,10 @@ enum scmi_optee_pta_cmd { * @rx_len: Response size * @mu: Mutex protection on channel access * @cinfo: SCMI channel information - * @shmem: Virtual base address of the shared memory - * @req: Shared memory protocol handle for SCMI request and synchronous response + * @req: union for SCMI interface + * @req.shmem: Virtual base address of the shared memory + * @req.msg: Shared memory protocol handle for SCMI request and + * synchronous response * @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem * @link: Reference in agent's channel list */ diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 211e8e0aef2c..981e327e63e3 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -182,6 +182,8 @@ struct scmi_perf_info { enum scmi_power_scale power_scale; u64 stats_addr; u32 stats_size; + bool notify_lvl_cmd; + bool notify_lim_cmd; struct perf_dom_info *dom_info; }; @@ -222,6 +224,15 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph, } ph->xops->xfer_put(ph, t); + + if (!ret) { + if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LEVEL, NULL)) + pi->notify_lvl_cmd = true; + + if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LIMITS, NULL)) + pi->notify_lim_cmd = true; + } + return ret; } @@ -239,6 +250,7 @@ static void scmi_perf_xa_destroy(void *data) static int scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, struct perf_dom_info *dom_info, + bool notify_lim_cmd, bool notify_lvl_cmd, u32 version) { int ret; @@ -260,8 +272,12 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, dom_info->set_limits = SUPPORTS_SET_LIMITS(flags); dom_info->info.set_perf = SUPPORTS_SET_PERF_LVL(flags); - dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags); - dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags); + if (notify_lim_cmd) + dom_info->perf_limit_notify = + SUPPORTS_PERF_LIMIT_NOTIFY(flags); + if (notify_lvl_cmd) + dom_info->perf_level_notify = + SUPPORTS_PERF_LEVEL_NOTIFY(flags); dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags); if (PROTOCOL_REV_MAJOR(version) >= 0x4) 
dom_info->level_indexing_mode = @@ -270,15 +286,30 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, le32_to_cpu(attr->sustained_freq_khz); dom_info->sustained_perf_level = le32_to_cpu(attr->sustained_perf_level); + /* + * sustained_freq_khz = mult_factor * sustained_perf_level + * mult_factor must be non zero positive integer(not fraction) + */ if (!dom_info->sustained_freq_khz || !dom_info->sustained_perf_level || - dom_info->level_indexing_mode) + dom_info->level_indexing_mode) { /* CPUFreq converts to kHz, hence default 1000 */ dom_info->mult_factor = 1000; - else + } else { dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000UL) / dom_info->sustained_perf_level; + if ((dom_info->sustained_freq_khz * 1000UL) % + dom_info->sustained_perf_level) + dev_warn(ph->dev, + "multiplier for domain %d rounded\n", + dom_info->id); + } + if (!dom_info->mult_factor) + dev_warn(ph->dev, + "Wrong sustained perf/frequency(domain %d)\n", + dom_info->id); + strscpy(dom_info->info.name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); } @@ -295,9 +326,9 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, dom_info->id, NULL, dom_info->info.name, SCMI_MAX_STR_SIZE); + xa_init(&dom_info->opps_by_lvl); if (dom_info->level_indexing_mode) { xa_init(&dom_info->opps_by_idx); - xa_init(&dom_info->opps_by_lvl); hash_init(dom_info->opps_by_freq); } @@ -340,13 +371,21 @@ static int iter_perf_levels_update_state(struct scmi_iterator_state *st, } static inline void -process_response_opp(struct scmi_opp *opp, unsigned int loop_idx, +process_response_opp(struct device *dev, struct perf_dom_info *dom, + struct scmi_opp *opp, unsigned int loop_idx, const struct scmi_msg_resp_perf_describe_levels *r) { + int ret; + opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val); opp->power = le32_to_cpu(r->opp[loop_idx].power); opp->trans_latency_us = le16_to_cpu(r->opp[loop_idx].transition_latency_us); + + ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL); + if (ret) + dev_warn(dev, "Failed to add opps_by_lvl at %d - ret:%d\n", + opp->perf, ret); } static inline void @@ -354,16 +393,21 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom, struct scmi_opp *opp, unsigned int loop_idx, const struct scmi_msg_resp_perf_describe_levels_v4 *r) { + int ret; + opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val); opp->power = le32_to_cpu(r->opp[loop_idx].power); opp->trans_latency_us = le16_to_cpu(r->opp[loop_idx].transition_latency_us); + ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL); + if (ret) + dev_warn(dev, "Failed to add opps_by_lvl at %d - ret:%d\n", + opp->perf, ret); + /* Note that PERF v4 reports always five 32-bit words */ opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq); if (dom->level_indexing_mode) { - int ret; - opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index); ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp, @@ -373,12 +417,6 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom, "Failed to add opps_by_idx at %d - ret:%d\n", opp->level_index, ret); - ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL); - if (ret) - dev_warn(dev, - "Failed to add opps_by_lvl at %d - ret:%d\n", - opp->perf, ret); - hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq); } } @@ -393,7 +431,8 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph, opp = &p->perf_dom->opp[st->desc_index + st->loop_idx]; if (PROTOCOL_REV_MAJOR(p->version) <= 0x3) - 
process_response_opp(opp, st->loop_idx, response); + process_response_opp(ph->dev, p->perf_dom, opp, st->loop_idx, + response); else process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx, response); @@ -978,6 +1017,27 @@ static const struct scmi_perf_proto_ops perf_proto_ops = { .power_scale_get = scmi_power_scale_get, }; +static bool scmi_perf_notify_supported(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id) +{ + bool supported; + struct perf_dom_info *dom; + + if (evt_id >= ARRAY_SIZE(evt_2_cmd)) + return false; + + dom = scmi_perf_domain_lookup(ph, src_id); + if (IS_ERR(dom)) + return false; + + if (evt_id == SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED) + supported = dom->perf_limit_notify; + else + supported = dom->perf_level_notify; + + return supported; +} + static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph, u8 evt_id, u32 src_id, bool enable) { @@ -995,18 +1055,47 @@ static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph, return ret; } +static int +scmi_perf_xlate_opp_to_freq(struct perf_dom_info *dom, + unsigned int index, unsigned long *freq) +{ + struct scmi_opp *opp; + + if (!dom || !freq) + return -EINVAL; + + if (!dom->level_indexing_mode) { + opp = xa_load(&dom->opps_by_lvl, index); + if (!opp) + return -ENODEV; + + *freq = opp->perf * dom->mult_factor; + } else { + opp = xa_load(&dom->opps_by_idx, index); + if (!opp) + return -ENODEV; + + *freq = opp->indicative_freq * dom->mult_factor; + } + + return 0; +} + static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph, u8 evt_id, ktime_t timestamp, const void *payld, size_t payld_sz, void *report, u32 *src_id) { + int ret; void *rep = NULL; + struct perf_dom_info *dom; switch (evt_id) { case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED: { const struct scmi_perf_limits_notify_payld *p = payld; struct scmi_perf_limits_report *r = report; + unsigned long freq_min, freq_max; if (sizeof(*p) != payld_sz) break; @@ -1016,14 +1105,36 @@ static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph, r->domain_id = le32_to_cpu(p->domain_id); r->range_max = le32_to_cpu(p->range_max); r->range_min = le32_to_cpu(p->range_min); + /* Check if the reported domain exist at all */ + dom = scmi_perf_domain_lookup(ph, r->domain_id); + if (IS_ERR(dom)) + break; + /* + * Event will be reported from this point on... + * ...even if, later, xlated frequencies were not retrieved. + */ *src_id = r->domain_id; rep = r; + + ret = scmi_perf_xlate_opp_to_freq(dom, r->range_max, &freq_max); + if (ret) + break; + + ret = scmi_perf_xlate_opp_to_freq(dom, r->range_min, &freq_min); + if (ret) + break; + + /* Report translated freqs ONLY if both available */ + r->range_max_freq = freq_max; + r->range_min_freq = freq_min; + break; } case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED: { const struct scmi_perf_level_notify_payld *p = payld; struct scmi_perf_level_report *r = report; + unsigned long freq; if (sizeof(*p) != payld_sz) break; @@ -1031,9 +1142,27 @@ static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph, r->timestamp = timestamp; r->agent_id = le32_to_cpu(p->agent_id); r->domain_id = le32_to_cpu(p->domain_id); + /* Report translated freqs ONLY if available */ r->performance_level = le32_to_cpu(p->performance_level); + /* Check if the reported domain exist at all */ + dom = scmi_perf_domain_lookup(ph, r->domain_id); + if (IS_ERR(dom)) + break; + /* + * Event will be reported from this point on... 
+ * ...even if, later, xlated frequencies were not retrieved. + */ *src_id = r->domain_id; rep = r; + + /* Report translated freqs ONLY if available */ + ret = scmi_perf_xlate_opp_to_freq(dom, r->performance_level, + &freq); + if (ret) + break; + + r->performance_level_freq = freq; + break; } default: @@ -1067,6 +1196,7 @@ static const struct scmi_event perf_events[] = { }; static const struct scmi_event_ops perf_event_ops = { + .is_notify_supported = scmi_perf_notify_supported, .get_num_sources = scmi_perf_get_num_sources, .set_notify_enabled = scmi_perf_set_notify_enabled, .fill_custom_report = scmi_perf_fill_custom_report, @@ -1111,7 +1241,8 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph) struct perf_dom_info *dom = pinfo->dom_info + domain; dom->id = domain; - scmi_perf_domain_attributes_get(ph, dom, version); + scmi_perf_domain_attributes_get(ph, dom, pinfo->notify_lim_cmd, + pinfo->notify_lvl_cmd, version); scmi_perf_describe_levels_get(ph, dom, version); if (dom->perf_fastchannels) diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c index c2e6b9b4d941..49666bd1d8ac 100644 --- a/drivers/firmware/arm_scmi/power.c +++ b/drivers/firmware/arm_scmi/power.c @@ -68,6 +68,7 @@ struct power_dom_info { struct scmi_power_info { u32 version; + bool notify_state_change_cmd; int num_domains; u64 stats_addr; u32 stats_size; @@ -97,13 +98,18 @@ static int scmi_power_attributes_get(const struct scmi_protocol_handle *ph, } ph->xops->xfer_put(ph, t); + + if (!ret) + if (!ph->hops->protocol_msg_check(ph, POWER_STATE_NOTIFY, NULL)) + pi->notify_state_change_cmd = true; + return ret; } static int scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph, u32 domain, struct power_dom_info *dom_info, - u32 version) + u32 version, bool notify_state_change_cmd) { int ret; u32 flags; @@ -122,7 +128,9 @@ scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph, if (!ret) { flags = le32_to_cpu(attr->flags); - dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags); + if (notify_state_change_cmd) + dom_info->state_set_notify = + SUPPORTS_STATE_SET_NOTIFY(flags); dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags); dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags); strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); @@ -231,6 +239,20 @@ static int scmi_power_request_notify(const struct scmi_protocol_handle *ph, return ret; } +static bool scmi_power_notify_supported(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id) +{ + struct power_dom_info *dom; + struct scmi_power_info *pinfo = ph->get_priv(ph); + + if (evt_id != SCMI_EVENT_POWER_STATE_CHANGED || + src_id >= pinfo->num_domains) + return false; + + dom = pinfo->dom_info + src_id; + return dom->state_set_notify; +} + static int scmi_power_set_notify_enabled(const struct scmi_protocol_handle *ph, u8 evt_id, u32 src_id, bool enable) { @@ -285,6 +307,7 @@ static const struct scmi_event power_events[] = { }; static const struct scmi_event_ops power_event_ops = { + .is_notify_supported = scmi_power_notify_supported, .get_num_sources = scmi_power_get_num_sources, .set_notify_enabled = scmi_power_set_notify_enabled, .fill_custom_report = scmi_power_fill_custom_report, @@ -326,7 +349,8 @@ static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph) for (domain = 0; domain < pinfo->num_domains; domain++) { struct power_dom_info *dom = pinfo->dom_info + domain; - scmi_power_domain_attributes_get(ph, domain, dom, version); + 
scmi_power_domain_attributes_get(ph, domain, dom, version, + pinfo->notify_state_change_cmd); } pinfo->version = version; diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c index a4c6cd4716fe..2fab92367e42 100644 --- a/drivers/firmware/arm_scmi/powercap.c +++ b/drivers/firmware/arm_scmi/powercap.c @@ -124,6 +124,8 @@ struct scmi_powercap_state { struct powercap_info { u32 version; int num_domains; + bool notify_cap_cmd; + bool notify_measurements_cmd; struct scmi_powercap_state *states; struct scmi_powercap_info *powercaps; }; @@ -157,6 +159,18 @@ scmi_powercap_attributes_get(const struct scmi_protocol_handle *ph, } ph->xops->xfer_put(ph, t); + + if (!ret) { + if (!ph->hops->protocol_msg_check(ph, + POWERCAP_CAP_NOTIFY, NULL)) + pi->notify_cap_cmd = true; + + if (!ph->hops->protocol_msg_check(ph, + POWERCAP_MEASUREMENTS_NOTIFY, + NULL)) + pi->notify_measurements_cmd = true; + } + return ret; } @@ -200,10 +214,12 @@ scmi_powercap_domain_attributes_get(const struct scmi_protocol_handle *ph, flags = le32_to_cpu(resp->attributes); dom_info->id = domain; - dom_info->notify_powercap_cap_change = - SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags); - dom_info->notify_powercap_measurement_change = - SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags); + if (pinfo->notify_cap_cmd) + dom_info->notify_powercap_cap_change = + SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags); + if (pinfo->notify_measurements_cmd) + dom_info->notify_powercap_measurement_change = + SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags); dom_info->async_powercap_cap_set = SUPPORTS_ASYNC_POWERCAP_CAP_SET(flags); dom_info->powercap_cap_config = @@ -788,6 +804,26 @@ static int scmi_powercap_notify(const struct scmi_protocol_handle *ph, return ret; } +static bool +scmi_powercap_notify_supported(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id) +{ + bool supported = false; + const struct scmi_powercap_info *dom_info; + struct powercap_info *pi = ph->get_priv(ph); + + if (evt_id >= ARRAY_SIZE(evt_2_cmd) || src_id >= pi->num_domains) + return false; + + dom_info = pi->powercaps + src_id; + if (evt_id == SCMI_EVENT_POWERCAP_CAP_CHANGED) + supported = dom_info->notify_powercap_cap_change; + else if (evt_id == SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED) + supported = dom_info->notify_powercap_measurement_change; + + return supported; +} + static int scmi_powercap_set_notify_enabled(const struct scmi_protocol_handle *ph, u8 evt_id, u32 src_id, bool enable) @@ -904,6 +940,7 @@ static const struct scmi_event powercap_events[] = { }; static const struct scmi_event_ops powercap_event_ops = { + .is_notify_supported = scmi_powercap_notify_supported, .get_num_sources = scmi_powercap_get_num_sources, .set_notify_enabled = scmi_powercap_set_notify_enabled, .fill_custom_report = scmi_powercap_fill_custom_report, diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h index e683c26f24eb..693019fff0f6 100644 --- a/drivers/firmware/arm_scmi/protocols.h +++ b/drivers/firmware/arm_scmi/protocols.h @@ -33,6 +33,7 @@ enum scmi_common_cmd { PROTOCOL_VERSION = 0x0, PROTOCOL_ATTRIBUTES = 0x1, PROTOCOL_MESSAGE_ATTRIBUTES = 0x2, + NEGOTIATE_PROTOCOL_VERSION = 0x10, }; /** @@ -251,6 +252,8 @@ struct scmi_fc_info { * provided in @ops. * @iter_response_run: A common helper to trigger the run of a previously * initialized iterator. + * @protocol_msg_check: A common helper to check is a specific protocol message + * is supported. 
* @fastchannel_init: A common helper used to initialize FC descriptors by * gathering FC descriptions from the SCMI platform server. * @fastchannel_db_ring: A common helper to ring a FC doorbell. @@ -264,6 +267,8 @@ struct scmi_proto_helpers_ops { unsigned int max_resources, u8 msg_id, size_t tx_size, void *priv); int (*iter_response_run)(void *iter); + int (*protocol_msg_check)(const struct scmi_protocol_handle *ph, + u32 message_id, u32 *attributes); void (*fastchannel_init)(const struct scmi_protocol_handle *ph, u8 describe_id, u32 message_id, u32 valid_size, u32 domain, diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c index 19970d9f9e36..1b318316535e 100644 --- a/drivers/firmware/arm_scmi/reset.c +++ b/drivers/firmware/arm_scmi/reset.c @@ -67,6 +67,7 @@ struct reset_dom_info { struct scmi_reset_info { u32 version; int num_domains; + bool notify_reset_cmd; struct reset_dom_info *dom_info; }; @@ -89,18 +90,24 @@ static int scmi_reset_attributes_get(const struct scmi_protocol_handle *ph, } ph->xops->xfer_put(ph, t); + + if (!ret) + if (!ph->hops->protocol_msg_check(ph, RESET_NOTIFY, NULL)) + pi->notify_reset_cmd = true; + return ret; } static int scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph, - u32 domain, struct reset_dom_info *dom_info, - u32 version) + struct scmi_reset_info *pinfo, + u32 domain, u32 version) { int ret; u32 attributes; struct scmi_xfer *t; struct scmi_msg_resp_reset_domain_attributes *attr; + struct reset_dom_info *dom_info = pinfo->dom_info + domain; ret = ph->xops->xfer_get_init(ph, RESET_DOMAIN_ATTRIBUTES, sizeof(domain), sizeof(*attr), &t); @@ -115,7 +122,9 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph, attributes = le32_to_cpu(attr->attributes); dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes); - dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes); + if (pinfo->notify_reset_cmd) + dom_info->reset_notify = + SUPPORTS_NOTIFY_RESET(attributes); dom_info->latency_us = le32_to_cpu(attr->latency); if (dom_info->latency_us == U32_MAX) dom_info->latency_us = 0; @@ -226,6 +235,20 @@ static const struct scmi_reset_proto_ops reset_proto_ops = { .deassert = scmi_reset_domain_deassert, }; +static bool scmi_reset_notify_supported(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id) +{ + struct reset_dom_info *dom; + struct scmi_reset_info *pi = ph->get_priv(ph); + + if (evt_id != SCMI_EVENT_RESET_ISSUED || src_id >= pi->num_domains) + return false; + + dom = pi->dom_info + src_id; + + return dom->reset_notify; +} + static int scmi_reset_notify(const struct scmi_protocol_handle *ph, u32 domain_id, bool enable) { @@ -301,6 +324,7 @@ static const struct scmi_event reset_events[] = { }; static const struct scmi_event_ops reset_event_ops = { + .is_notify_supported = scmi_reset_notify_supported, .get_num_sources = scmi_reset_get_num_sources, .set_notify_enabled = scmi_reset_set_notify_enabled, .fill_custom_report = scmi_reset_fill_custom_report, @@ -339,11 +363,8 @@ static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph) if (!pinfo->dom_info) return -ENOMEM; - for (domain = 0; domain < pinfo->num_domains; domain++) { - struct reset_dom_info *dom = pinfo->dom_info + domain; - - scmi_reset_domain_attributes_get(ph, domain, dom, version); - } + for (domain = 0; domain < pinfo->num_domains; domain++) + scmi_reset_domain_attributes_get(ph, pinfo, domain, version); pinfo->version = version; return ph->set_priv(ph, pinfo, version); diff --git 
a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index 311149965370..7fc5535ca34c 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -215,6 +215,8 @@ struct scmi_sensor_update_notify_payld { struct sensors_info { u32 version; + bool notify_trip_point_cmd; + bool notify_continuos_update_cmd; int num_sensors; int max_requests; u64 reg_addr; @@ -246,6 +248,18 @@ static int scmi_sensor_attributes_get(const struct scmi_protocol_handle *ph, } ph->xops->xfer_put(ph, t); + + if (!ret) { + if (!ph->hops->protocol_msg_check(ph, + SENSOR_TRIP_POINT_NOTIFY, NULL)) + si->notify_trip_point_cmd = true; + + if (!ph->hops->protocol_msg_check(ph, + SENSOR_CONTINUOUS_UPDATE_NOTIFY, + NULL)) + si->notify_continuos_update_cmd = true; + } + return ret; } @@ -594,7 +608,8 @@ iter_sens_descr_process_response(const struct scmi_protocol_handle *ph, * Such bitfields are assumed to be zeroed on non * relevant fw versions...assuming fw not buggy ! */ - s->update = SUPPORTS_UPDATE_NOTIFY(attrl); + if (si->notify_continuos_update_cmd) + s->update = SUPPORTS_UPDATE_NOTIFY(attrl); s->timestamped = SUPPORTS_TIMESTAMP(attrl); if (s->timestamped) s->tstamp_scale = S32_EXT(SENSOR_TSTAMP_EXP(attrl)); @@ -988,6 +1003,25 @@ static const struct scmi_sensor_proto_ops sensor_proto_ops = { .config_set = scmi_sensor_config_set, }; +static bool scmi_sensor_notify_supported(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id) +{ + bool supported = false; + const struct scmi_sensor_info *s; + struct sensors_info *sinfo = ph->get_priv(ph); + + s = scmi_sensor_info_get(ph, src_id); + if (!s) + return false; + + if (evt_id == SCMI_EVENT_SENSOR_TRIP_POINT_EVENT) + supported = sinfo->notify_trip_point_cmd; + else if (evt_id == SCMI_EVENT_SENSOR_UPDATE) + supported = s->update; + + return supported; +} + static int scmi_sensor_set_notify_enabled(const struct scmi_protocol_handle *ph, u8 evt_id, u32 src_id, bool enable) { @@ -1099,6 +1133,7 @@ static const struct scmi_event sensor_events[] = { }; static const struct scmi_event_ops sensor_event_ops = { + .is_notify_supported = scmi_sensor_notify_supported, .get_num_sources = scmi_sensor_get_num_sources, .set_notify_enabled = scmi_sensor_set_notify_enabled, .fill_custom_report = scmi_sensor_fill_custom_report, diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index 7611e9665038..39936e1dd30e 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -214,6 +214,13 @@ static int smc_chan_free(int id, void *p, void *data) struct scmi_chan_info *cinfo = p; struct scmi_smc *scmi_info = cinfo->transport_info; + /* + * Different protocols might share the same chan info, so a previous + * smc_chan_free call might have already freed the structure. 
+ */ + if (!scmi_info) + return 0; + /* Ignore any possible further reception on the IRQ path */ if (scmi_info->irq > 0) free_irq(scmi_info->irq, scmi_info); diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c index 1621da97bcbb..b6358c155f7f 100644 --- a/drivers/firmware/arm_scmi/system.c +++ b/drivers/firmware/arm_scmi/system.c @@ -36,8 +36,20 @@ struct scmi_system_power_state_notifier_payld { struct scmi_system_info { u32 version; bool graceful_timeout_supported; + bool power_state_notify_cmd; }; +static bool scmi_system_notify_supported(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id) +{ + struct scmi_system_info *pinfo = ph->get_priv(ph); + + if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER) + return false; + + return pinfo->power_state_notify_cmd; +} + static int scmi_system_request_notify(const struct scmi_protocol_handle *ph, bool enable) { @@ -114,6 +126,7 @@ static const struct scmi_event system_events[] = { }; static const struct scmi_event_ops system_event_ops = { + .is_notify_supported = scmi_system_notify_supported, .set_notify_enabled = scmi_system_set_notify_enabled, .fill_custom_report = scmi_system_fill_custom_report, }; @@ -147,6 +160,9 @@ static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph) if (PROTOCOL_REV_MAJOR(pinfo->version) >= 0x2) pinfo->graceful_timeout_supported = true; + if (!ph->hops->protocol_msg_check(ph, SYSTEM_POWER_STATE_NOTIFY, NULL)) + pinfo->power_state_notify_cmd = true; + return ph->set_priv(ph, pinfo, version); } diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 83f5bb57fa4c..83092d93f36a 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -107,7 +107,7 @@ static int __init arm_enable_runtime_services(void) efi_memory_desc_t *md; for_each_efi_memory_desc(md) { - int md_size = md->num_pages << EFI_PAGE_SHIFT; + u64 md_size = md->num_pages << EFI_PAGE_SHIFT; struct resource *res; if (!(md->attribute & EFI_MEMORY_SP)) diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index 3e8d4b51a814..97bafb5f7038 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -292,7 +292,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file) return -ENOMEM; } - cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL); + cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL); if (!cap_info->phys) { kfree(cap_info->pages); kfree(cap_info); diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 35c37f667781..9b3884ff81e6 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -523,6 +523,17 @@ static void cper_print_tstamp(const char *pfx, } } +struct ignore_section { + guid_t guid; + const char *name; +}; + +static const struct ignore_section ignore_sections[] = { + { .guid = CPER_SEC_CXL_GEN_MEDIA_GUID, .name = "CXL General Media Event" }, + { .guid = CPER_SEC_CXL_DRAM_GUID, .name = "CXL DRAM Event" }, + { .guid = CPER_SEC_CXL_MEM_MODULE_GUID, .name = "CXL Memory Module Event" }, +}; + static void cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata, int sec_no) @@ -543,6 +554,14 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text); snprintf(newpfx, sizeof(newpfx), "%s ", pfx); + + for (int i = 0; i < ARRAY_SIZE(ignore_sections); i++) { + if (guid_equal(sec_type, 
&ignore_sections[i].guid)) { + printk("%ssection_type: %s\n", newpfx, ignore_sections[i].name); + return; + } + } + if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) { struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata); diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c index d4987d013080..a00e07b853f2 100644 --- a/drivers/firmware/efi/efi-init.c +++ b/drivers/firmware/efi/efi-init.c @@ -144,15 +144,6 @@ static __init int is_usable_memory(efi_memory_desc_t *md) case EFI_CONVENTIONAL_MEMORY: case EFI_PERSISTENT_MEMORY: /* - * Special purpose memory is 'soft reserved', which means it - * is set aside initially, but can be hotplugged back in or - * be assigned to the dax driver after boot. - */ - if (efi_soft_reserve_enabled() && - (md->attribute & EFI_MEMORY_SP)) - return false; - - /* * According to the spec, these regions are no longer reserved * after calling ExitBootServices(). However, we can only use * them as System RAM if they can be mapped writeback cacheable. @@ -196,6 +187,16 @@ static __init void reserve_regions(void) size = npages << PAGE_SHIFT; if (is_memory(md)) { + /* + * Special purpose memory is 'soft reserved', which + * means it is set aside initially. Don't add a memblock + * for it now so that it can be hotplugged back in or + * be assigned to the dax driver after boot. + */ + if (efi_soft_reserve_enabled() && + (md->attribute & EFI_MEMORY_SP)) + continue; + early_init_dt_add_memory_arch(paddr, size); if (!is_usable_memory(md)) diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 06964a3c130f..73f4810f6db3 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \ -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \ -DEFI_HAVE_STRCMP -fno-builtin -fpic \ $(call cc-option,-mno-single-pic-base) -cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE +cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax cflags-$(CONFIG_LOONGARCH) += -fpie cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt @@ -143,7 +143,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS # exist. STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \ --prefix-symbols=__efistub_ -STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20 +STUBCOPY_RELOC-$(CONFIG_RISCV) := -E R_RISCV_HI20\|R_RISCV_$(BITS)\|R_RISCV_RELAX # For LoongArch, keep all the symbols in .init section and make sure that no # absolute symbols references exist. diff --git a/drivers/firmware/efi/libstub/alignedmem.c b/drivers/firmware/efi/libstub/alignedmem.c index 6b83c492c3b8..31928bd87e0f 100644 --- a/drivers/firmware/efi/libstub/alignedmem.c +++ b/drivers/firmware/efi/libstub/alignedmem.c @@ -14,6 +14,7 @@ * @max: the address that the last allocated memory page shall not * exceed * @align: minimum alignment of the base of the allocation + * @memory_type: the type of memory to allocate * * Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according * to @align, which should be >= EFI_ALLOC_ALIGN. 
The last allocated page will diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index bfa30625f5d0..3dc2f9aaf08d 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -24,6 +24,8 @@ static bool efi_noinitrd; static bool efi_nosoftreserve; static bool efi_disable_pci_dma = IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA); +int efi_mem_encrypt; + bool __pure __efi_soft_reserve_enabled(void) { return !efi_nosoftreserve; @@ -75,6 +77,12 @@ efi_status_t efi_parse_options(char const *cmdline) efi_noinitrd = true; } else if (IS_ENABLED(CONFIG_X86_64) && !strcmp(param, "no5lvl")) { efi_no5lvl = true; + } else if (IS_ENABLED(CONFIG_ARCH_HAS_MEM_ENCRYPT) && + !strcmp(param, "mem_encrypt") && val) { + if (parse_option_str(val, "on")) + efi_mem_encrypt = 1; + else if (parse_option_str(val, "off")) + efi_mem_encrypt = -1; } else if (!strcmp(param, "efi") && val) { efi_nochunk = parse_option_str(val, "nochunk"); efi_novamap |= parse_option_str(val, "novamap"); diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 212687c30d79..fc18fd649ed7 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -37,8 +37,8 @@ extern bool efi_no5lvl; extern bool efi_nochunk; extern bool efi_nokaslr; extern int efi_loglevel; +extern int efi_mem_encrypt; extern bool efi_novamap; - extern const efi_system_table_t *efi_system_table; typedef union efi_dxe_services_table efi_dxe_services_table_t; @@ -956,7 +956,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out); efi_status_t efi_random_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long random_seed, - int memory_type, unsigned long alloc_limit); + int memory_type, unsigned long alloc_min, + unsigned long alloc_max); efi_status_t efi_random_get_seed(void); diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c index 62d63f7a2645..1a9808012abd 100644 --- a/drivers/firmware/efi/libstub/kaslr.c +++ b/drivers/firmware/efi/libstub/kaslr.c @@ -119,7 +119,7 @@ efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr, */ status = efi_random_alloc(*reserve_size, min_kimg_align, reserve_addr, phys_seed, - EFI_LOADER_CODE, EFI_ALLOC_LIMIT); + EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT); if (status != EFI_SUCCESS) efi_warn("efi_random_alloc() failed: 0x%lx\n", status); } else { diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c index 674a064b8f7a..4e96a855fdf4 100644 --- a/drivers/firmware/efi/libstub/randomalloc.c +++ b/drivers/firmware/efi/libstub/randomalloc.c @@ -17,7 +17,7 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md, unsigned long size, unsigned long align_shift, - u64 alloc_limit) + u64 alloc_min, u64 alloc_max) { unsigned long align = 1UL << align_shift; u64 first_slot, last_slot, region_end; @@ -30,11 +30,11 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md, return 0; region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1, - alloc_limit); + alloc_max); if (region_end < size) return 0; - first_slot = round_up(md->phys_addr, align); + first_slot = round_up(max(md->phys_addr, alloc_min), align); last_slot = round_down(region_end - size + 1, align); if (first_slot > last_slot) @@ -56,7 +56,8 @@ efi_status_t efi_random_alloc(unsigned long size, unsigned long *addr, unsigned long random_seed, int memory_type, - 
unsigned long alloc_limit) + unsigned long alloc_min, + unsigned long alloc_max) { unsigned long total_slots = 0, target_slot; unsigned long total_mirrored_slots = 0; @@ -78,7 +79,8 @@ efi_status_t efi_random_alloc(unsigned long size, efi_memory_desc_t *md = (void *)map->map + map_offset; unsigned long slots; - slots = get_entry_num_slots(md, size, ilog2(align), alloc_limit); + slots = get_entry_num_slots(md, size, ilog2(align), alloc_min, + alloc_max); MD_NUM_SLOTS(md) = slots; total_slots += slots; if (md->attribute & EFI_MEMORY_MORE_RELIABLE) diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index 0d510c9a06a4..0336ed175e67 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -223,8 +223,8 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params) } } -void efi_adjust_memory_range_protection(unsigned long start, - unsigned long size) +efi_status_t efi_adjust_memory_range_protection(unsigned long start, + unsigned long size) { efi_status_t status; efi_gcd_memory_space_desc_t desc; @@ -236,13 +236,17 @@ void efi_adjust_memory_range_protection(unsigned long start, rounded_end = roundup(start + size, EFI_PAGE_SIZE); if (memattr != NULL) { - efi_call_proto(memattr, clear_memory_attributes, rounded_start, - rounded_end - rounded_start, EFI_MEMORY_XP); - return; + status = efi_call_proto(memattr, clear_memory_attributes, + rounded_start, + rounded_end - rounded_start, + EFI_MEMORY_XP); + if (status != EFI_SUCCESS) + efi_warn("Failed to clear EFI_MEMORY_XP attribute\n"); + return status; } if (efi_dxe_table == NULL) - return; + return EFI_SUCCESS; /* * Don't modify memory region attributes, they are @@ -255,7 +259,7 @@ void efi_adjust_memory_range_protection(unsigned long start, status = efi_dxe_call(get_memory_space_descriptor, start, &desc); if (status != EFI_SUCCESS) - return; + break; next = desc.base_address + desc.length; @@ -280,8 +284,10 @@ void efi_adjust_memory_range_protection(unsigned long start, unprotect_start, unprotect_start + unprotect_size, status); + break; } } + return EFI_SUCCESS; } static void setup_unaccepted_memory(void) @@ -793,6 +799,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry) status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr, seed[0], EFI_LOADER_CODE, + LOAD_PHYSICAL_ADDR, EFI_X86_KERNEL_ALLOC_LIMIT); if (status != EFI_SUCCESS) return status; @@ -805,9 +812,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry) *kernel_entry = addr + entry; - efi_adjust_memory_range_protection(addr, kernel_total_size); - - return EFI_SUCCESS; + return efi_adjust_memory_range_protection(addr, kernel_total_size); } static void __noreturn enter_kernel(unsigned long kernel_addr, @@ -879,6 +884,9 @@ void __noreturn efi_stub_entry(efi_handle_t handle, } } + if (efi_mem_encrypt > 0) + hdr->xloadflags |= XLF_MEM_ENCRYPTION; + status = efi_decompress_kernel(&kernel_entry); if (status != EFI_SUCCESS) { efi_err("Failed to decompress kernel\n"); diff --git a/drivers/firmware/efi/libstub/x86-stub.h b/drivers/firmware/efi/libstub/x86-stub.h index 37c5a36b9d8c..1c20e99a6494 100644 --- a/drivers/firmware/efi/libstub/x86-stub.h +++ b/drivers/firmware/efi/libstub/x86-stub.h @@ -5,8 +5,8 @@ extern void trampoline_32bit_src(void *, bool); extern const u16 trampoline_ljmp_imm_offset; -void efi_adjust_memory_range_protection(unsigned long start, - unsigned long size); +efi_status_t 
efi_adjust_memory_range_protection(unsigned long start, + unsigned long size); #ifdef CONFIG_X86_64 efi_status_t efi_setup_5level_paging(void); diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c index bdb17eac0cb4..1ceace956758 100644 --- a/drivers/firmware/efi/libstub/zboot.c +++ b/drivers/firmware/efi/libstub/zboot.c @@ -119,7 +119,7 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab) } status = efi_random_alloc(alloc_size, min_kimg_align, &image_base, - seed, EFI_LOADER_CODE, EFI_ALLOC_LIMIT); + seed, EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT); if (status != EFI_SUCCESS) { efi_err("Failed to allocate memory\n"); goto free_cmdline; diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c index 09525fb5c240..01f0f90ea418 100644 --- a/drivers/firmware/efi/riscv-runtime.c +++ b/drivers/firmware/efi/riscv-runtime.c @@ -85,7 +85,7 @@ static int __init riscv_enable_runtime_services(void) efi_memory_desc_t *md; for_each_efi_memory_desc(md) { - int md_size = md->num_pages << EFI_PAGE_SHIFT; + u64 md_size = md->num_pages << EFI_PAGE_SHIFT; struct resource *res; if (!(md->attribute & EFI_MEMORY_SP)) diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c index 81f5f62e34fc..fbeeaee4ac85 100644 --- a/drivers/firmware/microchip/mpfs-auto-update.c +++ b/drivers/firmware/microchip/mpfs-auto-update.c @@ -167,7 +167,7 @@ static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader) u32 *response_msg; int ret; - response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(response_msg), + response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), GFP_KERNEL); if (!response_msg) return -ENOMEM; @@ -384,7 +384,8 @@ static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv) u32 *response_msg; int ret; - response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(response_msg), + response_msg = devm_kzalloc(priv->dev, + AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg), GFP_KERNEL); if (!response_msg) return -ENOMEM; diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c index bbcdd9fed3fb..4221fed70ad4 100644 --- a/drivers/firmware/tegra/bpmp-debugfs.c +++ b/drivers/firmware/tegra/bpmp-debugfs.c @@ -77,7 +77,7 @@ static const char *get_filename(struct tegra_bpmp *bpmp, root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL); if (!root_path_buf) - goto out; + return NULL; root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf, root_path_buf_len); diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c index e00c33310517..753e7be039e4 100644 --- a/drivers/gpio/gpio-74x164.c +++ b/drivers/gpio/gpio-74x164.c @@ -127,8 +127,6 @@ static int gen_74x164_probe(struct spi_device *spi) if (IS_ERR(chip->gpiod_oe)) return PTR_ERR(chip->gpiod_oe); - gpiod_set_value_cansleep(chip->gpiod_oe, 1); - spi_set_drvdata(spi, chip); chip->gpio_chip.label = spi->modalias; @@ -153,6 +151,8 @@ static int gen_74x164_probe(struct spi_device *spi) goto exit_destroy; } + gpiod_set_value_cansleep(chip->gpiod_oe, 1); + ret = gpiochip_add_data(&chip->gpio_chip, chip); if (!ret) return 0; diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 44c8f5743a24..75be4a3ca7f8 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -968,11 +968,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, ret = 
gpiochip_irqchip_init_valid_mask(gc); if (ret) - goto err_remove_acpi_chip; + goto err_free_hogs; ret = gpiochip_irqchip_init_hw(gc); if (ret) - goto err_remove_acpi_chip; + goto err_remove_irqchip_mask; ret = gpiochip_add_irqchip(gc, lock_key, request_key); if (ret) @@ -997,23 +997,23 @@ err_remove_irqchip: gpiochip_irqchip_remove(gc); err_remove_irqchip_mask: gpiochip_irqchip_free_valid_mask(gc); -err_remove_acpi_chip: +err_free_hogs: + gpiochip_free_hogs(gc); acpi_gpiochip_remove(gc); + gpiochip_remove_pin_ranges(gc); err_remove_of_chip: - gpiochip_free_hogs(gc); of_gpiochip_remove(gc); err_free_gpiochip_mask: - gpiochip_remove_pin_ranges(gc); gpiochip_free_valid_mask(gc); +err_remove_from_list: + spin_lock_irqsave(&gpio_lock, flags); + list_del(&gdev->list); + spin_unlock_irqrestore(&gpio_lock, flags); if (gdev->dev.release) { /* release() has been registered by gpiochip_setup_dev() */ gpio_device_put(gdev); goto err_print_message; } -err_remove_from_list: - spin_lock_irqsave(&gpio_lock, flags); - list_del(&gdev->list); - spin_unlock_irqrestore(&gpio_lock, flags); err_free_label: kfree_const(gdev->label); err_free_descs: @@ -2042,6 +2042,11 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_free); int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset, unsigned long config) { +#ifdef CONFIG_PINCTRL + if (list_empty(&gc->gpiodev->pin_ranges)) + return -ENOTSUPP; +#endif + return pinctrl_gpio_set_config(gc, offset, config); } EXPORT_SYMBOL_GPL(gpiochip_generic_config); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 2520db0b776e..c7edba18a6f0 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -199,7 +199,7 @@ config DRM_TTM config DRM_TTM_KUNIT_TEST tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS default n - depends on DRM && KUNIT && MMU + depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST) select DRM_TTM select DRM_EXPORT_FOR_TESTS if m select DRM_KUNIT_TEST_HELPERS @@ -207,7 +207,8 @@ config DRM_TTM_KUNIT_TEST help Enables unit tests for TTM, a GPU memory manager subsystem used to manage memory buffers. This option is mostly useful for kernel - developers. + developers. It depends on (UML || COMPILE_TEST) since no other driver + which uses TTM can be loaded while running the tests. If in doubt, say "N". 
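The gpiolib.c hunk above makes gpiochip_generic_config() bail out with -ENOTSUPP when the chip has registered no pinctrl pin ranges, instead of unconditionally calling pinctrl_gpio_set_config(). A minimal sketch of how a driver-side consumer of that helper might look, assuming a hypothetical foo_gpio_set_config() callback that is not part of any patch in this series:

#include <linux/gpio/driver.h>
#include <linux/pinctrl/pinconf-generic.h>

static int foo_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
			       unsigned long config)
{
	switch (pinconf_to_config_param(config)) {
	case PIN_CONFIG_INPUT_DEBOUNCE:
		/* Debounce handled directly by this (hypothetical) controller. */
		return 0;
	default:
		/*
		 * Delegate everything else to the generic helper; with the
		 * guard added above it now returns -ENOTSUPP when no pin
		 * ranges back this chip, rather than calling into pinctrl.
		 */
		return gpiochip_generic_config(gc, offset, config);
	}
}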
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 3d8a48f46b01..79827a6dcd7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -200,6 +200,7 @@ extern uint amdgpu_dc_debug_mask; extern uint amdgpu_dc_visual_confirm; extern uint amdgpu_dm_abm_level; extern int amdgpu_backlight; +extern int amdgpu_damage_clips; extern struct amdgpu_mgpu_info mgpu_info; extern int amdgpu_ras_enable; extern uint amdgpu_ras_mask; @@ -1078,6 +1079,8 @@ struct amdgpu_device { bool in_s3; bool in_s4; bool in_s0ix; + /* indicate amdgpu suspension status */ + bool suspend_complete; enum pp_mp1_state mp1_state; struct amdgpu_doorbell_index doorbell_index; @@ -1547,9 +1550,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev, #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev); bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); +void amdgpu_choose_low_power_state(struct amdgpu_device *adev); #else static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; } +static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { } #endif #if defined(CONFIG_DRM_AMD_DC) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 2deebece810e..7099ff9cf8c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1519,4 +1519,22 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) #endif /* CONFIG_AMD_PMC */ } +/** + * amdgpu_choose_low_power_state + * + * @adev: amdgpu_device_pointer + * + * Choose the target low power state for the GPU + */ +void amdgpu_choose_low_power_state(struct amdgpu_device *adev) +{ + if (adev->in_runpm) + return; + + if (amdgpu_acpi_is_s0ix_active(adev)) + adev->in_s0ix = true; + else if (amdgpu_acpi_is_s3_active(adev)) + adev->in_s3 = true; +} + #endif /* CONFIG_SUSPEND */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 77e263660288..41db030ddc4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -141,11 +141,31 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work) static const struct drm_client_funcs kfd_client_funcs = { .unregister = drm_client_release, }; + +int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev) +{ + int ret; + + if (!adev->kfd.init_complete) + return 0; + + ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd", + &kfd_client_funcs); + if (ret) { + dev_err(adev->dev, "Failed to init DRM client: %d\n", + ret); + return ret; + } + + drm_client_register(&adev->kfd.client); + + return 0; +} + void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) { int i; int last_valid_bit; - int ret; amdgpu_amdkfd_gpuvm_init_mem_limits(); @@ -164,12 +184,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) .enable_mes = adev->enable_mes, }; - ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd", &kfd_client_funcs); - if (ret) { - dev_err(adev->dev, "Failed to init DRM client: %d\n", ret); - return; - } - /* this is going to have a few of the MSBs set that we need to * clear */ @@ -208,10 +222,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev, 
&gpu_resources); - if (adev->kfd.init_complete) - drm_client_register(&adev->kfd.client); - else - drm_client_release(&adev->kfd.client); amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index f262b9d89541..27c61c535e29 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -182,6 +182,8 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, struct mm_struct *mm, struct svm_range_bo *svm_bo); + +int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev); #if defined(CONFIG_DEBUG_FS) int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data); #endif @@ -301,7 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv); -void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv); +int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_sync_memory( struct amdgpu_device *adev, struct kgd_mem *mem, bool intr); int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 899e31e3a5e8..3a3f3ce09f00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus for (i = 0; i < adev->gfx.num_compute_rings; i++) { struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; - if (!(ring && drm_sched_wqueue_ready(&ring->sched))) + if (!amdgpu_ring_sched_ready(ring)) continue; /* stop secheduler and drain ring. 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index f183d7faeeec..231fd927dcfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2085,21 +2085,35 @@ out: return ret; } -void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv) +int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv) { struct kfd_mem_attachment *entry; struct amdgpu_vm *vm; + int ret; vm = drm_priv_to_vm(drm_priv); mutex_lock(&mem->lock); + ret = amdgpu_bo_reserve(mem->bo, true); + if (ret) + goto out; + list_for_each_entry(entry, &mem->attachments, list) { - if (entry->bo_va->base.vm == vm) - kfd_mem_dmaunmap_attachment(mem, entry); + if (entry->bo_va->base.vm != vm) + continue; + if (entry->bo_va->base.bo->tbo.ttm && + !entry->bo_va->base.bo->tbo.ttm->sg) + continue; + + kfd_mem_dmaunmap_attachment(mem, entry); } + amdgpu_bo_unreserve(mem->bo); +out: mutex_unlock(&mem->lock); + + return ret; } int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index e485dd3357c6..1afbb2e932c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1678,7 +1678,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !drm_sched_wqueue_ready(&ring->sched)) + if (!amdgpu_ring_sched_ready(ring)) continue; drm_sched_wqueue_stop(&ring->sched); } @@ -1694,7 +1694,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !drm_sched_wqueue_ready(&ring->sched)) + if (!amdgpu_ring_sched_ready(ring)) continue; drm_sched_wqueue_start(&ring->sched); } @@ -1916,8 +1916,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) ring = adev->rings[val]; - if (!ring || !ring->funcs->preempt_ib || - !drm_sched_wqueue_ready(&ring->sched)) + if (!amdgpu_ring_sched_ready(ring) || + !ring->funcs->preempt_ib) return -EINVAL; /* the last preemption failed */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b158d27d0a71..94bdb5fa6ebc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4121,23 +4121,13 @@ int amdgpu_device_init(struct amdgpu_device *adev, } } } else { - switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(13, 0, 0): - case IP_VERSION(13, 0, 7): - case IP_VERSION(13, 0, 10): - r = psp_gpu_reset(adev); - break; - default: - tmp = amdgpu_reset_method; - /* It should do a default reset when loading or reloading the driver, - * regardless of the module parameter reset_method. - */ - amdgpu_reset_method = AMD_RESET_METHOD_NONE; - r = amdgpu_asic_reset(adev); - amdgpu_reset_method = tmp; - break; - } - + tmp = amdgpu_reset_method; + /* It should do a default reset when loading or reloading the driver, + * regardless of the module parameter reset_method. 
+ */ + amdgpu_reset_method = AMD_RESET_METHOD_NONE; + r = amdgpu_asic_reset(adev); + amdgpu_reset_method = tmp; if (r) { dev_err(adev->dev, "asic reset on init failed\n"); goto failed; @@ -4524,13 +4514,15 @@ int amdgpu_device_prepare(struct drm_device *dev) struct amdgpu_device *adev = drm_to_adev(dev); int i, r; + amdgpu_choose_low_power_state(adev); + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; /* Evict the majority of BOs before starting suspend sequence */ r = amdgpu_device_evict_resources(adev); if (r) - return r; + goto unprepare; for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.valid) @@ -4539,10 +4531,15 @@ int amdgpu_device_prepare(struct drm_device *dev) continue; r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev); if (r) - return r; + goto unprepare; } return 0; + +unprepare: + adev->in_s0ix = adev->in_s3 = false; + + return r; } /** @@ -4579,7 +4576,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); cancel_delayed_work_sync(&adev->delayed_init_work); - flush_delayed_work(&adev->gfx.gfx_off_delay_work); amdgpu_ras_suspend(adev); @@ -5031,7 +5027,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !drm_sched_wqueue_ready(&ring->sched)) + if (!amdgpu_ring_sched_ready(ring)) continue; spin_lock(&ring->sched.job_list_lock); @@ -5170,7 +5166,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !drm_sched_wqueue_ready(&ring->sched)) + if (!amdgpu_ring_sched_ready(ring)) continue; /* Clear job fence from fence drv to avoid force_completion @@ -5637,7 +5633,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = tmp_adev->rings[i]; - if (!ring || !drm_sched_wqueue_ready(&ring->sched)) + if (!amdgpu_ring_sched_ready(ring)) continue; drm_sched_stop(&ring->sched, job ? 
&job->base : NULL); @@ -5706,7 +5702,7 @@ skip_hw_reset: for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = tmp_adev->rings[i]; - if (!ring || !drm_sched_wqueue_ready(&ring->sched)) + if (!amdgpu_ring_sched_ready(ring)) continue; drm_sched_start(&ring->sched, true); @@ -6061,7 +6057,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !drm_sched_wqueue_ready(&ring->sched)) + if (!amdgpu_ring_sched_ready(ring)) continue; drm_sched_stop(&ring->sched, NULL); @@ -6189,7 +6185,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev) for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !drm_sched_wqueue_ready(&ring->sched)) + if (!amdgpu_ring_sched_ready(ring)) continue; drm_sched_start(&ring->sched, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index cc69005f5b46..586f4d03039d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -211,6 +211,7 @@ int amdgpu_seamless = -1; /* auto */ uint amdgpu_debug_mask; int amdgpu_agp = -1; /* auto */ int amdgpu_wbrf = -1; +int amdgpu_damage_clips = -1; /* auto */ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); @@ -860,6 +861,18 @@ MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (defau module_param_named(backlight, amdgpu_backlight, bint, 0444); /** + * DOC: damageclips (int) + * Enable or disable damage clips support. If damage clips support is disabled, + * we will force full frame updates, irrespective of what user space sends to + * us. + * + * Defaults to -1 (where it is enabled unless a PSR-SU display is detected). + */ +MODULE_PARM_DESC(damageclips, + "Damage clips support (0 = disable, 1 = enable, -1 auto (default))"); +module_param_named(damageclips, amdgpu_damage_clips, int, 0444); + +/** * DOC: tmz (int) * Trusted Memory Zone (TMZ) is a method to protect data being written * to or read from memory. @@ -2255,6 +2268,10 @@ retry_init: if (ret) goto err_pci; + ret = amdgpu_amdkfd_drm_client_create(adev); + if (ret) + goto err_pci; + /* * 1. don't init fbdev on hw without DCE * 2. 
don't init fbdev if there are no connectors @@ -2472,6 +2489,7 @@ static int amdgpu_pmops_suspend(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); + adev->suspend_complete = false; if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = true; else if (amdgpu_acpi_is_s3_active(adev)) @@ -2486,6 +2504,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); + adev->suspend_complete = true; if (amdgpu_acpi_should_gpu_reset(adev)) return amdgpu_asic_reset(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index b9674c57c436..6ddc8e3360e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -723,8 +723,15 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state) { - schedule_delayed_work(&adev->gfx.gfx_off_delay_work, + /* If going to s2idle, no need to wait */ + if (adev->in_s0ix) { + if (!amdgpu_dpm_set_powergating_by_smu(adev, + AMD_IP_BLOCK_TYPE_GFX, true)) + adev->gfx.gfx_off_state = true; + } else { + schedule_delayed_work(&adev->gfx.gfx_off_delay_work, delay); + } } } else { if (adev->gfx.gfx_off_req_count == 0) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c index 468a67b302d4..ca5c86e5f7cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c @@ -362,7 +362,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size } } - if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len)) + if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len)) ret = -EFAULT; err_free_shared_buf: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 45424ebf9681..5505d646f43a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -635,6 +635,7 @@ int amdgpu_ring_test_helper(struct amdgpu_ring *ring) ring->name); ring->sched.ready = !r; + return r; } @@ -717,3 +718,14 @@ void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring) if (ring->is_sw_ring) amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE); } + +bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring) +{ + if (!ring) + return false; + + if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched)) + return false; + + return true; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index bbb53720a018..fe1a61eb6e4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -450,5 +450,5 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, int amdgpu_ib_pool_init(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct amdgpu_device *adev); - +bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 6f7c031dd197..f24e34dc33d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -204,6 +204,12 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev, tmp = RREG32(mmIH_RB_CNTL); tmp |= 
IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; WREG32(mmIH_RB_CNTL, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; + WREG32(mmIH_RB_CNTL, tmp); } return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index b8c47e0cf37a..c19681492efa 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -216,6 +216,11 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32(mmIH_RB_CNTL, tmp); + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32(mmIH_RB_CNTL, tmp); out: return (wptr & ih->ptr_mask); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index ecb622b7f970..dcdecb18b230 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4027,8 +4027,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) err = 0; adev->gfx.mec2_fw = NULL; } - amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2); - amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT); gfx_v10_0_check_fw_write_wait(adev); out: diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index d9cf9fd03d30..4f3bfdc75b37 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -107,23 +107,6 @@ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a) }; -static const struct soc15_reg_golden golden_settings_gc_11_5_0[] = { - SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_DEBUG5, 0xffffffff, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL, 0xffffffff, 0xf37fff3f), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xfffffffb, 0x00f40188), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x80009007), - SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf1ffffff, 0x00880007), - SOC15_REG_GOLDEN_VALUE(GC, 0, regPC_CONFIG_CNTL_1, 0xffffffff, 0x00010000), - SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000), - SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL2, 0x007f0000, 0x00000000), - SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xffcfffff, 0x0000200a), - SOC15_REG_GOLDEN_VALUE(GC, 0, regUTCL1_CTRL_2, 0xffffffff, 0x0000048f) -}; - #define DEFAULT_SH_MEM_CONFIG \ ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ @@ -304,11 +287,6 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_11_0_1, (const u32)ARRAY_SIZE(golden_settings_gc_11_0_1)); break; - case IP_VERSION(11, 5, 0): - soc15_program_register_sequence(adev, - golden_settings_gc_11_5_0, - (const u32)ARRAY_SIZE(golden_settings_gc_11_5_0)); - break; default: break; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 69c500910746..3bc6943365a4 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3034,6 +3034,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) gfx_v9_0_cp_gfx_enable(adev, true); + /* Now only limit the quirk on the APU gfx9 series and already + * confirmed that the APU gfx10/gfx11 needn't such update. + */ + if (adev->flags & AMD_IS_APU && + adev->in_s3 && !adev->suspend_complete) { + DRM_INFO(" Will skip the CSB packet resubmit\n"); + return 0; + } r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 42e103d7077d..59d9215e5556 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -915,8 +915,8 @@ static int gmc_v6_0_hw_init(void *handle) if (amdgpu_emu_mode == 1) return amdgpu_gmc_vram_checking(adev); - else - return r; + + return 0; } static int gmc_v6_0_hw_fini(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index efc16e580f1e..45a2f8e031a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1099,8 +1099,8 @@ static int gmc_v7_0_hw_init(void *handle) if (amdgpu_emu_mode == 1) return amdgpu_gmc_vram_checking(adev); - else - return r; + + return 0; } static int gmc_v7_0_hw_fini(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index ff4ae73d27ec..4422b27a3cc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1219,8 +1219,8 @@ static int gmc_v8_0_hw_init(void *handle) if (amdgpu_emu_mode == 1) return amdgpu_gmc_vram_checking(adev); - else - return r; + + return 0; } static int gmc_v8_0_hw_fini(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 17b7a25121b0..e67a62db9e12 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1947,14 +1947,6 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev) static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) { - static const u32 regBIF_BIOS_SCRATCH_4 = 0x50; - u32 vram_info; - - /* Only for dGPU, vendor informaton is reliable */ - if (!amdgpu_sriov_vf(adev) && !(adev->flags & AMD_IS_APU)) { - vram_info = RREG32(regBIF_BIOS_SCRATCH_4); - adev->gmc.vram_vendor = vram_info & 0xF; - } adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; adev->gmc.vram_width = 128 * 64; } @@ -2341,8 +2333,8 @@ static int gmc_v9_0_hw_init(void *handle) if (amdgpu_emu_mode == 1) return amdgpu_gmc_vram_checking(adev); - else - return r; + + return 0; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index aecad530b10a..2c02ae69883d 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -215,6 +215,11 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32(mmIH_RB_CNTL, tmp); + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. 
+ */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32(mmIH_RB_CNTL, tmp); out: return (wptr & ih->ptr_mask); diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index d9ed7332d805..ad4ad39f128f 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -418,6 +418,12 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev, tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); out: return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c index 8fb05eae340a..b8da0fc29378 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c @@ -418,6 +418,13 @@ static u32 ih_v6_1_get_wptr(struct amdgpu_device *adev, tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + out: return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index bc38b90f8cf8..88ea58d5c4ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -674,14 +674,6 @@ static int jpeg_v4_0_set_powergating_state(void *handle, return ret; } -static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - return 0; -} - static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int type, @@ -765,7 +757,6 @@ static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev) } static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = { - .set = jpeg_v4_0_set_interrupt_state, .process = jpeg_v4_0_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index 6ede85b28cc8..78b74daf4eeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -181,7 +181,6 @@ static int jpeg_v4_0_5_hw_fini(void *handle) RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE); } - amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0); return 0; } @@ -516,14 +515,6 @@ static int jpeg_v4_0_5_set_powergating_state(void *handle, return ret; } -static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - return 0; -} - static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -603,7 +594,6 @@ static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev) } static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = { - .set = jpeg_v4_0_5_set_interrupt_state, .process = jpeg_v4_0_5_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 
e64b33115848..de93614726c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -442,6 +442,12 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev, tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); out: return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index e90f33780803..b4723d68eab0 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -431,6 +431,12 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev) u32 inst_mask; int i; + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = + SOC15_REG_OFFSET( + NBIO, 0, + regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) + << 2; WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE, 0xff & ~(adev->gfx.xcc_mask)); diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 9a24f17a5750..cada9f300a7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -119,6 +119,12 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev, tmp = RREG32(IH_RB_CNTL); tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; WREG32(IH_RB_CNTL, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; + WREG32(IH_RB_CNTL, tmp); } return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 15033efec2ba..1c614451dead 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -574,11 +574,34 @@ soc15_asic_reset_method(struct amdgpu_device *adev) return AMD_RESET_METHOD_MODE1; } +static bool soc15_need_reset_on_resume(struct amdgpu_device *adev) +{ + u32 sol_reg; + + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + + /* Will reset for the following suspend abort cases. + * 1) Only reset limit on APU side, dGPU hasn't checked yet. + * 2) S3 suspend abort and TOS already launched. + */ + if (adev->flags & AMD_IS_APU && adev->in_s3 && + !adev->suspend_complete && + sol_reg) + return true; + + return false; +} + static int soc15_asic_reset(struct amdgpu_device *adev) { /* original raven doesn't have full asic reset */ - if ((adev->apu_flags & AMD_APU_IS_RAVEN) || - (adev->apu_flags & AMD_APU_IS_RAVEN2)) + /* On the latest Raven, the GPU reset can be performed + * successfully. So now, temporarily enable it for the + * S3 suspend abort case. 
+ */ + if (((adev->apu_flags & AMD_APU_IS_RAVEN) || + (adev->apu_flags & AMD_APU_IS_RAVEN2)) && + !soc15_need_reset_on_resume(adev)) return 0; switch (soc15_asic_reset_method(adev)) { @@ -1302,6 +1325,10 @@ static int soc15_common_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (soc15_need_reset_on_resume(adev)) { + dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n"); + soc15_asic_reset(adev); + } return soc15_common_hw_init(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 48c6efcdeac9..4d7188912edf 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -50,13 +50,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs; /* SOC21 */ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)}, }; static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = { diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 917707bba7f3..450b6e831509 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -219,6 +219,12 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32(mmIH_RB_CNTL, tmp); + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. 
+ */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32(mmIH_RB_CNTL, tmp); + out: return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 169ed400ee7b..8ab01ae919d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -2018,22 +2018,6 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta } /** - * vcn_v4_0_set_interrupt_state - set VCN block interrupt state - * - * @adev: amdgpu_device pointer - * @source: interrupt sources - * @type: interrupt types - * @state: interrupt states - * - * Set VCN block interrupt state - */ -static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, enum amdgpu_interrupt_state state) -{ - return 0; -} - -/** * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state * * @adev: amdgpu_device pointer @@ -2097,7 +2081,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_ } static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = { - .set = vcn_v4_0_set_interrupt_state, .process = vcn_v4_0_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 2eda30e78f61..49e4c3c09aca 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -269,8 +269,6 @@ static int vcn_v4_0_5_hw_fini(void *handle) vcn_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE); } } - - amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0); } return 0; @@ -1669,22 +1667,6 @@ static int vcn_v4_0_5_set_powergating_state(void *handle, enum amd_powergating_s } /** - * vcn_v4_0_5_set_interrupt_state - set VCN block interrupt state - * - * @adev: amdgpu_device pointer - * @source: interrupt sources - * @type: interrupt types - * @state: interrupt states - * - * Set VCN block interrupt state - */ -static int vcn_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, enum amdgpu_interrupt_state state) -{ - return 0; -} - -/** * vcn_v4_0_5_process_interrupt - process VCN block interrupt * * @adev: amdgpu_device pointer @@ -1726,7 +1708,6 @@ static int vcn_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgp } static const struct amdgpu_irq_src_funcs vcn_v4_0_5_irq_funcs = { - .set = vcn_v4_0_5_set_interrupt_state, .process = vcn_v4_0_5_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index d364c6dd152c..bf68e18e3824 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -373,6 +373,12 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. 
+ */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + out: return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c index ddfc6941f9d5..db66e6cccaf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -421,6 +421,12 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + out: return (wptr & ih->ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index df75863393fc..d1caaf0e6a7c 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -674,7 +674,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = { 0x86ea6a6a, 0x8f6e837a, 0xb96ee0c2, 0xbf800002, 0xb97a0002, 0xbf8a0000, - 0xbe801f6c, 0xbf810000, + 0xbe801f6c, 0xbf9b0000, }; static const uint32_t cwsr_trap_nv1x_hex[] = { @@ -1091,7 +1091,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = { 0xb9eef807, 0x876dff6d, 0x0000ffff, 0x87fe7e7e, 0x87ea6a6a, 0xb9faf802, - 0xbe80226c, 0xbf810000, + 0xbe80226c, 0xbf9b0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0x00000000, @@ -1574,7 +1574,7 @@ static const uint32_t cwsr_trap_arcturus_hex[] = { 0x86ea6a6a, 0x8f6e837a, 0xb96ee0c2, 0xbf800002, 0xb97a0002, 0xbf8a0000, - 0xbe801f6c, 0xbf810000, + 0xbe801f6c, 0xbf9b0000, }; static const uint32_t cwsr_trap_aldebaran_hex[] = { @@ -2065,7 +2065,7 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = { 0x86ea6a6a, 0x8f6e837a, 0xb96ee0c2, 0xbf800002, 0xb97a0002, 0xbf8a0000, - 0xbe801f6c, 0xbf810000, + 0xbe801f6c, 0xbf9b0000, }; static const uint32_t cwsr_trap_gfx10_hex[] = { @@ -2500,7 +2500,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x876dff6d, 0x0000ffff, 0x87fe7e7e, 0x87ea6a6a, 0xb9faf802, 0xbe80226c, - 0xbf810000, 0xbf9f0000, + 0xbf9b0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, }; @@ -2944,7 +2944,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = { 0xb8eef802, 0xbf0d866e, 0xbfa20002, 0xb97af802, 0xbe80486c, 0xb97af802, - 0xbe804a6c, 0xbfb00000, + 0xbe804a6c, 0xbfb10000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0x00000000, @@ -3436,5 +3436,5 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = { 0x86ea6a6a, 0x8f6e837a, 0xb96ee0c2, 0xbf800002, 0xb97a0002, 0xbf8a0000, - 0xbe801f6c, 0xbf810000, + 0xbe801f6c, 0xbf9b0000, }; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index e0140df0b0ec..71b3dc0c7363 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -1104,7 +1104,7 @@ L_RETURN_WITHOUT_PRIV: s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution L_END_PGM: - s_endpgm + s_endpgm_saved end function write_hwreg_to_mem(s, s_rsrc, s_mem_offset) diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm index e506411ad28a..bb26338204f4 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm +++ 
b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm @@ -921,7 +921,7 @@ L_RESTORE: /* the END */ /**************************************************************************/ L_END_PGM: - s_endpgm + s_endpgm_saved end diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index ce4c52ec34d8..80e90fdef291 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1442,7 +1442,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT); /* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */ - amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv); + err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv); + if (err) + goto sync_memory_failed; } mutex_unlock(&p->mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index f856901055d3..bdc01ca9609a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -574,7 +574,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start, prange->last); - addr = prange->start << PAGE_SHIFT; + addr = migrate->start; src = (uint64_t *)(scratch + npages); dst = scratch; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index d722cbd31783..826bc4f6c8a7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -55,8 +55,8 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd); if (has_wa_flag) { - uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ? - 0xffff : 0xffffffff; + uint32_t wa_mask = + (minfo->update_flag & UPDATE_FLAG_DBG_WA_ENABLE) ? 
0xffff : 0xffffffff; m->compute_static_thread_mgmt_se0 = wa_mask; m->compute_static_thread_mgmt_se1 = wa_mask; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 42d881809dc7..697b6d530d12 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -303,6 +303,15 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, update_cu_mask(mm, mqd, minfo, 0); set_priority(m, q); + if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) { + if (minfo->update_flag & UPDATE_FLAG_IS_GWS) + m->compute_resource_limits |= + COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK; + else + m->compute_resource_limits &= + ~COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK; + } + q->is_active = QUEUE_IS_ACTIVE(*q); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 17fbedbf3651..80320b8603fc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -532,6 +532,7 @@ struct queue_properties { enum mqd_update_flag { UPDATE_FLAG_DBG_WA_ENABLE = 1, UPDATE_FLAG_DBG_WA_DISABLE = 2, + UPDATE_FLAG_IS_GWS = 4, /* quirk for gfx9 IP */ }; struct mqd_update_info { @@ -1488,10 +1489,15 @@ void kfd_dec_compute_active(struct kfd_node *dev); /* Cgroup Support */ /* Check with device cgroup if @kfd device is accessible */ -static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd) +static inline int kfd_devcgroup_check_permission(struct kfd_node *node) { #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) - struct drm_device *ddev = adev_to_drm(kfd->adev); + struct drm_device *ddev; + + if (node->xcp) + ddev = node->xcp->ddev; + else + ddev = adev_to_drm(node->adev); return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR, ddev->render->index, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 43eff221eae5..4858112f9a53 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -95,6 +95,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd) int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, void *gws) { + struct mqd_update_info minfo = {0}; struct kfd_node *dev = NULL; struct process_queue_node *pqn; struct kfd_process_device *pdd; @@ -146,9 +147,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, } pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0; + minfo.update_flag = gws ? 
UPDATE_FLAG_IS_GWS : 0; return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, - pqn->q, NULL); + pqn->q, &minfo); } void kfd_process_dequeue_from_all_devices(struct kfd_process *p) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index e5f7c92eebcb..6ed2ec381aaa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1638,12 +1638,10 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, else mode = UNKNOWN_MEMORY_PARTITION_MODE; - if (pcache->cache_level == 2) - pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc; - else if (mode) - pcache->cache_size = pcache_info[cache_type].cache_size / mode; - else - pcache->cache_size = pcache_info[cache_type].cache_size; + pcache->cache_size = pcache_info[cache_type].cache_size; + /* Partition mode only affects L3 cache size */ + if (mode && pcache->cache_level == 3) + pcache->cache_size /= mode; if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE) pcache->cache_type |= HSA_CACHE_TYPE_DATA; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6cda5b536362..1a9bbb04bd5e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1843,21 +1843,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) DRM_ERROR("amdgpu: fail to register dmub aux callback"); goto error; } - if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { - DRM_ERROR("amdgpu: fail to register dmub hpd callback"); - goto error; - } - if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { - DRM_ERROR("amdgpu: fail to register dmub hpd callback"); - goto error; - } - } - - /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. - * It is expected that DMUB will resend any pending notifications at this point, for - * example HPD from DPIA. - */ - if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. + * It is expected that DMUB will resend any pending notifications at this point. Note + * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to + * align legacy interface initialization sequence. Connection status will be proactivly + * detected once in the amdgpu_dm_initialize_drm_device. 
+ */ dc_enable_dmub_outbox(adev->dm.dc); /* DPIA trace goes to dmesg logs only if outbox is enabled */ @@ -1956,7 +1947,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) &adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_cpu_addr); - if (adev->dm.hpd_rx_offload_wq) { + if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) { for (i = 0; i < adev->dm.dc->caps.max_links; i++) { if (adev->dm.hpd_rx_offload_wq[i].wq) { destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); @@ -2287,6 +2278,7 @@ static int dm_sw_fini(void *handle) if (adev->dm.dmub_srv) { dmub_srv_destroy(adev->dm.dmub_srv); + kfree(adev->dm.dmub_srv); adev->dm.dmub_srv = NULL; } @@ -3536,6 +3528,14 @@ static void register_hpd_handlers(struct amdgpu_device *adev) int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + } + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { @@ -3564,10 +3564,6 @@ static void register_hpd_handlers(struct amdgpu_device *adev) handle_hpd_rx_irq, (void *) aconnector); } - - if (adev->dm.hpd_rx_offload_wq) - adev->dm.hpd_rx_offload_wq[connector->index].aconnector = - aconnector; } } @@ -4561,6 +4557,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; } + if (dm->hpd_rx_offload_wq) + dm->hpd_rx_offload_wq[aconnector->base.index].aconnector = + aconnector; + if (!dc_link_detect_connection_type(link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); @@ -5219,6 +5219,7 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, struct drm_plane_state *new_plane_state, struct drm_crtc_state *crtc_state, struct dc_flip_addrs *flip_addrs, + bool is_psr_su, bool *dirty_regions_changed) { struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); @@ -5243,6 +5244,10 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, num_clips = drm_plane_get_damage_clips_count(new_plane_state); clips = drm_plane_get_damage_clips(new_plane_state); + if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 && + is_psr_su))) + goto ffu; + if (!dm_crtc_state->mpo_requested) { if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS) goto ffu; @@ -6194,7 +6199,9 @@ create_stream_for_sink(struct drm_connector *connector, if (recalculate_timing) { freesync_mode = get_highest_refresh_rate_mode(aconnector, false); drm_mode_copy(&saved_mode, &mode); + saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio; drm_mode_copy(&mode, freesync_mode); + mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio; } else { decide_crtc_timing_for_drm_display_mode( &mode, preferred_mode, scale); @@ -6527,10 +6534,15 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector) static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_em_sink = aconnector->dc_em_sink; struct edid *edid; + struct i2c_adapter *ddc; + + if (dc_link && dc_link->aux_mode) + ddc = &aconnector->dm_dp_aux.aux.ddc; + 
else + ddc = &aconnector->i2c->base; /* * Note: drm_get_edid gets edid in the following order: @@ -6538,7 +6550,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) * 2) firmware EDID if set via edid_firmware module parameter * 3) regular DDC read. */ - edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc); + edid = drm_get_edid(connector, ddc); if (!edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; @@ -6579,12 +6591,18 @@ static int get_modes(struct drm_connector *connector) static void create_eml_sink(struct amdgpu_dm_connector *aconnector) { struct drm_connector *connector = &aconnector->base; - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base); + struct dc_link *dc_link = aconnector->dc_link; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_VIRTUAL }; struct edid *edid; + struct i2c_adapter *ddc; + + if (dc_link->aux_mode) + ddc = &aconnector->dm_dp_aux.aux.ddc; + else + ddc = &aconnector->i2c->base; /* * Note: drm_get_edid gets edid in the following order: @@ -6592,7 +6610,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) * 2) firmware EDID if set via edid_firmware module parameter * 3) regular DDC read. */ - edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc); + edid = drm_get_edid(connector, ddc); if (!edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; @@ -8298,6 +8316,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, fill_dc_dirty_rects(plane, old_plane_state, new_plane_state, new_crtc_state, &bundle->flip_addrs[planes_count], + acrtc_state->stream->link->psr_settings.psr_version == + DC_PSR_VERSION_SU_1, &dirty_rects_changed); /* @@ -9187,6 +9207,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * To fix this, DC should permit updating only stream properties. 
*/ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); + if (!dummy_updates) { + DRM_ERROR("Failed to allocate memory for dummy_updates.\n"); + continue; + } for (j = 0; j < status->plane_count; j++) dummy_updates[j].surface = status->plane_states[0]; @@ -10727,11 +10751,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; } - ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); - if (ret) { - DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); - ret = -EINVAL; - goto fail; + if (dc_resource_is_dsc_encoding_supported(dc)) { + ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); + if (ret) { + DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); + ret = -EINVAL; + goto fail; + } } ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); @@ -11143,14 +11169,23 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (range->flags != 1) continue; - amdgpu_dm_connector->min_vfreq = range->min_vfreq; - amdgpu_dm_connector->max_vfreq = range->max_vfreq; - amdgpu_dm_connector->pixel_clock_mhz = - range->pixel_clock_mhz * 10; - connector->display_info.monitor_range.min_vfreq = range->min_vfreq; connector->display_info.monitor_range.max_vfreq = range->max_vfreq; + if (edid->revision >= 4) { + if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ) + connector->display_info.monitor_range.min_vfreq += 255; + if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ) + connector->display_info.monitor_range.max_vfreq += 255; + } + + amdgpu_dm_connector->min_vfreq = + connector->display_info.monitor_range.min_vfreq; + amdgpu_dm_connector->max_vfreq = + connector->display_info.monitor_range.max_vfreq; + amdgpu_dm_connector->pixel_clock_mhz = + range->pixel_clock_mhz * 10; + break; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 85b7f58a7f35..c27063305a13 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -67,6 +67,8 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) /* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */ case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB): case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B): + case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A): + case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1): DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id); edid_caps->panel_patch.remove_sink_ext_caps = true; break; @@ -120,6 +122,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps( edid_caps->edid_hdmi = connector->display_info.is_hdmi; + apply_edid_quirks(edid_buf, edid_caps); + sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); if (sad_count <= 0) return result; @@ -146,8 +150,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps( else edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION; - apply_edid_quirks(edid_buf, edid_caps); - kfree(sads); kfree(sadb); diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c index f2dfa96f9ef5..39530b2ea495 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c @@ -94,7 +94,7 @@ static void calculate_bandwidth( const uint32_t s_high = 7; const uint32_t dmif_chunk_buff_margin = 1; - uint32_t 
max_chunks_fbc_mode; + uint32_t max_chunks_fbc_mode = 0; int32_t num_cursor_lines; int32_t i, j, k; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 960c4b4f6ddf..05f392501c0a 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1850,19 +1850,21 @@ static enum bp_result get_firmware_info_v3_2( /* Vega12 */ smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2, DATA_TABLES(smu_info)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage); if (!smu_info_v3_2) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage); + info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10; } else if (revision.minor == 3) { /* Vega20 */ smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3, DATA_TABLES(smu_info)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage); if (!smu_info_v3_3) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage); + info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10; } @@ -2422,10 +2424,11 @@ static enum bp_result get_integrated_info_v11( info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage); if (info_v11 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v11->gpucapinfo); /* @@ -2637,11 +2640,12 @@ static enum bp_result get_integrated_info_v2_1( info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage); if (info_v2_1 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v2_1->gpucapinfo); /* @@ -2799,11 +2803,11 @@ static enum bp_result get_integrated_info_v2_2( info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage); - if (info_v2_2 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v2_2->gpucapinfo); /* diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index a5489fe6875f..aa9fd1dc550a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -546,6 +546,8 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta int i; for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) { + if (i >= VG_NUM_DCFCLK_DPM_LEVELS) + break; if (clock_table->SocVoltage[i] == voltage) return clock_table->DcfClocks[i]; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 9c660d1facc7..e64890259235 
100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -437,32 +437,32 @@ static struct wm_table ddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, } @@ -474,32 +474,32 @@ static struct wm_table lpddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, } @@ -655,10 +655,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1]; uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0; uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0; + uint32_t num_memps, num_fclk, num_dcfclk; int i; /* Determine min/max p-state values. */ - for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) { + num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? 
NUM_MEM_PSTATE_LEVELS : + clock_table->NumMemPstatesEnabled; + for (i = 0; i < num_memps; i++) { uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]); if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) { @@ -670,7 +673,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk min_dram_speed_mts = max_dram_speed_mts; min_pstate = max_pstate; - for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) { + for (i = 0; i < num_memps; i++) { uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]); if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) { @@ -699,9 +702,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */ ASSERT(clock_table->NumDcfClkLevelsEnabled > 0); - max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, clock_table->NumFclkLevelsEnabled); + num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS : + clock_table->NumFclkLevelsEnabled; + max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk); - for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) { + num_dcfclk = (clock_table->NumFclkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS : + clock_table->NumDcfClkLevelsEnabled; + for (i = 0; i < num_dcfclk; i++) { int j; /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index aa7c02ba948e..2c424e435962 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3817,7 +3817,9 @@ static void commit_planes_for_stream(struct dc *dc, * programming has completed (we turn on phantom OTG in order * to complete the plane disable for phantom pipes). 
*/ - dc->hwss.apply_ctx_to_hw(dc, context); + + if (dc->hwss.disable_phantom_streams) + dc->hwss.disable_phantom_streams(dc, context); } if (update_type != UPDATE_TYPE_FAST) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 88c6436b28b6..180ac47868c2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -291,11 +291,14 @@ void dc_state_destruct(struct dc_state *state) dc_stream_release(state->phantom_streams[i]); state->phantom_streams[i] = NULL; } + state->phantom_stream_count = 0; for (i = 0; i < state->phantom_plane_count; i++) { dc_plane_state_release(state->phantom_planes[i]); state->phantom_planes[i] = NULL; } + state->phantom_plane_count = 0; + state->stream_mask = 0; memset(&state->res_ctx, 0, sizeof(state->res_ctx)); memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg)); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 2b79a0e5638e..363d522603a2 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -125,7 +125,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list) { - struct dc_context *dc_ctx = dc_dmub_srv->ctx; + struct dc_context *dc_ctx; struct dmub_srv *dmub; enum dmub_status status; int i; @@ -133,6 +133,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; + dc_ctx = dc_dmub_srv->ctx; dmub = dc_dmub_srv->dmub; for (i = 0 ; i < count; i++) { @@ -1161,7 +1162,7 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) { - struct dc_context *dc_ctx = dc_dmub_srv->ctx; + struct dc_context *dc_ctx; enum dmub_status status; if (!dc_dmub_srv || !dc_dmub_srv->dmub) @@ -1170,6 +1171,8 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation) return true; + dc_ctx = dc_dmub_srv->ctx; + if (wait) { if (dc_dmub_srv->ctx->dc->debug.disable_timeout) { do { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c index e8570060d007..5bca67407c5b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c @@ -290,4 +290,5 @@ void dce_panel_cntl_construct( dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs; dce_panel_cntl->base.ctx = init_data->ctx; dce_panel_cntl->base.inst = init_data->inst; + dce_panel_cntl->base.pwrseq_inst = 0; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c index e43f77c11c00..5f97a868ada3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c @@ -56,16 +56,13 @@ static void dpp3_enable_cm_block( static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base) { - enum dc_lut_mode mode; + enum dc_lut_mode mode = LUT_BYPASS; uint32_t state_mode; uint32_t lut_mode; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode); - if (state_mode == 0) - mode = LUT_BYPASS; - if (state_mode == 2) {//Programmable RAM LUT REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode); if 
(lut_mode == 0) diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c index ad0df1a72a90..9e96a3ace207 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c @@ -215,4 +215,5 @@ void dcn301_panel_cntl_construct( dcn301_panel_cntl->base.funcs = &dcn301_link_panel_cntl_funcs; dcn301_panel_cntl->base.ctx = init_data->ctx; dcn301_panel_cntl->base.inst = init_data->inst; + dcn301_panel_cntl->base.pwrseq_inst = 0; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c index 03248422d6ff..281be20b1a10 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c @@ -154,8 +154,24 @@ void dcn31_panel_cntl_construct( struct dcn31_panel_cntl *dcn31_panel_cntl, const struct panel_cntl_init_data *init_data) { + uint8_t pwrseq_inst = 0xF; + dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs; dcn31_panel_cntl->base.ctx = init_data->ctx; dcn31_panel_cntl->base.inst = init_data->inst; - dcn31_panel_cntl->base.pwrseq_inst = init_data->pwrseq_inst; + + switch (init_data->eng_id) { + case ENGINE_ID_DIGA: + pwrseq_inst = 0; + break; + case ENGINE_ID_DIGB: + pwrseq_inst = 1; + break; + default: + DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id); + ASSERT(false); + break; + } + + dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c index 501388014855..d761b0df2878 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c @@ -203,12 +203,12 @@ void dcn32_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; - if (enc10->base.connector.id == CONNECTOR_ID_USBC) - enc10->base.features.flags.bits.DP_IS_USB_C = 1; enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; enc10->base.features = *enc_features; + if (enc10->base.connector.id == CONNECTOR_ID_USBC) + enc10->base.features.flags.bits.DP_IS_USB_C = 1; enc10->base.transmitter = init_data->transmitter; diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c index da94e5309fba..81e349d5835b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c @@ -184,8 +184,6 @@ void dcn35_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; - if (enc10->base.connector.id == CONNECTOR_ID_USBC) - enc10->base.features.flags.bits.DP_IS_USB_C = 1; enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; @@ -240,6 +238,8 @@ void dcn35_link_encoder_construct( } enc10->base.features.flags.bits.HDMI_6GB_EN = 1; + if (enc10->base.connector.id == CONNECTOR_ID_USBC) + enc10->base.features.flags.bits.DP_IS_USB_C = 1; if (bp_funcs->get_connector_speed_cap_info) result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios, diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 6042a5a6a44f..59ade76ffb18 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ 
b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -72,11 +72,11 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 9f37f717a1f8..a0a65e099104 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -1112,7 +1112,7 @@ struct pipe_slice_table { struct pipe_ctx *pri_pipe; struct dc_plane_state *plane; int slice_count; - } mpc_combines[MAX_SURFACES]; + } mpc_combines[MAX_PLANES]; int mpc_combine_count; }; @@ -1288,7 +1288,7 @@ static bool update_pipes_with_split_flags(struct dc *dc, struct dc_state *contex return updated; } -static bool should_allow_odm_power_optimization(struct dc *dc, +static bool should_apply_odm_power_optimization(struct dc *dc, struct dc_state *context, struct vba_vars_st *v, int *split, bool *merge) { @@ -1392,9 +1392,12 @@ static void try_odm_power_optimization_and_revalidate( { int i; unsigned int new_vlevel; + unsigned int cur_policy[MAX_PIPES]; - for (i = 0; i < pipe_cnt; i++) + for (i = 0; i < pipe_cnt; i++) { + cur_policy[i] = pipes[i].pipe.dest.odm_combine_policy; pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; + } new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); @@ -1403,6 +1406,9 @@ static void try_odm_power_optimization_and_revalidate( memset(merge, 0, MAX_PIPES * sizeof(bool)); *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge); context->bw_ctx.dml.vba.VoltageLevel = *vlevel; + } else { + for (i = 0; i < pipe_cnt; i++) + pipes[i].pipe.dest.odm_combine_policy = cur_policy[i]; } } @@ -1580,7 +1586,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, } } - if (should_allow_odm_power_optimization(dc, context, vba, split, merge)) + if (should_apply_odm_power_optimization(dc, context, vba, split, merge)) try_odm_power_optimization_and_revalidate( dc, context, pipes, split, merge, vlevel, *pipe_cnt); @@ -2209,7 +2215,8 @@ bool dcn32_internal_validate_bw(struct dc *dc, int i; pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); - dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); + if (!dc->config.enable_windowed_mpo_odm) + dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); /* repopulate_pipes = 1 means the 
pipes were either split or merged. In this case * we have to re-calculate the DET allocation and run through DML once more to diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 475c4ec43c01..7ea2bd5374d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -164,8 +164,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { }, }, .num_states = 5, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .sr_exit_z8_time_us = 210.0, .sr_enter_plus_exit_z8_time_us = 320.0, .fclk_change_latency_us = 24.0, diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 64d01a9cd68c..1ba6933d2b36 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -341,9 +341,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, break; } - if (dml2->config.bbox_overrides.clks_table.num_states) - p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states; - /* Override from passed values, if available */ for (i = 0; i < p->in_states->num_states; i++) { if (dml2->config.bbox_overrides.sr_exit_latency_us) { @@ -400,7 +397,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, } /* Copy clocks tables entries, if available */ if (dml2->config.bbox_overrides.clks_table.num_states) { - + p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states; for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) { p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz; } @@ -439,6 +436,14 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, } dml2_policy_build_synthetic_soc_states(s, p); + if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 || + dml2->v20.dml_core_ctx.project == dml_project_dcn351) { + // Override last out_state with data from last in_state + // This will ensure that out_state contains max fclk + memcpy(&p->out_states->state_array[p->out_states->num_states - 1], + &p->in_states->state_array[p->in_states->num_states - 1], + sizeof(struct soc_state_bounding_box_st)); + } } void dml2_translate_ip_params(const struct dc *in, struct ip_params_st *out) @@ -793,35 +798,28 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p } } -/*TODO no support for mpc combine, need rework - should calculate scaling params based on plane+stream*/ -static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, const struct dc_state *context) +static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context) { int i; - struct scaler_data data = { 0 }; + struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe; + + memset(temp_pipe, 0, sizeof(struct pipe_ctx)); for (i = 0; i < MAX_PIPES; i++) { const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->plane_state == in && !pipe->prev_odm_pipe) { - const struct pipe_ctx *next_pipe = pipe->next_odm_pipe; - - data = context->res_ctx.pipe_ctx[i].plane_res.scl_data; - while (next_pipe) { - data.h_active += next_pipe->plane_res.scl_data.h_active; - data.recout.width += 
next_pipe->plane_res.scl_data.recout.width; - if (in->rotation == ROTATION_ANGLE_0 || in->rotation == ROTATION_ANGLE_180) { - data.viewport.width += next_pipe->plane_res.scl_data.viewport.width; - } else { - data.viewport.height += next_pipe->plane_res.scl_data.viewport.height; - } - next_pipe = next_pipe->next_odm_pipe; - } + temp_pipe->stream = pipe->stream; + temp_pipe->plane_state = pipe->plane_state; + temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps; + + resource_build_scaling_params(temp_pipe); break; } } ASSERT(i < MAX_PIPES); - return data; + return temp_pipe->plane_res.scl_data; } static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in) @@ -866,7 +864,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned out->ScalerEnabled[location] = false; } -static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, const struct dc_state *context) +static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, struct dc_state *context) { const struct scaler_data scaler_data = get_scaler_data_for_plane(in, context); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 26307e599614..2a58a7687bdb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -76,6 +76,11 @@ static void map_hw_resources(struct dml2_context *dml2, in_out_display_cfg->hw.DLGRefClkFreqMHz = 50; } for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) { + if (i >= __DML2_WRAPPER_MAX_STREAMS_PLANES__) { + dml_print("DML::%s: Index out of bounds: i=%d, __DML2_WRAPPER_MAX_STREAMS_PLANES__=%d\n", + __func__, i, __DML2_WRAPPER_MAX_STREAMS_PLANES__); + break; + } dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i]; dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true; dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i]; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 5660f15da291..01493c49bd7a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -1183,9 +1183,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) dto_params.timing = &pipe_ctx->stream->timing; dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; if (dccg) { - dccg->funcs->set_dtbclk_dto(dccg, &dto_params); dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst); + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); } } else if (dccg && dccg->funcs->disable_symclk_se) { dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, @@ -1476,7 +1476,7 @@ static enum dc_status dce110_enable_stream_timing( return DC_OK; } -static enum dc_status apply_single_controller_ctx_to_hw( +enum dc_status dce110_apply_single_controller_ctx_to_hw( struct pipe_ctx *pipe_ctx, struct dc_state *context, struct dc *dc) @@ -2302,7 +2302,7 @@ enum dc_status dce110_apply_ctx_to_hw( if (pipe_ctx->top_pipe || 
pipe_ctx->prev_odm_pipe) continue; - status = apply_single_controller_ctx_to_hw( + status = dce110_apply_single_controller_ctx_to_hw( pipe_ctx, context, dc); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h index 08028a1779ae..ed3cc3648e8e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h @@ -39,6 +39,10 @@ enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context); +enum dc_status dce110_apply_single_controller_ctx_to_hw( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); void dce110_enable_stream(struct pipe_ctx *pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index e931342fcf4c..931ac8ed7069 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -2561,7 +2561,7 @@ void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) tg->funcs->setup_vertical_interrupt2(tg, start_line); } -static void dcn20_reset_back_end_for_pipe( +void dcn20_reset_back_end_for_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) @@ -2790,18 +2790,17 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) } if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { - dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; - dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst); - - phyd32clk = get_phyd32clk_src(link); - dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); - dto_params.otg_inst = tg->inst; dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); dto_params.timing = &pipe_ctx->stream->timing; dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; + dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst); + + phyd32clk = get_phyd32clk_src(link); + dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); } else { if (dccg->funcs->enable_symclk_se) dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h index b94c85340abf..d950b3e54ec2 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h @@ -84,6 +84,10 @@ enum dc_status dcn20_enable_stream_timing( void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_reset_back_end_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); void dcn20_init_blank( struct dc *dc, struct timing_generator *tg); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c index 8e88dcaf88f5..7252f5f781f0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c @@ -206,28 +206,32 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) void 
dcn21_set_pipe(struct pipe_ctx *pipe_ctx) { struct abm *abm = pipe_ctx->stream_res.abm; - uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; + struct timing_generator *tg = pipe_ctx->stream_res.tg; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu; + uint32_t otg_inst; + + if (!abm || !tg || !panel_cntl) + return; + + otg_inst = tg->inst; if (dmcu) { dce110_set_pipe(pipe_ctx); return; } - if (abm && panel_cntl) { - if (abm->funcs && abm->funcs->set_pipe_ex) { - abm->funcs->set_pipe_ex(abm, + if (abm->funcs && abm->funcs->set_pipe_ex) { + abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst, panel_cntl->pwrseq_inst); - } else { - dmub_abm_set_pipe(abm, otg_inst, - SET_ABM_PIPE_NORMAL, - panel_cntl->inst, - panel_cntl->pwrseq_inst); - } + } else { + dmub_abm_set_pipe(abm, otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); } } @@ -237,34 +241,35 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, { struct dc_context *dc = pipe_ctx->stream->ctx; struct abm *abm = pipe_ctx->stream_res.abm; + struct timing_generator *tg = pipe_ctx->stream_res.tg; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; + uint32_t otg_inst; + + if (!abm || !tg || !panel_cntl) + return false; + + otg_inst = tg->inst; if (dc->dc->res_pool->dmcu) { dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp); return true; } - if (abm != NULL) { - uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; - - if (abm && panel_cntl) { - if (abm->funcs && abm->funcs->set_pipe_ex) { - abm->funcs->set_pipe_ex(abm, - otg_inst, - SET_ABM_PIPE_NORMAL, - panel_cntl->inst, - panel_cntl->pwrseq_inst); - } else { - dmub_abm_set_pipe(abm, - otg_inst, - SET_ABM_PIPE_NORMAL, - panel_cntl->inst, - panel_cntl->pwrseq_inst); - } - } + if (abm->funcs && abm->funcs->set_pipe_ex) { + abm->funcs->set_pipe_ex(abm, + otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); + } else { + dmub_abm_set_pipe(abm, + otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); } - if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm) + if (abm->funcs && abm->funcs->set_backlight_level_pwm) abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16, frame_ramp, 0, panel_cntl->inst); else diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 6c9299c7683d..aa36d7a56ca8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -1474,9 +1474,44 @@ void dcn32_update_dsc_pg(struct dc *dc, } } +void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context) +{ + struct dce_hwseq *hws = dc->hwseq; + int i; + + for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx_old->stream) + continue; + + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM) + continue; + + if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) + continue; + + if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) || + (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) { + struct clock_source *old_clk = pipe_ctx_old->clock_source; + + if (hws->funcs.reset_back_end_for_pipe) + 
hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); + if (old_clk) + old_clk->funcs->cs_power_down(old_clk); + } + } +} + void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) { unsigned int i; + enum dc_status status = DC_OK; + struct dce_hwseq *hws = dc->hwseq; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -1497,16 +1532,39 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) } } for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; - - if (new_pipe->stream && dc_state_get_pipe_subvp_type(context, new_pipe) == SUBVP_PHANTOM) { - // If old context or new context has phantom pipes, apply - // the phantom timings now. We can't change the phantom - // pipe configuration safely without driver acquiring - // the DMCUB lock first. - dc->hwss.apply_ctx_to_hw(dc, context); - break; + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream == NULL) + continue; + + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) + continue; + + if (pipe_ctx->stream == pipe_ctx_old->stream && + pipe_ctx->stream->link->link_state_valid) { + continue; } + + if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) + continue; + + if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) + continue; + + if (hws->funcs.apply_single_controller_ctx_to_hw) + status = hws->funcs.apply_single_controller_ctx_to_hw( + pipe_ctx, + context, + dc); + + ASSERT(status == DC_OK); + +#ifdef CONFIG_DRM_AMD_DC_FP + if (hws->funcs.resync_fifo_dccg_dio) + hws->funcs.resync_fifo_dccg_dio(hws, dc, context); +#endif } } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h index cecf7f0f5671..069e20bc87c0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h @@ -111,6 +111,8 @@ void dcn32_update_dsc_pg(struct dc *dc, void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context); +void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context); + void dcn32_init_blank( struct dc *dc, struct timing_generator *tg); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c index 427cfc8c24a4..e8ac94a005b8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c @@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .get_dcc_en_bits = dcn10_get_dcc_en_bits, .commit_subvp_config = dcn32_commit_subvp_config, .enable_phantom_streams = dcn32_enable_phantom_streams, + .disable_phantom_streams = dcn32_disable_phantom_streams, .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, .update_visual_confirm_color = dcn10_update_visual_confirm_color, .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast, @@ -159,6 +160,8 @@ static const struct hwseq_private_funcs dcn32_private_funcs = { .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, .resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio, .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, + .apply_single_controller_ctx_to_hw = 
dce110_apply_single_controller_ctx_to_hw, + .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe, }; void dcn32_hw_sequencer_init_functions(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index a54399383318..64ca7c66509b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -379,6 +379,7 @@ struct hw_sequencer_funcs { struct dc_cursor_attributes *cursor_attr); void (*commit_subvp_config)(struct dc *dc, struct dc_state *context); void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context); + void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context); void (*subvp_pipe_control_lock)(struct dc *dc, struct dc_state *context, bool lock, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 6137cf09aa54..b3c62a82cb1c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -165,8 +165,15 @@ struct hwseq_private_funcs { void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx); void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context); + enum dc_status (*apply_single_controller_ctx_to_hw)( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx); #endif + void (*reset_back_end_for_pipe)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index f74ae0d41d3c..3a6bf77a6873 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -469,6 +469,8 @@ struct resource_context { unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS]; int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS]; bool is_mpc_3dlut_acquired[MAX_PIPES]; + /* solely used for building scaler data in dml2 */ + struct pipe_ctx temp_pipe; }; struct dce_bw_output { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h index 5dcbaa2db964..e97d964a1791 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h @@ -57,7 +57,7 @@ struct panel_cntl_funcs { struct panel_cntl_init_data { struct dc_context *ctx; uint32_t inst; - uint32_t pwrseq_inst; + uint32_t eng_id; }; struct panel_cntl { diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index c958ef37b78a..77a60aa9f27b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -427,22 +427,18 @@ struct pipe_ctx *resource_get_primary_dpp_pipe(const struct pipe_ctx *dpp_pipe); int resource_get_mpc_slice_index(const struct pipe_ctx *dpp_pipe); /* - * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice - * count is equal to MPC splits + 1. For example if a plane is cut 3 times, it - * will have 4 pieces of slice. - * return - 0 if pipe is not used for a plane with MPCC combine. otherwise - * the number of MPC "cuts" for the plane. + * Get the number of MPC slices associated with the pipe. 
+ * The function returns 0 if the pipe is not associated with an MPC combine + * pipe topology. */ -int resource_get_mpc_slice_count(const struct pipe_ctx *opp_head); +int resource_get_mpc_slice_count(const struct pipe_ctx *pipe); /* - * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice - * count is equal to ODM splits + 1. For example if a timing is cut 3 times, it - * will have 4 pieces of slice. - * return - 0 if pipe is not used for ODM combine. otherwise - * the number of ODM "cuts" for the timing. + * Get the number of ODM slices associated with the pipe. + * The function returns 0 if the pipe is not associated with an ODM combine + * pipe topology. */ -int resource_get_odm_slice_count(const struct pipe_ctx *otg_master); +int resource_get_odm_slice_count(const struct pipe_ctx *pipe); /* Get the ODM slice index counting from 0 from left most slice */ int resource_get_odm_slice_index(const struct pipe_ctx *opp_head); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 37d3027c32dc..cf22b8f28ba6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -370,30 +370,6 @@ static enum transmitter translate_encoder_to_transmitter( } } -static uint8_t translate_dig_inst_to_pwrseq_inst(struct dc_link *link) -{ - uint8_t pwrseq_inst = 0xF; - struct dc_context *dc_ctx = link->dc->ctx; - - DC_LOGGER_INIT(dc_ctx->logger); - - switch (link->eng_id) { - case ENGINE_ID_DIGA: - pwrseq_inst = 0; - break; - case ENGINE_ID_DIGB: - pwrseq_inst = 1; - break; - default: - DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", link->eng_id); - ASSERT(false); - break; - } - - return pwrseq_inst; -} - - static void link_destruct(struct dc_link *link) { int i; @@ -657,7 +633,7 @@ static bool construct_phy(struct dc_link *link, link->link_id.id == CONNECTOR_ID_LVDS)) { panel_cntl_init_data.ctx = dc_ctx; panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count; - panel_cntl_init_data.pwrseq_inst = translate_dig_inst_to_pwrseq_inst(link); + panel_cntl_init_data.eng_id = link->eng_id; link->panel_cntl = link->dc->res_pool->funcs->panel_cntl_create( &panel_cntl_init_data); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index 8fe66c367850..5b0bc7f6a188 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -361,7 +361,7 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un struct dc_link *dpia_link[MAX_DPIA_NUM] = {0}; int num_dpias = 0; - for (uint8_t i = 0; i < num_streams; ++i) { + for (unsigned int i = 0; i < num_streams; ++i) { if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) { /* new dpia sst stream, check whether it exceeds max dpia */ if (num_dpias >= MAX_DPIA_NUM) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c index dd0d2b206462..5491b707cec8 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -196,7 +196,7 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in struct dc_link *link_dpia_primary, *link_dpia_secondary; int total_bw = 0; - for (uint8_t i = 0; i < MAX_PIPES * 2; ++i) { + for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) { if 
(!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) continue; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 5a0b04518956..16a62e018712 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -517,6 +517,7 @@ enum link_training_result dp_check_link_loss_status( { enum link_training_result status = LINK_TRAINING_SUCCESS; union lane_status lane_status; + union lane_align_status_updated dpcd_lane_status_updated; uint8_t dpcd_buf[6] = {0}; uint32_t lane; @@ -532,10 +533,12 @@ enum link_training_result dp_check_link_loss_status( * check lanes status */ lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane); + dpcd_lane_status_updated.raw = dpcd_buf[4]; if (!lane_status.bits.CHANNEL_EQ_DONE_0 || !lane_status.bits.CR_DONE_0 || - !lane_status.bits.SYMBOL_LOCKED_0) { + !lane_status.bits.SYMBOL_LOCKED_0 || + !dp_is_interlane_aligned(dpcd_lane_status_updated)) { /* if one of the channel equalization, clock * recovery or symbol lock is dropped * consider it as (link has been diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c index e8dda44b23cb..5d36bab0029c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c @@ -619,7 +619,7 @@ static enum link_training_result dpia_training_eq_non_transparent( uint32_t retries_eq = 0; enum dc_status status; enum dc_dp_training_pattern tr_pattern; - uint32_t wait_time_microsec; + uint32_t wait_time_microsec = 0; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = {0}; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index 511ff6b5b985..7538b548c572 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -999,7 +999,7 @@ static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id vpg = dcn301_vpg_create(ctx, vpg_inst); afmt = dcn301_afmt_create(ctx, afmt_inst); - if (!enc1 || !vpg || !afmt) { + if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) { kfree(enc1); kfree(vpg); kfree(afmt); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index c4d71e7f18af..6f10052caeef 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -1829,7 +1829,21 @@ int dcn32_populate_dml_pipes_from_context( dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt); DC_FP_END(); pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + if (dc->config.enable_windowed_mpo_odm && + dc->debug.enable_single_display_2to1_odm_policy) { + switch (resource_get_odm_slice_count(pipe)) { + case 2: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; + break; + case 4: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1; 
+ break; + default: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + } + } else { + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + } pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 761ec9891875..5fdcda8f8602 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -780,8 +780,8 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = false, .ignore_pg = true, .psp_disabled_wa = true, - .ips2_eval_delay_us = 200, - .ips2_entry_delay_us = 400, + .ips2_eval_delay_us = 2000, + .ips2_entry_delay_us = 800, .static_screen_wait_frames = 2, }; @@ -2130,6 +2130,7 @@ static bool dcn35_resource_construct( dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; + dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) dc->dml2_options.minimize_dispclk_using_odm = true; dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 087d57850304..39c5e1dfa275 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2558,6 +2558,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); int err, ret; + u32 pwm_mode; int value; if (amdgpu_in_reset(adev)) @@ -2569,13 +2570,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, if (err) return err; + if (value == 0) + pwm_mode = AMD_FAN_CTRL_NONE; + else if (value == 1) + pwm_mode = AMD_FAN_CTRL_MANUAL; + else if (value == 2) + pwm_mode = AMD_FAN_CTRL_AUTO; + else + return -EINVAL; + ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (ret < 0) { pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return ret; } - ret = amdgpu_dpm_set_fan_control_mode(adev, value); + ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index df4f20293c16..eb4da3666e05 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -6925,6 +6925,23 @@ static int si_dpm_enable(struct amdgpu_device *adev) return 0; } +static int si_set_temperature_range(struct amdgpu_device *adev) +{ + int ret; + + ret = si_thermal_enable_alert(adev, false); + if (ret) + return ret; + ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); + if (ret) + return ret; + ret = si_thermal_enable_alert(adev, true); + if (ret) + return ret; + + return ret; +} + static void si_dpm_disable(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = rv770_get_pi(adev); @@ -7608,6 +7625,18 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev, static int si_dpm_late_init(void *handle) { + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->pm.dpm_enabled) + 
return 0; + + ret = si_set_temperature_range(adev); + if (ret) + return ret; +#if 0 //TODO ? + si_dpm_powergate_uvd(adev, true); +#endif return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 7ffad3eb0a01..0ad947df777a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -734,7 +734,7 @@ static int smu_early_init(void *handle) smu->adev = adev; smu->pm_enabled = !!amdgpu_dpm; smu->is_apu = false; - smu->smu_baco.state = SMU_BACO_STATE_NONE; + smu->smu_baco.state = SMU_BACO_STATE_EXIT; smu->smu_baco.platform_support = false; smu->user_dpm_profile.fan_mode = -1; @@ -1954,31 +1954,10 @@ static int smu_smc_hw_cleanup(struct smu_context *smu) return 0; } -static int smu_reset_mp1_state(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - int ret = 0; - - if ((!adev->in_runpm) && (!adev->in_suspend) && - (!amdgpu_in_reset(adev))) - switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(13, 0, 0): - case IP_VERSION(13, 0, 7): - case IP_VERSION(13, 0, 10): - ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD); - break; - default: - break; - } - - return ret; -} - static int smu_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = adev->powerplay.pp_handle; - int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; @@ -1996,15 +1975,7 @@ static int smu_hw_fini(void *handle) adev->pm.dpm_enabled = false; - ret = smu_smc_hw_cleanup(smu); - if (ret) - return ret; - - ret = smu_reset_mp1_state(smu); - if (ret) - return ret; - - return 0; + return smu_smc_hw_cleanup(smu); } static void smu_late_fini(void *handle) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 2aa4fea87314..66e84defd0b6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -424,7 +424,6 @@ enum smu_reset_mode { enum smu_baco_state { SMU_BACO_STATE_ENTER = 0, SMU_BACO_STATE_EXIT, - SMU_BACO_STATE_NONE, }; struct smu_baco_context { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 4cd43bbec910..bcad42534da4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1303,13 +1303,12 @@ static int arcturus_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (smu->od_enabled) { + if (smu->od_enabled) od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); - } else { + else od_percent_upper = 0; - od_percent_lower = 100; - } + + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", od_percent_upper, od_percent_lower, power_limit); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 8d1d29ffb0f1..ed189a3878eb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2357,13 +2357,12 @@ static int navi10_get_power_limit(struct smu_context *smu, *default_power_limit = power_limit; if 
(smu->od_enabled && - navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) { + navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); - } else { + else od_percent_upper = 0; - od_percent_lower = 100; - } + + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", od_percent_upper, od_percent_lower, power_limit); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 21fc033528fa..e2ad2b972ab0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -640,13 +640,12 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (smu->od_enabled) { + if (smu->od_enabled) od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); - } else { + else od_percent_upper = 0; - od_percent_lower = 100; - } + + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", od_percent_upper, od_percent_lower, power_limit); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 2ff6deedef95..da1f43999d09 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -451,7 +451,7 @@ static int vangogh_init_smc_tables(struct smu_context *smu) #ifdef CONFIG_X86 /* AMD x86 APU only */ - smu->cpu_core_num = boot_cpu_data.x86_max_cores; + smu->cpu_core_num = topology_num_cores_per_package(); #else smu->cpu_core_num = 4; #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 4fdf34fffa9a..9b80f18ea6c3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2369,13 +2369,12 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (smu->od_enabled) { + if (smu->od_enabled) od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); - } else { + else od_percent_upper = 0; - od_percent_lower = 100; - } + + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", od_percent_upper, od_percent_lower, power_limit); @@ -2748,13 +2747,7 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu, switch (mp1_state) { case PP_MP1_STATE_UNLOAD: - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_PrepareMp1ForUnload, - 0x55, NULL); - - if (!ret && smu->smu_baco.state == 
SMU_BACO_STATE_EXIT) - ret = smu_v13_0_disable_pmfw_state(smu); - + ret = smu_cmn_set_mp1_state(smu, mp1_state); break; default: /* Ignore others */ @@ -2950,7 +2943,7 @@ static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - switch (adev->ip_versions[MP1_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(13, 0, 0): return smu->smc_fw_version >= 0x004e6300; case IP_VERSION(13, 0, 10): diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 7c3e162e2d81..3dc7b60cb075 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -2333,13 +2333,12 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (smu->od_enabled) { + if (smu->od_enabled) od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); - } else { + else od_percent_upper = 0; - od_percent_lower = 100; - } + + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", od_percent_upper, od_percent_lower, power_limit); @@ -2505,13 +2504,7 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu, switch (mp1_state) { case PP_MP1_STATE_UNLOAD: - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_PrepareMp1ForUnload, - 0x55, NULL); - - if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT) - ret = smu_v13_0_disable_pmfw_state(smu); - + ret = smu_cmn_set_mp1_state(smu, mp1_state); break; default: /* Ignore others */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 4894f7ee737b..6dae5ad74ff0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -229,8 +229,6 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; break; case IP_VERSION(14, 0, 0): - if ((smu->smc_fw_version < 0x5d3a00)) - dev_warn(smu->adev->dev, "The PMFW version(%x) is behind in this BIOS!\n", smu->smc_fw_version); smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; break; default: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index 47fdbae4adfc..9310c4758e38 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -261,7 +261,10 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu, *value = metrics->MpipuclkFrequency; break; case METRICS_AVERAGE_GFXACTIVITY: - *value = metrics->GfxActivity / 100; + if ((smu->smc_fw_version > 0x5d4600)) + *value = metrics->GfxActivity; + else + *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: *value = metrics->VcnActivity / 100; diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c index bb55f697a181..6886db2d9e00 100644 --- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c +++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c @@ -25,20 +25,18 @@ static void drm_aux_hpd_bridge_release(struct device *dev) 
ida_free(&drm_aux_hpd_bridge_ida, adev->id); of_node_put(adev->dev.platform_data); + of_node_put(adev->dev.of_node); kfree(adev); } -static void drm_aux_hpd_bridge_unregister_adev(void *_adev) +static void drm_aux_hpd_bridge_free_adev(void *_adev) { - struct auxiliary_device *adev = _adev; - - auxiliary_device_delete(adev); - auxiliary_device_uninit(adev); + auxiliary_device_uninit(_adev); } /** - * drm_dp_hpd_bridge_register - Create a simple HPD DisplayPort bridge + * devm_drm_dp_hpd_bridge_alloc - allocate a HPD DisplayPort bridge * @parent: device instance providing this bridge * @np: device node pointer corresponding to this bridge instance * @@ -46,11 +44,9 @@ static void drm_aux_hpd_bridge_unregister_adev(void *_adev) * DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is * able to send the HPD events. * - * Return: device instance that will handle created bridge or an error code - * encoded into the pointer. + * Return: bridge auxiliary device pointer or an error pointer */ -struct device *drm_dp_hpd_bridge_register(struct device *parent, - struct device_node *np) +struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np) { struct auxiliary_device *adev; int ret; @@ -74,18 +70,62 @@ struct device *drm_dp_hpd_bridge_register(struct device *parent, ret = auxiliary_device_init(adev); if (ret) { + of_node_put(adev->dev.platform_data); + of_node_put(adev->dev.of_node); ida_free(&drm_aux_hpd_bridge_ida, adev->id); kfree(adev); return ERR_PTR(ret); } - ret = auxiliary_device_add(adev); - if (ret) { - auxiliary_device_uninit(adev); + ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_free_adev, adev); + if (ret) return ERR_PTR(ret); - } - ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_unregister_adev, adev); + return adev; +} +EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_alloc); + +static void drm_aux_hpd_bridge_del_adev(void *_adev) +{ + auxiliary_device_delete(_adev); +} + +/** + * devm_drm_dp_hpd_bridge_add - register a HPD DisplayPort bridge + * @dev: struct device to tie registration lifetime to + * @adev: bridge auxiliary device to be registered + * + * Returns: zero on success or a negative errno + */ +int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev) +{ + int ret; + + ret = auxiliary_device_add(adev); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, drm_aux_hpd_bridge_del_adev, adev); +} +EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_add); + +/** + * drm_dp_hpd_bridge_register - allocate and register a HPD DisplayPort bridge + * @parent: device instance providing this bridge + * @np: device node pointer corresponding to this bridge instance + * + * Return: device instance that will handle created bridge or an error pointer + */ +struct device *drm_dp_hpd_bridge_register(struct device *parent, struct device_node *np) +{ + struct auxiliary_device *adev; + int ret; + + adev = devm_drm_dp_hpd_bridge_alloc(parent, np); + if (IS_ERR(adev)) + return ERR_CAST(adev); + + ret = devm_drm_dp_hpd_bridge_add(parent, adev); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index f57e6d74fb0e..5ebdd6f8f36e 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -332,6 +332,7 @@ alloc_range_bias(struct drm_buddy *mm, u64 start, u64 end, unsigned int order) { + u64 req_size = mm->chunk_size << order; struct drm_buddy_block *block; struct drm_buddy_block *buddy; LIST_HEAD(dfs); @@ -367,6 +368,15 @@ 
alloc_range_bias(struct drm_buddy *mm, if (drm_buddy_block_is_allocated(block)) continue; + if (block_start < start || block_end > end) { + u64 adjusted_start = max(block_start, start); + u64 adjusted_end = min(block_end, end); + + if (round_down(adjusted_end + 1, req_size) <= + round_up(adjusted_start, req_size)) + continue; + } + if (contains(start, end, block_start, block_end) && order == drm_buddy_block_order(block)) { /* @@ -538,7 +548,13 @@ static int __alloc_range(struct drm_buddy *mm, list_add(&block->left->tmp_link, dfs); } while (1); + if (total_allocated < size) { + err = -ENOSPC; + goto err_free; + } + list_splice_tail(&allocated, blocks); + return 0; err_undo: @@ -755,8 +771,12 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, return -EINVAL; /* Actual range allocation */ - if (start + size == end) + if (start + size == end) { + if (!IS_ALIGNED(start | end, min_block_size)) + return -EINVAL; + return __drm_buddy_alloc_range(mm, start, size, NULL, blocks); + } original_size = size; original_min_size = min_block_size; diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index cb90e70d85e8..65f9f66933bb 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -904,6 +904,7 @@ out: connector_set = NULL; fb = NULL; mode = NULL; + num_connectors = 0; DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 834a5e28abbe..7352bde299d5 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -820,7 +820,7 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, if (max_segment == 0) max_segment = UINT_MAX; err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, + (unsigned long)nr_pages << PAGE_SHIFT, max_segment, GFP_KERNEL); if (err) { kfree(sg); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 3f479483d7d8..23b4e9a3361d 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -760,9 +760,11 @@ static void output_poll_execute(struct work_struct *work) changed = dev->mode_config.delayed_event; dev->mode_config.delayed_event = false; - if (!drm_kms_helper_poll && dev->mode_config.poll_running) { - drm_kms_helper_disable_hpd(dev); - dev->mode_config.poll_running = false; + if (!drm_kms_helper_poll) { + if (dev->mode_config.poll_running) { + drm_kms_helper_disable_hpd(dev); + dev->mode_config.poll_running = false; + } goto out; } diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 84101baeecc6..a6c19de46292 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -1040,7 +1040,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, uint64_t *points; uint32_t signaled_count, i; - if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) + if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) lockdep_assert_none_held_once(); points = kmalloc_array(count, sizeof(*points), GFP_KERNEL); @@ -1109,7 +1110,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, * fallthough and try a 0 timeout wait! 
*/ - if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { + if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) { for (i = 0; i < count; ++i) drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]); } @@ -1416,10 +1418,21 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj, /* This happens inside the syncobj lock */ fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1)); + if (!fence) + return; + ret = dma_fence_chain_find_seqno(&fence, entry->point); - if (ret != 0 || !fence) { + if (ret != 0) { + /* The given seqno has not been submitted yet. */ dma_fence_put(fence); return; + } else if (!fence) { + /* If dma_fence_chain_find_seqno returns 0 but sets the fence + * to NULL, it implies that the given seqno is signaled and a + * later seqno has already been submitted. Assign a stub fence + * so that the eventfd still gets signaled below. + */ + fence = dma_fence_get_stub(); } list_del_init(&entry->node); diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index b5d6e3352071..3089029abba4 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -140,7 +140,7 @@ config DRM_I915_GVT_KVMGT Note that this driver only supports newer device from Broadwell on. For further information and setup guide, you can visit: - http://01.org/igvt-g. + https://github.com/intel/gvt-linux/wiki. If in doubt, say "N". diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 47cd6bb04366..06900ff307b2 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -246,7 +246,14 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915, enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch); - return intel_port_to_phy(i915, dig_port->base.port); + /* + * FIXME should we care about the (VBT defined) dig_port->aux_ch + * relationship or should this be purely defined by the hardware layout? + * Currently if the port doesn't appear in the VBT, or if it's declared + * as HDMI-only and routed to a combo PHY, the encoder either won't be + * present at all or it will not have an aux_ch assigned. + */ + return dig_port ? 
intel_port_to_phy(i915, dig_port->base.port) : PHY_NONE; } static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, @@ -414,7 +421,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx)); - if (DISPLAY_VER(dev_priv) < 12) + /* FIXME this is a mess */ + if (phy != PHY_NONE) intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), 0, ICL_LANE_ENABLE_AUX); @@ -437,7 +445,10 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); - intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0); + /* FIXME this is a mess */ + if (phy != PHY_NONE) + intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), + ICL_LANE_ENABLE_AUX, 0); intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 3fdd8a517983..ac7fe6281afe 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -609,6 +609,13 @@ struct intel_connector { * and active (i.e. dpms ON state). */ bool (*get_hw_state)(struct intel_connector *); + /* + * Optional hook called during init/resume to sync any state + * stored in the connector (eg. DSC state) wrt. the HW state. + */ + void (*sync_state)(struct intel_connector *connector, + const struct intel_crtc_state *crtc_state); + /* Panel info for eDP and LVDS */ struct intel_panel panel; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index f5ef95da5534..94d2a15d8444 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2355,6 +2355,9 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp, limits->min_rate = intel_dp_common_rate(intel_dp, 0); limits->max_rate = intel_dp_max_link_rate(intel_dp); + /* FIXME 128b/132b SST support missing */ + limits->max_rate = min(limits->max_rate, 810000); + limits->min_lane_count = 1; limits->max_lane_count = intel_dp_max_lane_count(intel_dp); @@ -5696,6 +5699,9 @@ intel_dp_detect(struct drm_connector *connector, goto out; } + if (!intel_dp_is_edp(intel_dp)) + intel_psr_init_dpcd(intel_dp); + intel_dp_detect_dsc_caps(intel_dp, intel_connector); intel_dp_configure_mst(intel_dp); @@ -5856,6 +5862,19 @@ intel_dp_connector_unregister(struct drm_connector *connector) intel_connector_unregister(connector); } +void intel_dp_connector_sync_state(struct intel_connector *connector, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(connector->base.dev); + + if (crtc_state && crtc_state->dsc.compression_enable) { + drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux); + connector->dp.dsc_decompression_enabled = true; + } else { + connector->dp.dsc_decompression_enabled = false; + } +} + void intel_dp_encoder_flush_work(struct drm_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 05db46b111f2..375d0677cd8c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -45,6 +45,8 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, int intel_dp_min_bpp(enum intel_output_format output_format); bool intel_dp_init_connector(struct intel_digital_port 
*dig_port, struct intel_connector *intel_connector); +void intel_dp_connector_sync_state(struct intel_connector *connector, + const struct intel_crtc_state *crtc_state); void intel_dp_set_link_params(struct intel_dp *intel_dp, int link_rate, int lane_count); int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c index 3a595cd433d4..8538d1ce2fcb 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -330,23 +330,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { 0, 0 }, }; -static struct drm_dp_aux * -intel_dp_hdcp_get_aux(struct intel_connector *connector) -{ - struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - - if (intel_encoder_is_mst(connector->encoder)) - return &connector->port->aux; - else - return &dig_port->dp.aux; -} - static int intel_dp_hdcp2_read_rx_status(struct intel_connector *connector, u8 *rx_status) { struct drm_i915_private *i915 = to_i915(connector->base.dev); - struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct drm_dp_aux *aux = &dig_port->dp.aux; ssize_t ret; ret = drm_dp_dpcd_read(aux, @@ -399,7 +389,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector, const struct hdcp2_dp_msg_data *hdcp2_msg_data) { struct drm_i915_private *i915 = to_i915(connector->base.dev); - struct intel_hdcp *hdcp = &connector->hdcp; + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct intel_dp *dp = &dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; u8 msg_id = hdcp2_msg_data->msg_id; int ret, timeout; bool msg_ready = false; @@ -454,8 +446,9 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector, unsigned int offset; u8 *byte = buf; ssize_t ret, bytes_to_write, len; + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct drm_dp_aux *aux = &dig_port->dp.aux; const struct hdcp2_dp_msg_data *hdcp2_msg_data; - struct drm_dp_aux *aux; hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); if (!hdcp2_msg_data) @@ -463,8 +456,6 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector, offset = hdcp2_msg_data->offset; - aux = intel_dp_hdcp_get_aux(connector); - /* No msg_id in DP HDCP2.2 msgs */ bytes_to_write = size - 1; byte++; @@ -490,7 +481,8 @@ static ssize_t get_receiver_id_list_rx_info(struct intel_connector *connector, u32 *dev_cnt, u8 *byte) { - struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct drm_dp_aux *aux = &dig_port->dp.aux; ssize_t ret; u8 *rx_info = byte; @@ -515,8 +507,9 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector, { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_dp_aux *aux; + struct drm_dp_aux *aux = &dig_port->dp.aux; + struct intel_dp *dp = &dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; unsigned int offset; u8 *byte = buf; ssize_t ret, bytes_to_recv, len; @@ -530,8 +523,6 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector, return -EINVAL; offset = hdcp2_msg_data->offset; - aux = intel_dp_hdcp_get_aux(connector); - ret = intel_dp_hdcp2_wait_for_msg(connector, 
hdcp2_msg_data); if (ret < 0) return ret; @@ -561,13 +552,8 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector, /* Entire msg read timeout since initiate of msg read */ if (bytes_to_recv == size - 1 && hdcp2_msg_data->msg_read_timeout > 0) { - if (intel_encoder_is_mst(connector->encoder)) - msg_end = ktime_add_ms(ktime_get_raw(), - hdcp2_msg_data->msg_read_timeout * - connector->port->parent->num_ports); - else - msg_end = ktime_add_ms(ktime_get_raw(), - hdcp2_msg_data->msg_read_timeout); + msg_end = ktime_add_ms(ktime_get_raw(), + hdcp2_msg_data->msg_read_timeout); } ret = drm_dp_dpcd_read(aux, offset, @@ -651,12 +637,11 @@ static int intel_dp_hdcp2_capable(struct intel_connector *connector, bool *capable) { - struct drm_dp_aux *aux; + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct drm_dp_aux *aux = &dig_port->dp.aux; u8 rx_caps[3]; int ret; - aux = intel_dp_hdcp_get_aux(connector); - *capable = false; ret = drm_dp_dpcd_read(aux, DP_HDCP_2_2_REG_RX_CAPS_OFFSET, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 8a9432335030..a01a59f57ae5 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -1534,6 +1534,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo return NULL; intel_connector->get_hw_state = intel_dp_mst_get_hw_state; + intel_connector->sync_state = intel_dp_connector_sync_state; intel_connector->mst_port = intel_dp; intel_connector->port = port; drm_dp_mst_get_port_malloc(port); diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c index 94eece7f63be..caeca3a8442c 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -318,12 +318,6 @@ static void intel_modeset_update_connector_atomic_state(struct drm_i915_private const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); - if (crtc_state->dsc.compression_enable) { - drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux); - connector->dp.dsc_decompression_enabled = true; - } else { - connector->dp.dsc_decompression_enabled = false; - } conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3; } } @@ -775,8 +769,9 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { + struct intel_crtc_state *crtc_state = NULL; + if (connector->get_hw_state(connector)) { - struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; connector->base.dpms = DRM_MODE_DPMS_ON; @@ -802,6 +797,10 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) connector->base.dpms = DRM_MODE_DPMS_OFF; connector->base.encoder = NULL; } + + if (connector->sync_state) + connector->sync_state(connector, crtc_state); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] hw state readout: %s\n", connector->base.base.id, connector->base.name, diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 57bbf3e3af92..4faaf4b3fc53 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -2776,9 +2776,6 @@ void intel_psr_init(struct intel_dp *intel_dp) if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv))) return; - if (!intel_dp_is_edp(intel_dp)) - intel_psr_init_dpcd(intel_dp); - /* * HSW spec 
explicitly says PSR is tied to port A. * BDW+ platforms have a instance of PSR registers per transcoder but diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index acc6b6804105..2915d7afe5cc 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -1209,7 +1209,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo, struct intel_sdvo_tv_format format; u32 format_map; - format_map = 1 << conn_state->tv.mode; + format_map = 1 << conn_state->tv.legacy_mode; memset(&format, 0, sizeof(format)); memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map))); @@ -2298,7 +2298,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector) * Read the list of supported input resolutions for the selected TV * format. */ - format_map = 1 << conn_state->tv.mode; + format_map = 1 << conn_state->tv.legacy_mode; memcpy(&tv_res, &format_map, min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request))); @@ -2363,7 +2363,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector, int i; for (i = 0; i < intel_sdvo_connector->format_supported_num; i++) - if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) { + if (state->tv.legacy_mode == intel_sdvo_connector->tv_format_supported[i]) { *val = i; return 0; @@ -2419,7 +2419,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector, struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state); if (property == intel_sdvo_connector->tv_format) { - state->tv.mode = intel_sdvo_connector->tv_format_supported[val]; + state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[val]; if (state->crtc) { struct drm_crtc_state *crtc_state = @@ -3076,7 +3076,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, drm_property_add_enum(intel_sdvo_connector->tv_format, i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); - intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0]; + intel_sdvo_connector->base.base.state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[0]; drm_object_attach_property(&intel_sdvo_connector->base.base.base, intel_sdvo_connector->tv_format, 0); return true; diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index d4386cb3569e..992a725de751 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -949,7 +949,7 @@ intel_disable_tv(struct intel_atomic_state *state, static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state) { - int format = conn_state->tv.mode; + int format = conn_state->tv.legacy_mode; return &tv_modes[format]; } @@ -1704,7 +1704,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector) break; } - connector->state->tv.mode = i; + connector->state->tv.legacy_mode = i; } static int @@ -1859,7 +1859,7 @@ static int intel_tv_atomic_check(struct drm_connector *connector, old_state = drm_atomic_get_old_connector_state(state, connector); new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc); - if (old_state->tv.mode != new_state->tv.mode || + if (old_state->tv.legacy_mode != new_state->tv.legacy_mode || old_state->tv.margins.left != new_state->tv.margins.left || old_state->tv.margins.right != new_state->tv.margins.right || old_state->tv.margins.top != 
new_state->tv.margins.top || @@ -1896,7 +1896,7 @@ static void intel_tv_add_properties(struct drm_connector *connector) conn_state->tv.margins.right = 46; conn_state->tv.margins.bottom = 37; - conn_state->tv.mode = 0; + conn_state->tv.legacy_mode = 0; /* Create TV properties then attach current values */ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) { @@ -1910,7 +1910,7 @@ static void intel_tv_add_properties(struct drm_connector *connector) drm_object_attach_property(&connector->base, i915->drm.mode_config.legacy_tv_mode_property, - conn_state->tv.mode); + conn_state->tv.legacy_mode); drm_object_attach_property(&connector->base, i915->drm.mode_config.tv_left_margin_property, conn_state->tv.margins.left); diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h index 64f440fdc22b..8b21dc8e26d5 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h +++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h @@ -51,8 +51,8 @@ #define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00) #define _DSCA_PPS_0 0x6B200 #define _DSCC_PPS_0 0x6BA00 -#define DSCA_PPS(pps) _MMIO(_DSCA_PPS_0 + (pps) * 4) -#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + (pps) * 4) +#define DSCA_PPS(pps) _MMIO(_DSCA_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4) +#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4) #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270 #define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370 #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470 diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 1d3ebdf4069b..c08b67593565 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -379,6 +379,9 @@ i915_gem_userptr_release(struct drm_i915_gem_object *obj) { GEM_WARN_ON(obj->userptr.page_ref); + if (!obj->userptr.notifier.mm) + return; + mmu_interval_notifier_remove(&obj->userptr.notifier); obj->userptr.notifier.mm = NULL; } diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 90f6c1ece57d..efcb00472be2 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2849,8 +2849,7 @@ static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset, for (i = start; i < end; i += 4) { p = intel_gvt_find_mmio_info(gvt, i); if (p) { - WARN(1, "dup mmio definition offset %x\n", - info->offset); + WARN(1, "dup mmio definition offset %x\n", i); /* We return -EEXIST here to make GVT-g load fail. * So duplicated MMIO can be found as soon as diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index e98b6d69a91a..9b6d87c8b583 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -41,7 +41,7 @@ * To virtualize GPU resources GVT-g driver depends on hypervisor technology * e.g KVM/VFIO/mdev, Xen, etc. to provide resource access trapping capability * and be virtualized within GVT-g device module. More architectural design - * doc is available on https://01.org/group/2230/documentation-list. + * doc is available on https://github.com/intel/gvt-linux/wiki. 
*/ static LIST_HEAD(intel_gvt_devices); diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c index 2990dd4d4a0d..e14ac0ab1314 100644 --- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c +++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c @@ -3,6 +3,8 @@ * Copyright © 2021 Intel Corporation */ +#include <linux/jiffies.h> + //#include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "i915_drv.h" @@ -12,7 +14,7 @@ #define REDUCED_TIMESLICE 5 #define REDUCED_PREEMPT 10 -#define WAIT_FOR_RESET_TIME 10000 +#define WAIT_FOR_RESET_TIME_MS 10000 struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt) { @@ -91,7 +93,7 @@ int intel_selftest_wait_for_rq(struct i915_request *rq) { long ret; - ret = i915_request_wait(rq, 0, WAIT_FOR_RESET_TIME); + ret = i915_request_wait(rq, 0, msecs_to_jiffies(WAIT_FOR_RESET_TIME_MS)); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c index 3f73b211fa8e..3407450435e2 100644 --- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c +++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c @@ -294,6 +294,5 @@ void meson_encoder_cvbs_remove(struct meson_drm *priv) if (priv->encoders[MESON_ENC_CVBS]) { meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS]; drm_bridge_remove(&meson_encoder_cvbs->bridge); - drm_bridge_remove(meson_encoder_cvbs->next_bridge); } } diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c index 3f93c70488ca..311b91630fbe 100644 --- a/drivers/gpu/drm/meson/meson_encoder_dsi.c +++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c @@ -168,6 +168,5 @@ void meson_encoder_dsi_remove(struct meson_drm *priv) if (priv->encoders[MESON_ENC_DSI]) { meson_encoder_dsi = priv->encoders[MESON_ENC_DSI]; drm_bridge_remove(&meson_encoder_dsi->bridge); - drm_bridge_remove(meson_encoder_dsi->next_bridge); } } diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c index 25ea76558690..c4686568c9ca 100644 --- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c @@ -474,6 +474,5 @@ void meson_encoder_hdmi_remove(struct meson_drm *priv) if (priv->encoders[MESON_ENC_HDMI]) { meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI]; drm_bridge_remove(&meson_encoder_hdmi->bridge); - drm_bridge_remove(meson_encoder_hdmi->next_bridge); } } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index c0bc924cd302..c9c55e2ea584 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1287,7 +1287,7 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) gpu->ubwc_config.highest_bank_bit = 15; if (adreno_is_a610(gpu)) { - gpu->ubwc_config.highest_bank_bit = 14; + gpu->ubwc_config.highest_bank_bit = 13; gpu->ubwc_config.min_acc_len = 1; gpu->ubwc_config.ubwc_mode = 1; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 83380bc92a00..6a4b489d44e5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -144,10 +144,6 @@ enum dpu_enc_rc_states { * to track crtc in the disable() hook which is called * _after_ encoder_mask is cleared. 
* @connector: If a mode is set, cached pointer to the active connector - * @crtc_kickoff_cb: Callback into CRTC that will flush & start - * all CTL paths - * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb - * @debugfs_root: Debug file system root file node * @enc_lock: Lock around physical encoder * create/destroy/enable/disable * @frame_busy_mask: Bitmask tracking which phys_enc we are still @@ -2072,7 +2068,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) } /* reset the merge 3D HW block */ - if (phys_enc->hw_pp->merge_3d) { + if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) { phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, BLEND_3D_NONE); if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d) @@ -2103,7 +2099,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) if (phys_enc->hw_wb) intf_cfg.wb = phys_enc->hw_wb->idx; - if (phys_enc->hw_pp->merge_3d) + if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; if (ctl->ops.reset_intf_cfg) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index b58a9c2ae326..724537ab776d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -29,7 +29,6 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx, /** * struct dpu_rm_requirements - Reservation requirements parameter bundle * @topology: selected topology for the display - * @hw_res: Hardware resources required as reported by the encoders */ struct dpu_rm_requirements { struct msm_display_topology topology; @@ -204,6 +203,8 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) * _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary * @rm: dpu resource manager handle * @primary_idx: index of primary mixer in rm->mixer_blks[] + * + * Returns: lm peer mixed id on success or %-EINVAL on error */ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx) { diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index 77a8d9366ed7..fb588fde298a 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -135,11 +135,6 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) tbd = dp_link_get_test_bits_depth(ctrl->link, ctrl->panel->dp_mode.bpp); - if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) { - pr_debug("BIT_DEPTH not set. Configure default\n"); - tbd = DP_TEST_BIT_DEPTH_8; - } - config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT; /* Num of Lanes */ diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index d37d599aec27..4c72124ffb5d 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -329,10 +329,26 @@ static const struct component_ops dp_display_comp_ops = { .unbind = dp_display_unbind, }; +static void dp_display_send_hpd_event(struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + struct drm_connector *connector; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + connector = dp->dp_display.connector; + drm_helper_hpd_irq_event(connector->dev); +} + static int dp_display_send_hpd_notification(struct dp_display_private *dp, bool hpd) { - struct drm_bridge *bridge = dp->dp_display.bridge; + if ((hpd && dp->dp_display.link_ready) || + (!hpd && !dp->dp_display.link_ready)) { + drm_dbg_dp(dp->drm_dev, "HPD already %s\n", + (hpd ? 
"on" : "off")); + return 0; + } /* reset video pattern flag on disconnect */ if (!hpd) { @@ -348,7 +364,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp, drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n", dp->dp_display.connector_type, hpd); - drm_bridge_hpd_notify(bridge, dp->dp_display.link_ready); + dp_display_send_hpd_event(&dp->dp_display); return 0; } diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index 98427d45e9a7..49dfac1fd1ef 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -7,6 +7,7 @@ #include <drm/drm_print.h> +#include "dp_reg.h" #include "dp_link.h" #include "dp_panel.h" @@ -1082,7 +1083,7 @@ int dp_link_process_request(struct dp_link *dp_link) int dp_link_get_colorimetry_config(struct dp_link *dp_link) { - u32 cc; + u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB; struct dp_link_private *link; if (!dp_link) { @@ -1096,10 +1097,11 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link) * Unless a video pattern CTS test is ongoing, use RGB_VESA * Only RGB_VESA and RGB_CEA supported for now */ - if (dp_link_is_video_pattern_requested(link)) - cc = link->dp_link.test_video.test_dyn_range; - else - cc = DP_TEST_DYNAMIC_RANGE_VESA; + if (dp_link_is_video_pattern_requested(link)) { + if (link->dp_link.test_video.test_dyn_range & + DP_TEST_DYNAMIC_RANGE_CEA) + cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB; + } return cc; } @@ -1179,6 +1181,9 @@ void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link) u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) { u32 tbd; + struct dp_link_private *link; + + link = container_of(dp_link, struct dp_link_private, dp_link); /* * Few simplistic rules and assumptions made here: @@ -1196,12 +1201,13 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) tbd = DP_TEST_BIT_DEPTH_10; break; default: - tbd = DP_TEST_BIT_DEPTH_UNKNOWN; + drm_dbg_dp(link->drm_dev, "bpp=%d not supported, use bpc=8\n", + bpp); + tbd = DP_TEST_BIT_DEPTH_8; break; } - if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN) - tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT); + tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT); return tbd; } diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h index ea85a691e72b..78785ed4b40c 100644 --- a/drivers/gpu/drm/msm/dp/dp_reg.h +++ b/drivers/gpu/drm/msm/dp/dp_reg.h @@ -143,6 +143,9 @@ #define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001) #define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005) +#define DP_MISC0_COLORIMERY_CFG_LEGACY_RGB (0) +#define DP_MISC0_COLORIMERY_CFG_CEA_RGB (0x04) + #define REG_DP_VALID_BOUNDARY (0x00000030) #define REG_DP_VALID_BOUNDARY_2 (0x00000034) diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 5f68e31a3e4e..0915f3b68752 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -26,7 +26,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) { void *vaddr; - vaddr = msm_gem_get_vaddr(obj); + vaddr = msm_gem_get_vaddr_locked(obj); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); iosys_map_set_vaddr(map, vaddr); @@ -36,7 +36,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { - msm_gem_put_vaddr(obj); + msm_gem_put_vaddr_locked(obj); } struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 
095390774f22..655002b21b0d 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -751,12 +751,14 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) struct msm_ringbuffer *ring = submit->ring; unsigned long flags; - pm_runtime_get_sync(&gpu->pdev->dev); + WARN_ON(!mutex_is_locked(&gpu->lock)); - mutex_lock(&gpu->lock); + pm_runtime_get_sync(&gpu->pdev->dev); msm_gpu_hw_init(gpu); + submit->seqno = submit->hw_fence->seqno; + update_sw_cntrs(gpu); /* @@ -781,11 +783,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) gpu->funcs->submit(gpu, submit); gpu->cur_ctx_seqno = submit->queue->ctx->seqno; - hangcheck_timer_reset(gpu); - - mutex_unlock(&gpu->lock); - pm_runtime_put(&gpu->pdev->dev); + hangcheck_timer_reset(gpu); } /* diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 5cc8d358cc97..d5512037c38b 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -21,6 +21,8 @@ struct msm_iommu_pagetable { struct msm_mmu base; struct msm_mmu *parent; struct io_pgtable_ops *pgtbl_ops; + const struct iommu_flush_ops *tlb; + struct device *iommu_dev; unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ phys_addr_t ttbr; u32 asid; @@ -201,11 +203,33 @@ static const struct msm_mmu_funcs pagetable_funcs = { static void msm_iommu_tlb_flush_all(void *cookie) { + struct msm_iommu_pagetable *pagetable = cookie; + struct adreno_smmu_priv *adreno_smmu; + + if (!pm_runtime_get_if_in_use(pagetable->iommu_dev)) + return; + + adreno_smmu = dev_get_drvdata(pagetable->parent->dev); + + pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie); + + pm_runtime_put_autosuspend(pagetable->iommu_dev); } static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie) { + struct msm_iommu_pagetable *pagetable = cookie; + struct adreno_smmu_priv *adreno_smmu; + + if (!pm_runtime_get_if_in_use(pagetable->iommu_dev)) + return; + + adreno_smmu = dev_get_drvdata(pagetable->parent->dev); + + pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie); + + pm_runtime_put_autosuspend(pagetable->iommu_dev); } static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, @@ -213,7 +237,7 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, { } -static const struct iommu_flush_ops null_tlb_ops = { +static const struct iommu_flush_ops tlb_ops = { .tlb_flush_all = msm_iommu_tlb_flush_all, .tlb_flush_walk = msm_iommu_tlb_flush_walk, .tlb_add_page = msm_iommu_tlb_add_page, @@ -254,10 +278,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) /* The incoming cfg will have the TTBR1 quirk enabled */ ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1; - ttbr0_cfg.tlb = &null_tlb_ops; + ttbr0_cfg.tlb = &tlb_ops; pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, - &ttbr0_cfg, iommu->domain); + &ttbr0_cfg, pagetable); if (!pagetable->pgtbl_ops) { kfree(pagetable); @@ -279,6 +303,8 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) /* Needed later for TLB flush */ pagetable->parent = parent; + pagetable->tlb = ttbr1_cfg->tlb; + pagetable->iommu_dev = ttbr1_cfg->iommu_dev; pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap; pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr; diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c index 455b2e3a0cdd..35423d10aafa 100644 --- a/drivers/gpu/drm/msm/msm_mdss.c +++ b/drivers/gpu/drm/msm/msm_mdss.c @@ -562,6 
+562,7 @@ static const struct msm_mdss_data sdm670_data = { .ubwc_enc_version = UBWC_2_0, .ubwc_dec_version = UBWC_2_0, .highest_bank_bit = 1, + .reg_bus_bw = 76800, }; static const struct msm_mdss_data sdm845_data = { diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 4bc13f7d005a..9d6655f96f0c 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -21,8 +21,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job) msm_fence_init(submit->hw_fence, fctx); - submit->seqno = submit->hw_fence->seqno; - mutex_lock(&priv->lru.lock); for (i = 0; i < submit->nr_bos; i++) { @@ -35,8 +33,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job) mutex_unlock(&priv->lru.lock); + /* TODO move submit path over to using a per-ring lock.. */ + mutex_lock(&gpu->lock); + msm_gpu_submit(gpu, submit); + mutex_unlock(&gpu->lock); + return dma_fence_get(submit->hw_fence); } diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 1e6aaf95ff7c..ceef470c9fbf 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -100,3 +100,11 @@ config DRM_NOUVEAU_SVM help Say Y here if you want to enable experimental support for Shared Virtual Memory (SVM). + +config DRM_NOUVEAU_GSP_DEFAULT + bool "Use GSP firmware for Turing/Ampere (needs firmware installed)" + depends on DRM_NOUVEAU + default n + help + Say Y here if you want to use the GSP codepaths by default on + Turing and Ampere GPUs. diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h index 0d9fc741a719..932c9fd0b2d8 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h @@ -11,6 +11,7 @@ struct nvkm_client { u32 debug; struct rb_root objroot; + spinlock_t obj_lock; void *data; int (*event)(u64 token, void *argv, u32 argc); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index d1437c08645f..6f5d376d8fcc 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -9,7 +9,7 @@ #define GSP_PAGE_SIZE BIT(GSP_PAGE_SHIFT) struct nvkm_gsp_mem { - u32 size; + size_t size; void *data; dma_addr_t addr; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index a04156ca8390..80f74ee0fc78 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -128,12 +128,14 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, struct nouveau_abi16_ntfy *ntfy, *temp; /* Cancel all jobs from the entity's queue. 
*/ - drm_sched_entity_fini(&chan->sched.entity); + if (chan->sched) + drm_sched_entity_fini(&chan->sched->entity); if (chan->chan) nouveau_channel_idle(chan->chan); - nouveau_sched_fini(&chan->sched); + if (chan->sched) + nouveau_sched_destroy(&chan->sched); /* cleanup notifier state */ list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) { @@ -197,6 +199,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) struct nouveau_cli *cli = nouveau_cli(file_priv); struct nouveau_drm *drm = nouveau_drm(dev); struct nvif_device *device = &drm->client.device; + struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device); struct nvkm_gr *gr = nvxx_gr(device); struct drm_nouveau_getparam *getparam = data; struct pci_dev *pdev = to_pci_dev(dev->dev); @@ -261,6 +264,14 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) getparam->value = nouveau_exec_push_max_from_ib_max(ib_max); break; } + case NOUVEAU_GETPARAM_VRAM_BAR_SIZE: + getparam->value = nvkm_device->func->resource_size(nvkm_device, 1); + break; + case NOUVEAU_GETPARAM_VRAM_USED: { + struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); + getparam->value = (u64)ttm_resource_manager_usage(vram_mgr); + break; + } default: NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param); return -EINVAL; @@ -337,10 +348,16 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) if (ret) goto done; - ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq, - chan->chan->dma.ib_max); - if (ret) - goto done; + /* If we're not using the VM_BIND uAPI, we don't need a scheduler. + * + * The client lock is already acquired by nouveau_abi16_get(). + */ + if (nouveau_cli_uvmm(cli)) { + ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq, + chan->chan->dma.ib_max); + if (ret) + goto done; + } init->channel = chan->chan->chid; diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h index 1f5e243c0c75..11c8c4a80079 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.h +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h @@ -26,7 +26,7 @@ struct nouveau_abi16_chan { struct nouveau_bo *ntfy; struct nouveau_vma *ntfy_vma; struct nvkm_mm heap; - struct nouveau_sched sched; + struct nouveau_sched *sched; }; struct nouveau_abi16 { diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 6f6c31a9937b..a947e1d5f309 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -201,7 +201,8 @@ nouveau_cli_fini(struct nouveau_cli *cli) WARN_ON(!list_empty(&cli->worker)); usif_client_fini(cli); - nouveau_sched_fini(&cli->sched); + if (cli->sched) + nouveau_sched_destroy(&cli->sched); if (uvmm) nouveau_uvmm_fini(uvmm); nouveau_vmm_fini(&cli->svm); @@ -311,7 +312,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, cli->mem = &mems[ret]; /* Don't pass in the (shared) sched_wq in order to let - * nouveau_sched_init() create a dedicated one for VM_BIND jobs. + * nouveau_sched_create() create a dedicated one for VM_BIND jobs. * * This is required to ensure that for VM_BIND jobs free_job() work and * run_job() work can always run concurrently and hence, free_job() work @@ -320,7 +321,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, * locks which indirectly or directly are held for allocations * elsewhere. 
*/ - ret = nouveau_sched_init(&cli->sched, drm, NULL, 1); + ret = nouveau_sched_create(&cli->sched, drm, NULL, 1); if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 8a6d94c8b163..e239c6bf4afa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -98,7 +98,7 @@ struct nouveau_cli { bool disabled; } uvmm; - struct nouveau_sched sched; + struct nouveau_sched *sched; const struct nvif_mclass *mem; diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index bc5d71b79ab2..e65c0ef23bc7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -389,7 +389,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev, if (ret) goto out; - args.sched = &chan16->sched; + args.sched = chan16->sched; args.file_priv = file_priv; args.chan = chan; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index ca762ea55413..93f08f9479d8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -103,6 +103,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error) void nouveau_fence_context_del(struct nouveau_fence_chan *fctx) { + cancel_work_sync(&fctx->uevent_work); nouveau_fence_context_kill(fctx, 0); nvif_event_dtor(&fctx->event); fctx->dead = 1; @@ -145,12 +146,13 @@ nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fc return drop; } -static int -nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc) +static void +nouveau_fence_uevent_work(struct work_struct *work) { - struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event); + struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan, + uevent_work); unsigned long flags; - int ret = NVIF_EVENT_KEEP; + int drop = 0; spin_lock_irqsave(&fctx->lock, flags); if (!list_empty(&fctx->pending)) { @@ -160,11 +162,20 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc fence = list_entry(fctx->pending.next, typeof(*fence), head); chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock)); if (nouveau_fence_update(chan, fctx)) - ret = NVIF_EVENT_DROP; + drop = 1; } + if (drop) + nvif_event_block(&fctx->event); + spin_unlock_irqrestore(&fctx->lock, flags); +} - return ret; +static int +nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc) +{ + struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event); + schedule_work(&fctx->uevent_work); + return NVIF_EVENT_KEEP; } void @@ -178,6 +189,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha } args; int ret; + INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work); INIT_LIST_HEAD(&fctx->flip); INIT_LIST_HEAD(&fctx->pending); spin_lock_init(&fctx->lock); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 64d33ae7f356..8bc065acfe35 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -44,6 +44,7 @@ struct nouveau_fence_chan { u32 context; char name[32]; + struct work_struct uevent_work; struct nvif_event event; int notify_ref, dead, killed; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 49c2bcbef129..5a887d67dc0e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ 
b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -764,7 +764,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, return -ENOMEM; if (unlikely(nouveau_cli_uvmm(cli))) - return -ENOSYS; + return nouveau_abi16_put(abi16, -ENOSYS); list_for_each_entry(temp, &abi16->channels, head) { if (temp->chan->chid == req->channel) { diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index dd98f6910f9c..32fa2e273965 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -398,7 +398,7 @@ static const struct drm_sched_backend_ops nouveau_sched_ops = { .free_job = nouveau_sched_free_job, }; -int +static int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, struct workqueue_struct *wq, u32 credit_limit) { @@ -453,7 +453,30 @@ fail_wq: return ret; } -void +int +nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm, + struct workqueue_struct *wq, u32 credit_limit) +{ + struct nouveau_sched *sched; + int ret; + + sched = kzalloc(sizeof(*sched), GFP_KERNEL); + if (!sched) + return -ENOMEM; + + ret = nouveau_sched_init(sched, drm, wq, credit_limit); + if (ret) { + kfree(sched); + return ret; + } + + *psched = sched; + + return 0; +} + + +static void nouveau_sched_fini(struct nouveau_sched *sched) { struct drm_gpu_scheduler *drm_sched = &sched->base; @@ -471,3 +494,14 @@ nouveau_sched_fini(struct nouveau_sched *sched) if (sched->wq) destroy_workqueue(sched->wq); } + +void +nouveau_sched_destroy(struct nouveau_sched **psched) +{ + struct nouveau_sched *sched = *psched; + + nouveau_sched_fini(sched); + kfree(sched); + + *psched = NULL; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h index a6528f5981e6..e1f01a23e6f6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.h +++ b/drivers/gpu/drm/nouveau/nouveau_sched.h @@ -111,8 +111,8 @@ struct nouveau_sched { } job; }; -int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, - struct workqueue_struct *wq, u32 credit_limit); -void nouveau_sched_fini(struct nouveau_sched *sched); +int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm, + struct workqueue_struct *wq, u32 credit_limit); +void nouveau_sched_destroy(struct nouveau_sched **psched); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index cc03e0c22ff3..5e4565c5011a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -1011,7 +1011,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id) if (ret) return ret; - buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL); + buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL); if (!buffer->fault) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index 4f223c972c6a..0a0a11dc9ec0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1740,7 +1740,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev, if (ret) return ret; - args.sched = &cli->sched; + args.sched = cli->sched; args.file_priv = file_priv; ret = nouveau_uvmm_vm_bind(&args); diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c index ebdeb8eb9e77..c55662937ab2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/client.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c 
@@ -180,6 +180,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg, const char *dbg, client->device = device; client->debug = nvkm_dbgopt(dbg, "CLIENT"); client->objroot = RB_ROOT; + spin_lock_init(&client->obj_lock); client->event = event; INIT_LIST_HEAD(&client->umem); spin_lock_init(&client->lock); diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c index 7c554c14e884..aea3ba72027a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/object.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c @@ -30,8 +30,10 @@ nvkm_object_search(struct nvkm_client *client, u64 handle, const struct nvkm_object_func *func) { struct nvkm_object *object; + unsigned long flags; if (handle) { + spin_lock_irqsave(&client->obj_lock, flags); struct rb_node *node = client->objroot.rb_node; while (node) { object = rb_entry(node, typeof(*object), node); @@ -40,9 +42,12 @@ nvkm_object_search(struct nvkm_client *client, u64 handle, else if (handle > object->object) node = node->rb_right; - else + else { + spin_unlock_irqrestore(&client->obj_lock, flags); goto done; + } } + spin_unlock_irqrestore(&client->obj_lock, flags); return ERR_PTR(-ENOENT); } else { object = &client->object; @@ -57,30 +62,39 @@ done: void nvkm_object_remove(struct nvkm_object *object) { + unsigned long flags; + + spin_lock_irqsave(&object->client->obj_lock, flags); if (!RB_EMPTY_NODE(&object->node)) rb_erase(&object->node, &object->client->objroot); + spin_unlock_irqrestore(&object->client->obj_lock, flags); } bool nvkm_object_insert(struct nvkm_object *object) { - struct rb_node **ptr = &object->client->objroot.rb_node; + struct rb_node **ptr; struct rb_node *parent = NULL; + unsigned long flags; + spin_lock_irqsave(&object->client->obj_lock, flags); + ptr = &object->client->objroot.rb_node; while (*ptr) { struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node); parent = *ptr; - if (object->object < this->object) + if (object->object < this->object) { ptr = &parent->rb_left; - else - if (object->object > this->object) + } else if (object->object > this->object) { ptr = &parent->rb_right; - else + } else { + spin_unlock_irqrestore(&object->client->obj_lock, flags); return false; + } } rb_link_node(&object->node, parent, ptr); rb_insert_color(&object->node, &object->client->objroot); + spin_unlock_irqrestore(&object->client->obj_lock, flags); return true; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c index 4135690326f4..3a30bea30e36 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c @@ -168,12 +168,11 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device, rm->flush = r535_bar_flush; ret = gf100_bar_new_(rm, device, type, inst, &bar); - *pbar = bar; if (ret) { - if (!bar) - kfree(rm); + kfree(rm); return ret; } + *pbar = bar; bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE); if (!bar->flushBAR2PhysMode) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c index 19188683c8fc..8c2bf1c16f2a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c @@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name) return (void *)fw; } +static void +shadow_fw_release(void *fw) +{ + release_firmware(fw); +} + static const struct nvbios_source shadow_fw = { .name = "firmware", 
.init = shadow_fw_init, - .fini = (void(*)(void *))release_firmware, + .fini = shadow_fw_release, .read = shadow_fw_read, .rw = false, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index 9ee58e2a0eb2..a73a5b589790 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -997,6 +997,32 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) return 0; } +static void +nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem) +{ + if (mem->data) { + /* + * Poison the buffer to catch any unexpected access from + * GSP-RM if the buffer was prematurely freed. + */ + memset(mem->data, 0xFF, mem->size); + + dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr); + memset(mem, 0, sizeof(*mem)); + } +} + +static int +nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem) +{ + mem->size = size; + mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); + if (WARN_ON(!mem->data)) + return -ENOMEM; + + return 0; +} + static int r535_gsp_postinit(struct nvkm_gsp *gsp) { @@ -1024,6 +1050,11 @@ r535_gsp_postinit(struct nvkm_gsp *gsp) nvkm_inth_allow(&gsp->subdev.inth); nvkm_wr32(device, 0x110004, 0x00000040); + + /* Release the DMA buffers that were needed only for boot and init */ + nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw); + nvkm_gsp_mem_dtor(gsp, &gsp->libos); + return ret; } @@ -1078,7 +1109,6 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp) if (IS_ERR(rpc)) return PTR_ERR(rpc); - rpc->size = sizeof(*rpc); rpc->numEntries = NV_GSP_REG_NUM_ENTRIES; str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]); @@ -1094,6 +1124,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp) strings += name_len; str_offset += name_len; } + rpc->size = str_offset; return nvkm_gsp_rpc_wr(gsp, rpc, false); } @@ -1532,27 +1563,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc) return 0; } -static void -nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem) -{ - if (mem->data) { - dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr); - mem->data = NULL; - } -} - -static int -nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, u32 size, struct nvkm_gsp_mem *mem) -{ - mem->size = size; - mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); - if (WARN_ON(!mem->data)) - return -ENOMEM; - - return 0; -} - - static int r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) { @@ -1938,20 +1948,20 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3) * See kgspCreateRadix3_IMPL */ static int -nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size, +nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size, struct nvkm_gsp_radix3 *rx3) { u64 addr; for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) { u64 *ptes; - int idx; + size_t bufsize; + int ret, idx; - rx3->mem[i].size = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); - rx3->mem[i].data = dma_alloc_coherent(device->dev, rx3->mem[i].size, - &rx3->mem[i].addr, GFP_KERNEL); - if (WARN_ON(!rx3->mem[i].data)) - return -ENOMEM; + bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); + ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]); + if (ret) + return ret; ptes = rx3->mem[i].data; if (i == 2) { @@ -1991,7 +2001,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) if (ret) return ret; 
- ret = nvkm_gsp_radix3_sg(gsp->subdev.device, &gsp->sr.sgt, len, &gsp->sr.radix3); + ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3); if (ret) return ret; @@ -2150,6 +2160,13 @@ r535_gsp_dtor(struct nvkm_gsp *gsp) mutex_destroy(&gsp->cmdq.mutex); r535_gsp_dtor_fws(gsp); + + nvkm_gsp_mem_dtor(gsp, &gsp->rmargs); + nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta); + nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem); + nvkm_gsp_mem_dtor(gsp, &gsp->loginit); + nvkm_gsp_mem_dtor(gsp, &gsp->logintr); + nvkm_gsp_mem_dtor(gsp, &gsp->logrm); } int @@ -2194,7 +2211,7 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) memcpy(gsp->sig.data, data, size); /* Build radix3 page table for ELF image. */ - ret = nvkm_gsp_radix3_sg(device, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3); + ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3); if (ret) return ret; @@ -2295,8 +2312,12 @@ r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) { struct nvkm_subdev *subdev = &gsp->subdev; int ret; + bool enable_gsp = fwif->enable; - if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable)) +#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT) + enable_gsp = true; +#endif + if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp)) return -EINVAL; if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) || diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index c4c0f08e9202..4945a1e787eb 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -1768,11 +1768,11 @@ static const struct panel_desc starry_qfh032011_53g_desc = { }; static const struct drm_display_mode starry_himax83102_j02_default_mode = { - .clock = 162850, + .clock = 162680, .hdisplay = 1200, - .hsync_start = 1200 + 50, - .hsync_end = 1200 + 50 + 20, - .htotal = 1200 + 50 + 20 + 50, + .hsync_start = 1200 + 60, + .hsync_end = 1200 + 60 + 20, + .htotal = 1200 + 60 + 20 + 40, .vdisplay = 1920, .vsync_start = 1920 + 116, .vsync_end = 1920 + 116 + 8, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 85b3b4871a1d..fdd768bbd487 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -1985,8 +1985,10 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc, clock = vop2_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags); } - if (!clock) + if (!clock) { + vop2_unlock(vop2); return; + } if (vcstate->output_mode == ROCKCHIP_OUT_MODE_AAAA && !(vp_data->feature & VOP2_VP_FEATURE_OUTPUT_10BIT)) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 85f082396d42..d442b893275b 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1178,21 +1178,24 @@ static void drm_sched_run_job_work(struct work_struct *w) struct drm_sched_entity *entity; struct dma_fence *fence; struct drm_sched_fence *s_fence; - struct drm_sched_job *sched_job = NULL; + struct drm_sched_job *sched_job; int r; if (READ_ONCE(sched->pause_submit)) return; /* Find entity with a ready job */ - while (!sched_job && (entity = drm_sched_select_entity(sched))) { - sched_job = drm_sched_entity_pop_job(entity); - if (!sched_job) - complete_all(&entity->entity_idle); - } + entity = drm_sched_select_entity(sched); if (!entity) return; /* No more work */ + sched_job = drm_sched_entity_pop_job(entity); + if (!sched_job) { + 
complete_all(&entity->entity_idle); + drm_sched_run_job_queue(sched); + return; + } + s_fence = sched_job->s_fence; atomic_add(sched_job->credits, &sched->credit_count); diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index ff36171c8fb7..03d1c76aec2d 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -960,7 +960,8 @@ int host1x_client_iommu_attach(struct host1x_client *client) * not the shared IOMMU domain, don't try to attach it to a different * domain. This allows using the IOMMU-backed DMA API. */ - if (domain && domain != tegra->domain) + if (domain && domain->type != IOMMU_DOMAIN_IDENTITY && + domain != tegra->domain) return 0; if (tegra->domain) { @@ -1242,9 +1243,26 @@ static int host1x_drm_probe(struct host1x_device *dev) drm_mode_config_reset(drm); - err = drm_aperture_remove_framebuffers(&tegra_drm_driver); - if (err < 0) - goto hub; + /* + * Only take over from a potential firmware framebuffer if any CRTCs + * have been registered. This must not be a fatal error because there + * are other accelerators that are exposed via this driver. + * + * Another case where this happens is on Tegra234 where the display + * hardware is no longer part of the host1x complex, so this driver + * will not expose any modesetting features. + */ + if (drm->mode_config.num_crtc > 0) { + err = drm_aperture_remove_framebuffers(&tegra_drm_driver); + if (err < 0) + goto hub; + } else { + /* + * Indicate to userspace that this doesn't expose any display + * capabilities. + */ + drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC); + } err = drm_dev_register(drm, 0); if (err < 0) diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c index ea2af6bd9abe..e48863a44556 100644 --- a/drivers/gpu/drm/tests/drm_buddy_test.c +++ b/drivers/gpu/drm/tests/drm_buddy_test.c @@ -8,16 +8,308 @@ #include <linux/prime_numbers.h> #include <linux/sched/signal.h> +#include <linux/sizes.h> #include <drm/drm_buddy.h> #include "../lib/drm_random.h" +static unsigned int random_seed; + static inline u64 get_size(int order, u64 chunk_size) { return (1 << order) * chunk_size; } +static void drm_test_buddy_alloc_range_bias(struct kunit *test) +{ + u32 mm_size, ps, bias_size, bias_start, bias_end, bias_rem; + DRM_RND_STATE(prng, random_seed); + unsigned int i, count, *order; + struct drm_buddy mm; + LIST_HEAD(allocated); + + bias_size = SZ_1M; + ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size); + ps = max(SZ_4K, ps); + mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */ + + kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps); + + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), + "buddy_init failed\n"); + + count = mm_size / bias_size; + order = drm_random_order(count, &prng); + KUNIT_EXPECT_TRUE(test, order); + + /* + * Idea is to split the address space into uniform bias ranges, and then + * in some random order allocate within each bias, using various + * patterns within. This should detect if allocations leak out from a + * given bias, for example. 
+ */ + + for (i = 0; i < count; i++) { + LIST_HEAD(tmp); + u32 size; + + bias_start = order[i] * bias_size; + bias_end = bias_start + bias_size; + bias_rem = bias_size; + + /* internal round_up too big */ + KUNIT_ASSERT_TRUE_MSG(test, + drm_buddy_alloc_blocks(&mm, bias_start, + bias_end, bias_size + ps, bias_size, + &allocated, + DRM_BUDDY_RANGE_ALLOCATION), + "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", + bias_start, bias_end, bias_size, bias_size); + + /* size too big */ + KUNIT_ASSERT_TRUE_MSG(test, + drm_buddy_alloc_blocks(&mm, bias_start, + bias_end, bias_size + ps, ps, + &allocated, + DRM_BUDDY_RANGE_ALLOCATION), + "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", + bias_start, bias_end, bias_size + ps, ps); + + /* bias range too small for size */ + KUNIT_ASSERT_TRUE_MSG(test, + drm_buddy_alloc_blocks(&mm, bias_start + ps, + bias_end, bias_size, ps, + &allocated, + DRM_BUDDY_RANGE_ALLOCATION), + "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", + bias_start + ps, bias_end, bias_size, ps); + + /* bias misaligned */ + KUNIT_ASSERT_TRUE_MSG(test, + drm_buddy_alloc_blocks(&mm, bias_start + ps, + bias_end - ps, + bias_size >> 1, bias_size >> 1, + &allocated, + DRM_BUDDY_RANGE_ALLOCATION), + "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n", + bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1); + + /* single big page */ + KUNIT_ASSERT_FALSE_MSG(test, + drm_buddy_alloc_blocks(&mm, bias_start, + bias_end, bias_size, bias_size, + &tmp, + DRM_BUDDY_RANGE_ALLOCATION), + "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n", + bias_start, bias_end, bias_size, bias_size); + drm_buddy_free_list(&mm, &tmp); + + /* single page with internal round_up */ + KUNIT_ASSERT_FALSE_MSG(test, + drm_buddy_alloc_blocks(&mm, bias_start, + bias_end, ps, bias_size, + &tmp, + DRM_BUDDY_RANGE_ALLOCATION), + "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", + bias_start, bias_end, ps, bias_size); + drm_buddy_free_list(&mm, &tmp); + + /* random size within */ + size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); + if (size) + KUNIT_ASSERT_FALSE_MSG(test, + drm_buddy_alloc_blocks(&mm, bias_start, + bias_end, size, ps, + &tmp, + DRM_BUDDY_RANGE_ALLOCATION), + "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", + bias_start, bias_end, size, ps); + + bias_rem -= size; + /* too big for current avail */ + KUNIT_ASSERT_TRUE_MSG(test, + drm_buddy_alloc_blocks(&mm, bias_start, + bias_end, bias_rem + ps, ps, + &allocated, + DRM_BUDDY_RANGE_ALLOCATION), + "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", + bias_start, bias_end, bias_rem + ps, ps); + + if (bias_rem) { + /* random fill of the remainder */ + size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); + size = max(size, ps); + + KUNIT_ASSERT_FALSE_MSG(test, + drm_buddy_alloc_blocks(&mm, bias_start, + bias_end, size, ps, + &allocated, + DRM_BUDDY_RANGE_ALLOCATION), + "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", + bias_start, bias_end, size, ps); + /* + * Intentionally allow some space to be left + * unallocated, and ideally not always on the bias + * boundaries. + */ + drm_buddy_free_list(&mm, &tmp); + } else { + list_splice_tail(&tmp, &allocated); + } + } + + kfree(order); + drm_buddy_free_list(&mm, &allocated); + drm_buddy_fini(&mm); + + /* + * Something more free-form. Idea is to pick a random starting bias + * range within the address space and then start filling it up. Also + * randomly grow the bias range in both directions as we go along. 
This + * should give us bias start/end which is not always uniform like above, + * and in some cases will require the allocator to jump over already + * allocated nodes in the middle of the address space. + */ + + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), + "buddy_init failed\n"); + + bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps); + bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps); + bias_end = max(bias_end, bias_start + ps); + bias_rem = bias_end - bias_start; + + do { + u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); + + KUNIT_ASSERT_FALSE_MSG(test, + drm_buddy_alloc_blocks(&mm, bias_start, + bias_end, size, ps, + &allocated, + DRM_BUDDY_RANGE_ALLOCATION), + "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", + bias_start, bias_end, size, ps); + bias_rem -= size; + + /* + * Try to randomly grow the bias range in both directions, or + * only one, or perhaps don't grow at all. + */ + do { + u32 old_bias_start = bias_start; + u32 old_bias_end = bias_end; + + if (bias_start) + bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps); + if (bias_end != mm_size) + bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps); + + bias_rem += old_bias_start - bias_start; + bias_rem += bias_end - old_bias_end; + } while (!bias_rem && (bias_start || bias_end != mm_size)); + } while (bias_rem); + + KUNIT_ASSERT_EQ(test, bias_start, 0); + KUNIT_ASSERT_EQ(test, bias_end, mm_size); + KUNIT_ASSERT_TRUE_MSG(test, + drm_buddy_alloc_blocks(&mm, bias_start, bias_end, + ps, ps, + &allocated, + DRM_BUDDY_RANGE_ALLOCATION), + "buddy_alloc passed with bias(%x-%x), size=%u\n", + bias_start, bias_end, ps); + + drm_buddy_free_list(&mm, &allocated); + drm_buddy_fini(&mm); +} + +static void drm_test_buddy_alloc_contiguous(struct kunit *test) +{ + const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K; + unsigned long i, n_pages, total; + struct drm_buddy_block *block; + struct drm_buddy mm; + LIST_HEAD(left); + LIST_HEAD(middle); + LIST_HEAD(right); + LIST_HEAD(allocated); + + KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); + + /* + * Idea is to fragment the address space by alternating block + * allocations between three different lists; one for left, middle and + * right. We can then free a list to simulate fragmentation. In + * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION, + * including the try_harder path. 
+ */ + + i = 0; + n_pages = mm_size / ps; + do { + struct list_head *list; + int slot = i % 3; + + if (slot == 0) + list = &left; + else if (slot == 1) + list = &middle; + else + list = &right; + KUNIT_ASSERT_FALSE_MSG(test, + drm_buddy_alloc_blocks(&mm, 0, mm_size, + ps, ps, list, 0), + "buddy_alloc hit an error size=%lu\n", + ps); + } while (++i < n_pages); + + KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 3 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc didn't error size=%lu\n", 3 * ps); + + drm_buddy_free_list(&mm, &middle); + KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 3 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc didn't error size=%lu\n", 3 * ps); + KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 2 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc didn't error size=%lu\n", 2 * ps); + + drm_buddy_free_list(&mm, &right); + KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 3 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc didn't error size=%lu\n", 3 * ps); + /* + * At this point we should have enough contiguous space for 2 blocks, + * however they are never buddies (since we freed middle and right) so + * will require the try_harder logic to find them. + */ + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 2 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc hit an error size=%lu\n", 2 * ps); + + drm_buddy_free_list(&mm, &left); + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + 3 * ps, ps, &allocated, + DRM_BUDDY_CONTIGUOUS_ALLOCATION), + "buddy_alloc hit an error size=%lu\n", 3 * ps); + + total = 0; + list_for_each_entry(block, &allocated, link) + total += drm_buddy_block_size(&mm, block); + + KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3); + + drm_buddy_free_list(&mm, &allocated); + drm_buddy_fini(&mm); +} + static void drm_test_buddy_alloc_pathological(struct kunit *test) { u64 mm_size, size, start = 0; @@ -275,16 +567,30 @@ static void drm_test_buddy_alloc_limit(struct kunit *test) drm_buddy_fini(&mm); } +static int drm_buddy_suite_init(struct kunit_suite *suite) +{ + while (!random_seed) + random_seed = get_random_u32(); + + kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n", + random_seed); + + return 0; +} + static struct kunit_case drm_buddy_tests[] = { KUNIT_CASE(drm_test_buddy_alloc_limit), KUNIT_CASE(drm_test_buddy_alloc_optimistic), KUNIT_CASE(drm_test_buddy_alloc_pessimistic), KUNIT_CASE(drm_test_buddy_alloc_pathological), + KUNIT_CASE(drm_test_buddy_alloc_contiguous), + KUNIT_CASE(drm_test_buddy_alloc_range_bias), {} }; static struct kunit_suite drm_buddy_test_suite = { .name = "drm_buddy", + .suite_init = drm_buddy_suite_init, .test_cases = drm_buddy_tests, }; diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c index 1eb0c304f960..f37c0d765865 100644 --- a/drivers/gpu/drm/tests/drm_mm_test.c +++ b/drivers/gpu/drm/tests/drm_mm_test.c @@ -157,7 +157,7 @@ static void drm_test_mm_init(struct kunit *test) /* After creation, it should all be one massive hole */ if (!assert_one_hole(test, &mm, 0, size)) { - KUNIT_FAIL(test, ""); + KUNIT_FAIL(test, "mm not one hole on creation"); goto out; } @@ -171,14 +171,14 @@ static void drm_test_mm_init(struct kunit *test) /* After filling the range entirely, there should be no holes */ if (!assert_no_holes(test, &mm)) { - 
KUNIT_FAIL(test, ""); + KUNIT_FAIL(test, "mm has holes when filled"); goto out; } /* And then after emptying it again, the massive hole should be back */ drm_mm_remove_node(&tmp); if (!assert_one_hole(test, &mm, 0, size)) { - KUNIT_FAIL(test, ""); + KUNIT_FAIL(test, "mm does not have single hole after emptying"); goto out; } diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index b62f420a9f96..112438d965ff 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -387,7 +387,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt, enum ttm_caching caching, pgoff_t start_page, pgoff_t end_page) { - struct page **pages = tt->pages; + struct page **pages = &tt->pages[start_page]; unsigned int order; pgoff_t i, nr; diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index f8e9abe647b9..9539aa28937f 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -94,6 +94,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev) goto err_free; } + dma_set_max_seg_size(dev->dev, dma_max_mapping_size(dev->dev) ?: UINT_MAX); ret = virtio_gpu_init(vdev, dev); if (ret) goto err_free; diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h index 3062e0e0d467..79ba98a169f9 100644 --- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h @@ -50,8 +50,8 @@ #define HOST2GUC_SELF_CFG_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u) #define HOST2GUC_SELF_CFG_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0 -#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffff << 16) -#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffff << 0) +#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffffu << 16) +#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffffu << 0) #define HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32 GUC_HXG_REQUEST_MSG_n_DATAn #define HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn diff --git a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h index 811add10c30d..c165e26c0976 100644 --- a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h @@ -242,8 +242,8 @@ struct slpc_shared_data { (HOST2GUC_PC_SLPC_REQUEST_REQUEST_MSG_MIN_LEN + \ HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS) #define HOST2GUC_PC_SLPC_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0 -#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xff << 8) -#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xff << 0) +#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xffu << 8) +#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xffu << 0) #define HOST2GUC_PC_SLPC_REQUEST_MSG_N_EVENT_DATA_N GUC_HXG_REQUEST_MSG_n_DATAn #endif diff --git a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h index 3b83f907ece4..0b1146d0c997 100644 --- a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h @@ -82,11 +82,11 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64); #define GUC_CTB_HDR_LEN 1u #define GUC_CTB_MSG_MIN_LEN GUC_CTB_HDR_LEN #define GUC_CTB_MSG_MAX_LEN 256u -#define GUC_CTB_MSG_0_FENCE (0xffff << 16) -#define GUC_CTB_MSG_0_FORMAT (0xf << 12) +#define GUC_CTB_MSG_0_FENCE (0xffffu << 16) +#define GUC_CTB_MSG_0_FORMAT (0xfu << 12) #define GUC_CTB_FORMAT_HXG 0u -#define GUC_CTB_MSG_0_RESERVED (0xf << 8) -#define 
GUC_CTB_MSG_0_NUM_DWORDS (0xff << 0) +#define GUC_CTB_MSG_0_RESERVED (0xfu << 8) +#define GUC_CTB_MSG_0_NUM_DWORDS (0xffu << 0) /** * DOC: CTB HXG Message diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h index 47094b9b044c..0400bc0fccdc 100644 --- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h @@ -31,9 +31,9 @@ */ #define GUC_KLV_LEN_MIN 1u -#define GUC_KLV_0_KEY (0xffff << 16) -#define GUC_KLV_0_LEN (0xffff << 0) -#define GUC_KLV_n_VALUE (0xffffffff << 0) +#define GUC_KLV_0_KEY (0xffffu << 16) +#define GUC_KLV_0_LEN (0xffffu << 0) +#define GUC_KLV_n_VALUE (0xffffffffu << 0) /** * DOC: GuC Self Config KLVs diff --git a/drivers/gpu/drm/xe/abi/guc_messages_abi.h b/drivers/gpu/drm/xe/abi/guc_messages_abi.h index 3d199016cf88..29e414c82d56 100644 --- a/drivers/gpu/drm/xe/abi/guc_messages_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_messages_abi.h @@ -40,18 +40,18 @@ */ #define GUC_HXG_MSG_MIN_LEN 1u -#define GUC_HXG_MSG_0_ORIGIN (0x1 << 31) +#define GUC_HXG_MSG_0_ORIGIN (0x1u << 31) #define GUC_HXG_ORIGIN_HOST 0u #define GUC_HXG_ORIGIN_GUC 1u -#define GUC_HXG_MSG_0_TYPE (0x7 << 28) +#define GUC_HXG_MSG_0_TYPE (0x7u << 28) #define GUC_HXG_TYPE_REQUEST 0u #define GUC_HXG_TYPE_EVENT 1u #define GUC_HXG_TYPE_NO_RESPONSE_BUSY 3u #define GUC_HXG_TYPE_NO_RESPONSE_RETRY 5u #define GUC_HXG_TYPE_RESPONSE_FAILURE 6u #define GUC_HXG_TYPE_RESPONSE_SUCCESS 7u -#define GUC_HXG_MSG_0_AUX (0xfffffff << 0) -#define GUC_HXG_MSG_n_PAYLOAD (0xffffffff << 0) +#define GUC_HXG_MSG_0_AUX (0xfffffffu << 0) +#define GUC_HXG_MSG_n_PAYLOAD (0xffffffffu << 0) /** * DOC: HXG Request @@ -85,8 +85,8 @@ */ #define GUC_HXG_REQUEST_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN -#define GUC_HXG_REQUEST_MSG_0_DATA0 (0xfff << 16) -#define GUC_HXG_REQUEST_MSG_0_ACTION (0xffff << 0) +#define GUC_HXG_REQUEST_MSG_0_DATA0 (0xfffu << 16) +#define GUC_HXG_REQUEST_MSG_0_ACTION (0xffffu << 0) #define GUC_HXG_REQUEST_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD /** @@ -117,8 +117,8 @@ */ #define GUC_HXG_EVENT_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN -#define GUC_HXG_EVENT_MSG_0_DATA0 (0xfff << 16) -#define GUC_HXG_EVENT_MSG_0_ACTION (0xffff << 0) +#define GUC_HXG_EVENT_MSG_0_DATA0 (0xfffu << 16) +#define GUC_HXG_EVENT_MSG_0_ACTION (0xffffu << 0) #define GUC_HXG_EVENT_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD /** @@ -188,8 +188,8 @@ */ #define GUC_HXG_FAILURE_MSG_LEN GUC_HXG_MSG_MIN_LEN -#define GUC_HXG_FAILURE_MSG_0_HINT (0xfff << 16) -#define GUC_HXG_FAILURE_MSG_0_ERROR (0xffff << 0) +#define GUC_HXG_FAILURE_MSG_0_HINT (0xfffu << 16) +#define GUC_HXG_FAILURE_MSG_0_ERROR (0xffffu << 0) /** * DOC: HXG Response diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h index 68d9f6116bdf..777c20ceabab 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h @@ -10,7 +10,7 @@ #include "xe_bo.h" -#define i915_gem_object_is_shmem(obj) ((obj)->flags & XE_BO_CREATE_SYSTEM_BIT) +#define i915_gem_object_is_shmem(obj) (0) /* We don't use shmem */ static inline dma_addr_t i915_gem_object_get_dma_address(const struct xe_bo *bo, pgoff_t n) { diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c index a6523df0f1d3..c347e2c29f81 100644 --- a/drivers/gpu/drm/xe/tests/xe_migrate.c +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c @@ -114,21 +114,21 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo, region | 
XE_BO_NEEDS_CPU_ACCESS); if (IS_ERR(remote)) { - KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %li\n", - str, PTR_ERR(remote)); + KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n", + str, remote); return; } err = xe_bo_validate(remote, NULL, false); if (err) { - KUNIT_FAIL(test, "Failed to validate system bo for %s: %li\n", + KUNIT_FAIL(test, "Failed to validate system bo for %s: %i\n", str, err); goto out_unlock; } err = xe_bo_vmap(remote); if (err) { - KUNIT_FAIL(test, "Failed to vmap system bo for %s: %li\n", + KUNIT_FAIL(test, "Failed to vmap system bo for %s: %i\n", str, err); goto out_unlock; } diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c index ef56bd517b28..421b819fd4ba 100644 --- a/drivers/gpu/drm/xe/tests/xe_mocs_test.c +++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.c @@ -21,4 +21,5 @@ kunit_test_suite(xe_mocs_test_suite); MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("xe_mocs kunit test"); MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 0b0e262e2166..4d3b80ec906d 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -28,6 +28,14 @@ #include "xe_ttm_stolen_mgr.h" #include "xe_vm.h" +const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = { + [XE_PL_SYSTEM] = "system", + [XE_PL_TT] = "gtt", + [XE_PL_VRAM0] = "vram0", + [XE_PL_VRAM1] = "vram1", + [XE_PL_STOLEN] = "stolen" +}; + static const struct ttm_place sys_placement_flags = { .fpfn = 0, .lpfn = 0, @@ -713,8 +721,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, migrate = xe->tiles[0].migrate; xe_assert(xe, migrate); - - trace_xe_bo_move(bo); + trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source); xe_device_mem_access_get(xe); if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) { diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index 9b1279aca127..8be42ac6cd07 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -243,6 +243,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo); int xe_bo_restore_pinned(struct xe_bo *bo); extern struct ttm_device_funcs xe_ttm_funcs; +extern const char *const xe_mem_type_to_name[]; int xe_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file); diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 1f0b4b9ce84f..5176c27e4b6a 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -83,9 +83,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) return 0; } -static void device_kill_persistent_exec_queues(struct xe_device *xe, - struct xe_file *xef); - static void xe_file_close(struct drm_device *dev, struct drm_file *file) { struct xe_device *xe = to_xe_device(dev); @@ -102,8 +99,6 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file) mutex_unlock(&xef->exec_queue.lock); xa_destroy(&xef->exec_queue.xa); mutex_destroy(&xef->exec_queue.lock); - device_kill_persistent_exec_queues(xe, xef); - mutex_lock(&xef->vm.lock); xa_for_each(&xef->vm.xa, idx, vm) xe_vm_close_and_put(vm); @@ -255,9 +250,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, xa_erase(&xe->usm.asid_to_vm, asid); } - drmm_mutex_init(&xe->drm, &xe->persistent_engines.lock); - INIT_LIST_HEAD(&xe->persistent_engines.list); - spin_lock_init(&xe->pinned.lock); INIT_LIST_HEAD(&xe->pinned.kernel_bo_present); INIT_LIST_HEAD(&xe->pinned.external_vram); 
@@ -570,37 +562,6 @@ void xe_device_shutdown(struct xe_device *xe) { } -void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q) -{ - mutex_lock(&xe->persistent_engines.lock); - list_add_tail(&q->persistent.link, &xe->persistent_engines.list); - mutex_unlock(&xe->persistent_engines.lock); -} - -void xe_device_remove_persistent_exec_queues(struct xe_device *xe, - struct xe_exec_queue *q) -{ - mutex_lock(&xe->persistent_engines.lock); - if (!list_empty(&q->persistent.link)) - list_del(&q->persistent.link); - mutex_unlock(&xe->persistent_engines.lock); -} - -static void device_kill_persistent_exec_queues(struct xe_device *xe, - struct xe_file *xef) -{ - struct xe_exec_queue *q, *next; - - mutex_lock(&xe->persistent_engines.lock); - list_for_each_entry_safe(q, next, &xe->persistent_engines.list, - persistent.link) - if (q->persistent.xef == xef) { - xe_exec_queue_kill(q); - list_del_init(&q->persistent.link); - } - mutex_unlock(&xe->persistent_engines.lock); -} - void xe_device_wmb(struct xe_device *xe) { struct xe_gt *gt = xe_root_mmio_gt(xe); diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h index 3da83b233206..08d8b72c7731 100644 --- a/drivers/gpu/drm/xe/xe_device.h +++ b/drivers/gpu/drm/xe/xe_device.h @@ -42,10 +42,6 @@ int xe_device_probe(struct xe_device *xe); void xe_device_remove(struct xe_device *xe); void xe_device_shutdown(struct xe_device *xe); -void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q); -void xe_device_remove_persistent_exec_queues(struct xe_device *xe, - struct xe_exec_queue *q); - void xe_device_wmb(struct xe_device *xe); static inline struct xe_file *to_xe_file(const struct drm_file *file) diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 5dc9127a2029..e8491979a6f2 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -341,14 +341,6 @@ struct xe_device { struct mutex lock; } usm; - /** @persistent_engines: engines that are closed but still running */ - struct { - /** @lock: protects persistent engines */ - struct mutex lock; - /** @list: list of persistent engines */ - struct list_head list; - } persistent_engines; - /** @pinned: pinned BO state */ struct { /** @lock: protected pinned BO list state */ diff --git a/drivers/gpu/drm/xe/xe_display.c b/drivers/gpu/drm/xe/xe_display.c index 74391d9b11ae..e4db069f0db3 100644 --- a/drivers/gpu/drm/xe/xe_display.c +++ b/drivers/gpu/drm/xe/xe_display.c @@ -134,8 +134,6 @@ static void xe_display_fini_nommio(struct drm_device *dev, void *dummy) int xe_display_init_nommio(struct xe_device *xe) { - int err; - if (!xe->info.enable_display) return 0; @@ -145,10 +143,6 @@ int xe_display_init_nommio(struct xe_device *xe) /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(xe); - err = intel_power_domains_init(xe); - if (err) - return err; - return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe); } diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c index 82d1305e831f..6040e4d22b28 100644 --- a/drivers/gpu/drm/xe/xe_drm_client.c +++ b/drivers/gpu/drm/xe/xe_drm_client.c @@ -131,14 +131,6 @@ static void bo_meminfo(struct xe_bo *bo, static void show_meminfo(struct drm_printer *p, struct drm_file *file) { - static const char *const mem_type_to_name[TTM_NUM_MEM_TYPES] = { - [XE_PL_SYSTEM] = "system", - [XE_PL_TT] = "gtt", - [XE_PL_VRAM0] = "vram0", - [XE_PL_VRAM1] = "vram1", - [4 ... 
6] = NULL, - [XE_PL_STOLEN] = "stolen" - }; struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {}; struct xe_file *xef = file->driver_priv; struct ttm_device *bdev = &xef->xe->ttm; @@ -171,7 +163,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file) spin_unlock(&client->bos_lock); for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) { - if (!mem_type_to_name[mem_type]) + if (!xe_mem_type_to_name[mem_type]) continue; man = ttm_manager_type(bdev, mem_type); @@ -182,7 +174,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file) DRM_GEM_OBJECT_RESIDENT | (mem_type != XE_PL_SYSTEM ? 0 : DRM_GEM_OBJECT_PURGEABLE), - mem_type_to_name[mem_type]); + xe_mem_type_to_name[mem_type]); } } } diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c index b853feed9ccc..17f26952e665 100644 --- a/drivers/gpu/drm/xe/xe_exec.c +++ b/drivers/gpu/drm/xe/xe_exec.c @@ -111,7 +111,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) u64 addresses[XE_HW_ENGINE_MAX_INSTANCE]; struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn}; struct drm_exec *exec = &vm_exec.exec; - u32 i, num_syncs = 0; + u32 i, num_syncs = 0, num_ufence = 0; struct xe_sched_job *job; struct dma_fence *rebind_fence; struct xe_vm *vm; @@ -157,6 +157,14 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) SYNC_PARSE_FLAG_LR_MODE : 0)); if (err) goto err_syncs; + + if (xe_sync_is_ufence(&syncs[i])) + num_ufence++; + } + + if (XE_IOCTL_DBG(xe, num_ufence > 1)) { + err = -EINVAL; + goto err_syncs; } if (xe_exec_queue_is_parallel(q)) { diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index bcfc4127c7c5..49223026c89f 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -60,7 +60,6 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe, q->fence_irq = >->fence_irq[hwe->class]; q->ring_ops = gt->ring_ops[hwe->class]; q->ops = gt->exec_queue_ops; - INIT_LIST_HEAD(&q->persistent.link); INIT_LIST_HEAD(&q->compute.link); INIT_LIST_HEAD(&q->multi_gt_link); @@ -310,102 +309,6 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue * return q->ops->set_timeslice(q, value); } -static int exec_queue_set_preemption_timeout(struct xe_device *xe, - struct xe_exec_queue *q, u64 value, - bool create) -{ - u32 min = 0, max = 0; - - xe_exec_queue_get_prop_minmax(q->hwe->eclass, - XE_EXEC_QUEUE_PREEMPT_TIMEOUT, &min, &max); - - if (xe_exec_queue_enforce_schedule_limit() && - !xe_hw_engine_timeout_in_range(value, min, max)) - return -EINVAL; - - return q->ops->set_preempt_timeout(q, value); -} - -static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q, - u64 value, bool create) -{ - if (XE_IOCTL_DBG(xe, !create)) - return -EINVAL; - - if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm))) - return -EINVAL; - - if (value) - q->flags |= EXEC_QUEUE_FLAG_PERSISTENT; - else - q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT; - - return 0; -} - -static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q, - u64 value, bool create) -{ - u32 min = 0, max = 0; - - if (XE_IOCTL_DBG(xe, !create)) - return -EINVAL; - - xe_exec_queue_get_prop_minmax(q->hwe->eclass, - XE_EXEC_QUEUE_JOB_TIMEOUT, &min, &max); - - if (xe_exec_queue_enforce_schedule_limit() && - !xe_hw_engine_timeout_in_range(value, min, max)) - return -EINVAL; - - return q->ops->set_job_timeout(q, value); -} - -static int 
exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q, - u64 value, bool create) -{ - if (XE_IOCTL_DBG(xe, !create)) - return -EINVAL; - - if (XE_IOCTL_DBG(xe, !xe->info.has_usm)) - return -EINVAL; - - q->usm.acc_trigger = value; - - return 0; -} - -static int exec_queue_set_acc_notify(struct xe_device *xe, struct xe_exec_queue *q, - u64 value, bool create) -{ - if (XE_IOCTL_DBG(xe, !create)) - return -EINVAL; - - if (XE_IOCTL_DBG(xe, !xe->info.has_usm)) - return -EINVAL; - - q->usm.acc_notify = value; - - return 0; -} - -static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_queue *q, - u64 value, bool create) -{ - if (XE_IOCTL_DBG(xe, !create)) - return -EINVAL; - - if (XE_IOCTL_DBG(xe, !xe->info.has_usm)) - return -EINVAL; - - if (value > DRM_XE_ACC_GRANULARITY_64M) - return -EINVAL; - - q->usm.acc_granularity = value; - - return 0; -} - typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe, struct xe_exec_queue *q, u64 value, bool create); @@ -413,12 +316,6 @@ typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe, static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = { [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority, [DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice, - [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout, - [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence, - [DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout, - [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger, - [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify, - [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity, }; static int exec_queue_user_ext_set_property(struct xe_device *xe, @@ -437,10 +334,15 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe, if (XE_IOCTL_DBG(xe, ext.property >= ARRAY_SIZE(exec_queue_set_property_funcs)) || - XE_IOCTL_DBG(xe, ext.pad)) + XE_IOCTL_DBG(xe, ext.pad) || + XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY && + ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE)) return -EINVAL; idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs)); + if (!exec_queue_set_property_funcs[idx]) + return -EINVAL; + return exec_queue_set_property_funcs[idx](xe, q, ext.value, create); } @@ -704,9 +606,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, } q = xe_exec_queue_create(xe, vm, logical_mask, - args->width, hwe, - xe_vm_in_lr_mode(vm) ? 
0 : - EXEC_QUEUE_FLAG_PERSISTENT); + args->width, hwe, 0); up_read(&vm->lock); xe_vm_put(vm); if (IS_ERR(q)) @@ -728,8 +628,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, goto kill_exec_queue; } - q->persistent.xef = xef; - mutex_lock(&xef->exec_queue.lock); err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL); mutex_unlock(&xef->exec_queue.lock); @@ -872,10 +770,7 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data, if (XE_IOCTL_DBG(xe, !q)) return -ENOENT; - if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT)) - xe_exec_queue_kill(q); - else - xe_device_add_persistent_exec_queues(xe, q); + xe_exec_queue_kill(q); trace_xe_exec_queue_close(q); xe_exec_queue_put(q); @@ -926,20 +821,24 @@ void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q) * @q: The exec queue * @vm: The VM the engine does a bind or exec for * - * Get last fence, does not take a ref + * Get last fence, takes a ref * * Returns: last fence if not signaled, dma fence stub if signaled */ struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q, struct xe_vm *vm) { + struct dma_fence *fence; + xe_exec_queue_last_fence_lockdep_assert(q, vm); if (q->last_fence && test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) xe_exec_queue_last_fence_put(q, vm); - return q->last_fence ? q->last_fence : dma_fence_get_stub(); + fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); + dma_fence_get(fence); + return fence; } /** diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h index 8d4b7feb8c30..36f4901d8d7e 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h @@ -105,16 +105,6 @@ struct xe_exec_queue { struct xe_guc_exec_queue *guc; }; - /** - * @persistent: persistent exec queue state - */ - struct { - /** @xef: file which this exec queue belongs to */ - struct xe_file *xef; - /** @link: link in list of persistent exec queues */ - struct list_head link; - } persistent; - union { /** * @parallel: parallel submission state @@ -160,16 +150,6 @@ struct xe_exec_queue { spinlock_t lock; } compute; - /** @usm: unified shared memory state */ - struct { - /** @acc_trigger: access counter trigger */ - u32 acc_trigger; - /** @acc_notify: access counter notify */ - u32 acc_notify; - /** @acc_granularity: access counter granularity */ - u32 acc_granularity; - } usm; - /** @ops: submission backend exec queue operations */ const struct xe_exec_queue_ops *ops; diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c index 96b5224eb478..acb4d9f38fd7 100644 --- a/drivers/gpu/drm/xe/xe_execlist.c +++ b/drivers/gpu/drm/xe/xe_execlist.c @@ -212,7 +212,7 @@ static void xe_execlist_port_wake_locked(struct xe_execlist_port *port, static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl) { struct xe_execlist_port *port = exl->port; - enum xe_exec_queue_priority priority = exl->active_priority; + enum xe_exec_queue_priority priority = exl->q->sched_props.priority; XE_WARN_ON(priority == XE_EXEC_QUEUE_PRIORITY_UNSET); XE_WARN_ON(priority < 0); @@ -378,8 +378,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w) list_del(&exl->active_link); spin_unlock_irqrestore(&exl->port->lock, flags); - if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT) - xe_device_remove_persistent_exec_queues(xe, q); drm_sched_entity_fini(&exl->entity); drm_sched_fini(&exl->sched); kfree(exl); diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c 
index 3af2adec1295..35474ddbaf97 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -437,7 +437,10 @@ static int all_fw_domain_init(struct xe_gt *gt) * USM has its only SA pool to non-block behind user operations */ if (gt_to_xe(gt)->info.has_usm) { - gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16); + struct xe_device *xe = gt_to_xe(gt); + + gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), + IS_DGFX(xe) ? SZ_1M : SZ_512K, 16); if (IS_ERR(gt->usm.bb_pool)) { err = PTR_ERR(gt->usm.bb_pool); goto err_force_wake; diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c index 9358f7336889..9fcae65b6469 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.c +++ b/drivers/gpu/drm/xe/xe_gt_idle.c @@ -145,10 +145,10 @@ void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle) } if (xe_gt_is_media_type(gt)) { - sprintf(gtidle->name, "gt%d-mc\n", gt->info.id); + sprintf(gtidle->name, "gt%d-mc", gt->info.id); gtidle->idle_residency = xe_guc_pc_mc6_residency; } else { - sprintf(gtidle->name, "gt%d-rc\n", gt->info.id); + sprintf(gtidle->name, "gt%d-rc", gt->info.id); gtidle->idle_residency = xe_guc_pc_rc6_residency; } diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c index 77925b35cf8d..8546cd3cc50d 100644 --- a/drivers/gpu/drm/xe/xe_gt_mcr.c +++ b/drivers/gpu/drm/xe/xe_gt_mcr.c @@ -480,7 +480,7 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt, * to synchronize with external clients (e.g., firmware), so a semaphore * register will also need to be taken. */ -static void mcr_lock(struct xe_gt *gt) +static void mcr_lock(struct xe_gt *gt) __acquires(>->mcr_lock) { struct xe_device *xe = gt_to_xe(gt); int ret = 0; @@ -500,7 +500,7 @@ static void mcr_lock(struct xe_gt *gt) drm_WARN_ON_ONCE(&xe->drm, ret == -ETIMEDOUT); } -static void mcr_unlock(struct xe_gt *gt) +static void mcr_unlock(struct xe_gt *gt) __releases(>->mcr_lock) { /* Release hardware semaphore - this is done by writing 1 to the register */ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index 59a70d2e0a7a..73f08f1924df 100644 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -165,7 +165,8 @@ retry_userptr: goto unlock_vm; } - if (!xe_vma_is_userptr(vma) || !xe_vma_userptr_check_repin(vma)) { + if (!xe_vma_is_userptr(vma) || + !xe_vma_userptr_check_repin(to_userptr_vma(vma))) { downgrade_write(&vm->lock); write_locked = false; } @@ -181,11 +182,13 @@ retry_userptr: /* TODO: Validate fault */ if (xe_vma_is_userptr(vma) && write_locked) { + struct xe_userptr_vma *uvma = to_userptr_vma(vma); + spin_lock(&vm->userptr.invalidated_lock); - list_del_init(&vma->userptr.invalidate_link); + list_del_init(&uvma->userptr.invalidate_link); spin_unlock(&vm->userptr.invalidated_lock); - ret = xe_vma_userptr_pin_pages(vma); + ret = xe_vma_userptr_pin_pages(uvma); if (ret) goto unlock_vm; @@ -220,7 +223,7 @@ retry_userptr: dma_fence_put(fence); if (xe_vma_is_userptr(vma)) - ret = xe_vma_userptr_check_repin(vma); + ret = xe_vma_userptr_check_repin(to_userptr_vma(vma)); vma->usm.tile_invalidated &= ~BIT(tile->id); unlock_dma_resv: @@ -332,7 +335,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len) return -EPROTO; asid = FIELD_GET(PFD_ASID, msg[1]); - pf_queue = >->usm.pf_queue[asid % NUM_PF_QUEUE]; + pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE); spin_lock_irqsave(&pf_queue->lock, flags); full = 
pf_queue_full(pf_queue); diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index 7eef23a00d77..f4c485289dbe 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -247,6 +247,14 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt, xe_gt_assert(gt, vma); + /* Execlists not supported */ + if (gt_to_xe(gt)->info.force_execlist) { + if (fence) + __invalidation_fence_signal(fence); + + return 0; + } + action[len++] = XE_GUC_ACTION_TLB_INVALIDATION; action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */ if (!xe->info.has_range_tlb_invalidation) { @@ -317,6 +325,10 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno) struct drm_printer p = drm_err_printer(__func__); int ret; + /* Execlists not supported */ + if (gt_to_xe(gt)->info.force_execlist) + return 0; + /* * XXX: See above, this algorithm only works if seqno are always in * order diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index f71085228cb3..d91702592520 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -963,7 +963,9 @@ void xe_guc_pc_fini(struct xe_guc_pc *pc) struct xe_device *xe = pc_to_xe(pc); if (xe->info.skip_guc_pc) { + xe_device_mem_access_get(xe); xe_gt_idle_disable_c6(pc_to_gt(pc)); + xe_device_mem_access_put(xe); return; } diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 54ffcfcdd41f..f22ae717b0b2 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -1028,8 +1028,6 @@ static void __guc_exec_queue_fini_async(struct work_struct *w) if (xe_exec_queue_is_lr(q)) cancel_work_sync(&ge->lr_tdr); - if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT) - xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q); release_guc_id(guc, q); xe_sched_entity_fini(&ge->entity); xe_sched_fini(&ge->sched); diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c index a6094c81f2ad..a5de3e7b0bd6 100644 --- a/drivers/gpu/drm/xe/xe_hw_fence.c +++ b/drivers/gpu/drm/xe/xe_hw_fence.c @@ -217,13 +217,13 @@ struct xe_hw_fence *xe_hw_fence_create(struct xe_hw_fence_ctx *ctx, if (!fence) return ERR_PTR(-ENOMEM); - dma_fence_init(&fence->dma, &xe_hw_fence_ops, &ctx->irq->lock, - ctx->dma_fence_ctx, ctx->next_seqno++); - fence->ctx = ctx; fence->seqno_map = seqno_map; INIT_LIST_HEAD(&fence->irq_link); + dma_fence_init(&fence->dma, &xe_hw_fence_ops, &ctx->irq->lock, + ctx->dma_fence_ctx, ctx->next_seqno++); + trace_xe_hw_fence_create(fence); return fence; diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index b7fa3831b684..b38319d2801e 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -21,10 +21,10 @@ #include "xe_map.h" #include "xe_vm.h" -#define CTX_VALID (1 << 0) -#define CTX_PRIVILEGE (1 << 8) -#define CTX_ADDRESSING_MODE_SHIFT 3 -#define LEGACY_64B_CONTEXT 3 +#define LRC_VALID (1 << 0) +#define LRC_PRIVILEGE (1 << 8) +#define LRC_ADDRESSING_MODE_SHIFT 3 +#define LRC_LEGACY_64B_CONTEXT 3 #define ENGINE_CLASS_SHIFT 61 #define ENGINE_INSTANCE_SHIFT 48 @@ -682,8 +682,6 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm) #define PVC_CTX_ASID (0x2e + 1) #define PVC_CTX_ACC_CTR_THOLD (0x2a + 1) -#define ACC_GRANULARITY_S 20 -#define ACC_NOTIFY_S 16 int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size) @@ -754,23 +752,17 @@ int 
xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, xe_lrc_write_ctx_reg(lrc, CTX_RING_CTL, RING_CTL_SIZE(lrc->ring.size) | RING_VALID); if (xe->info.has_asid && vm) - xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, - (q->usm.acc_granularity << - ACC_GRANULARITY_S) | vm->usm.asid); - if (xe->info.has_usm && vm) - xe_lrc_write_ctx_reg(lrc, PVC_CTX_ACC_CTR_THOLD, - (q->usm.acc_notify << ACC_NOTIFY_S) | - q->usm.acc_trigger); - - lrc->desc = CTX_VALID; - lrc->desc |= LEGACY_64B_CONTEXT << CTX_ADDRESSING_MODE_SHIFT; + xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid); + + lrc->desc = LRC_VALID; + lrc->desc |= LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT; /* TODO: Priority */ /* While this appears to have something about privileged batches or * some such, it really just means PPGTT mode. */ if (vm) - lrc->desc |= CTX_PRIVILEGE; + lrc->desc |= LRC_PRIVILEGE; if (GRAPHICS_VERx100(xe) < 1250) { lrc->desc |= (u64)hwe->instance << ENGINE_INSTANCE_SHIFT; diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 5c6c54624252..70480c305602 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -170,11 +170,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, if (!IS_DGFX(xe)) { /* Write out batch too */ m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE; - if (xe->info.has_usm) { - batch = tile->primary_gt->usm.bb_pool->bo; - m->usm_batch_base_ofs = m->batch_base_ofs; - } - for (i = 0; i < batch->size; i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE : XE_PAGE_SIZE) { @@ -185,6 +180,24 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, entry); level++; } + if (xe->info.has_usm) { + xe_tile_assert(tile, batch->size == SZ_1M); + + batch = tile->primary_gt->usm.bb_pool->bo; + m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M; + xe_tile_assert(tile, batch->size == SZ_512K); + + for (i = 0; i < batch->size; + i += vm->flags & XE_VM_FLAG_64K ? 
XE_64K_PAGE_SIZE : + XE_PAGE_SIZE) { + entry = vm->pt_ops->pte_encode_bo(batch, i, + pat_index, 0); + + xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, + entry); + level++; + } + } } else { u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE); @@ -1204,8 +1217,11 @@ static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q, } if (q) { fence = xe_exec_queue_last_fence_get(q, vm); - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + dma_fence_put(fence); return false; + } + dma_fence_put(fence); } return true; diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index 5f6b53ea5528..02f7808f28ca 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -105,7 +105,7 @@ static void xe_resize_vram_bar(struct xe_device *xe) pci_bus_for_each_resource(root, root_res, i) { if (root_res && root_res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) && - root_res->start > 0x100000000ull) + (u64)root_res->start > 0x100000000ul) break; } diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index de1030a47588..6653c045f3c9 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -20,8 +20,8 @@ struct xe_pt_dir { struct xe_pt pt; - /** @dir: Directory structure for the xe_pt_walk functionality */ - struct xe_ptw_dir dir; + /** @children: Array of page-table child nodes */ + struct xe_ptw *children[XE_PDES]; }; #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM) @@ -44,7 +44,7 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt) static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index) { - return container_of(pt_dir->dir.entries[index], struct xe_pt, base); + return container_of(pt_dir->children[index], struct xe_pt, base); } static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm, @@ -65,6 +65,14 @@ static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm, XE_PTE_NULL; } +static void xe_pt_free(struct xe_pt *pt) +{ + if (pt->level) + kfree(as_xe_pt_dir(pt)); + else + kfree(pt); +} + /** * xe_pt_create() - Create a page-table. * @vm: The vm to create for. @@ -85,15 +93,19 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, { struct xe_pt *pt; struct xe_bo *bo; - size_t size; int err; - size = !level ? sizeof(struct xe_pt) : sizeof(struct xe_pt_dir) + - XE_PDES * sizeof(struct xe_ptw *); - pt = kzalloc(size, GFP_KERNEL); + if (level) { + struct xe_pt_dir *dir = kzalloc(sizeof(*dir), GFP_KERNEL); + + pt = (dir) ? &dir->pt : NULL; + } else { + pt = kzalloc(sizeof(*pt), GFP_KERNEL); + } if (!pt) return ERR_PTR(-ENOMEM); + pt->level = level; bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K, ttm_bo_type_kernel, XE_BO_CREATE_VRAM_IF_DGFX(tile) | @@ -106,8 +118,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, goto err_kfree; } pt->bo = bo; - pt->level = level; - pt->base.dir = level ? &as_xe_pt_dir(pt)->dir : NULL; + pt->base.children = level ? 
as_xe_pt_dir(pt)->children : NULL; if (vm->xef) xe_drm_client_add_bo(vm->xef->client, pt->bo); @@ -116,7 +127,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, return pt; err_kfree: - kfree(pt); + xe_pt_free(pt); return ERR_PTR(err); } @@ -193,7 +204,7 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred) deferred); } } - kfree(pt); + xe_pt_free(pt); } /** @@ -358,7 +369,7 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent, struct iosys_map *map = &parent->bo->vmap; if (unlikely(xe_child)) - parent->base.dir->entries[offset] = &xe_child->base; + parent->base.children[offset] = &xe_child->base; xe_pt_write(xe_walk->vm->xe, map, offset, pte); parent->num_live++; @@ -488,10 +499,12 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset, * this device *requires* 64K PTE size for VRAM, fail. */ if (level == 0 && !xe_parent->is_compact) { - if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) + if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) { + xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K; pte |= XE_PTE_PS64; - else if (XE_WARN_ON(xe_walk->needs_64K)) + } else if (XE_WARN_ON(xe_walk->needs_64K)) { return -EINVAL; + } } ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte); @@ -534,13 +547,16 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset, *child = &xe_child->base; /* - * Prefer the compact pagetable layout for L0 if possible. + * Prefer the compact pagetable layout for L0 if possible. Only + * possible if VMA covers entire 2MB region as compact 64k and + * 4k pages cannot be mixed within a 2MB region. * TODO: Suballocate the pt bo to avoid wasting a lot of * memory. */ if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 && covers && xe_pt_scan_64K(addr, next, xe_walk)) { walk->shifts = xe_compact_pt_shifts; + xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT; flags |= XE_PDE_64K; xe_child->is_compact = true; } @@ -618,8 +634,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, if (!xe_vma_is_null(vma)) { if (xe_vma_is_userptr(vma)) - xe_res_first_sg(vma->userptr.sg, 0, xe_vma_size(vma), - &curs); + xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0, + xe_vma_size(vma), &curs); else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo)) xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma), xe_vma_size(vma), &curs); @@ -853,7 +869,7 @@ static void xe_pt_commit_bind(struct xe_vma *vma, xe_pt_destroy(xe_pt_entry(pt_dir, j_), xe_vma_vm(vma)->flags, deferred); - pt_dir->dir.entries[j_] = &newpte->base; + pt_dir->children[j_] = &newpte->base; } kfree(entries[i].pt_entries); } @@ -906,17 +922,17 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe, #ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT -static int xe_pt_userptr_inject_eagain(struct xe_vma *vma) +static int xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) { - u32 divisor = vma->userptr.divisor ? vma->userptr.divisor : 2; + u32 divisor = uvma->userptr.divisor ? 
uvma->userptr.divisor : 2; static u32 count; if (count++ % divisor == divisor - 1) { - struct xe_vm *vm = xe_vma_vm(vma); + struct xe_vm *vm = xe_vma_vm(&uvma->vma); - vma->userptr.divisor = divisor << 1; + uvma->userptr.divisor = divisor << 1; spin_lock(&vm->userptr.invalidated_lock); - list_move_tail(&vma->userptr.invalidate_link, + list_move_tail(&uvma->userptr.invalidate_link, &vm->userptr.invalidated); spin_unlock(&vm->userptr.invalidated_lock); return true; @@ -927,7 +943,7 @@ static int xe_pt_userptr_inject_eagain(struct xe_vma *vma) #else -static bool xe_pt_userptr_inject_eagain(struct xe_vma *vma) +static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) { return false; } @@ -1000,9 +1016,9 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update) { struct xe_pt_migrate_pt_update *userptr_update = container_of(pt_update, typeof(*userptr_update), base); - struct xe_vma *vma = pt_update->vma; - unsigned long notifier_seq = vma->userptr.notifier_seq; - struct xe_vm *vm = xe_vma_vm(vma); + struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma); + unsigned long notifier_seq = uvma->userptr.notifier_seq; + struct xe_vm *vm = xe_vma_vm(&uvma->vma); int err = xe_pt_vm_dependencies(pt_update->job, &vm->rftree[pt_update->tile_id], pt_update->start, @@ -1023,7 +1039,7 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update) */ do { down_read(&vm->userptr.notifier_lock); - if (!mmu_interval_read_retry(&vma->userptr.notifier, + if (!mmu_interval_read_retry(&uvma->userptr.notifier, notifier_seq)) break; @@ -1032,11 +1048,11 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update) if (userptr_update->bind) return -EAGAIN; - notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier); + notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier); } while (true); /* Inject errors to test_whether they are handled correctly */ - if (userptr_update->bind && xe_pt_userptr_inject_eagain(vma)) { + if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) { up_read(&vm->userptr.notifier_lock); return -EAGAIN; } @@ -1297,7 +1313,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue vma->tile_present |= BIT(tile->id); if (bind_pt_update.locked) { - vma->userptr.initial_bind = true; + to_userptr_vma(vma)->userptr.initial_bind = true; up_read(&vm->userptr.notifier_lock); xe_bo_put_commit(&deferred); } @@ -1507,7 +1523,7 @@ xe_pt_commit_unbind(struct xe_vma *vma, xe_pt_destroy(xe_pt_entry(pt_dir, i), xe_vma_vm(vma)->flags, deferred); - pt_dir->dir.entries[i] = NULL; + pt_dir->children[i] = NULL; } } } @@ -1642,7 +1658,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu if (!vma->tile_present) { spin_lock(&vm->userptr.invalidated_lock); - list_del_init(&vma->userptr.invalidate_link); + list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link); spin_unlock(&vm->userptr.invalidated_lock); } up_read(&vm->userptr.notifier_lock); diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c index 8f6c8d063f39..b8b3d2aea492 100644 --- a/drivers/gpu/drm/xe/xe_pt_walk.c +++ b/drivers/gpu/drm/xe/xe_pt_walk.c @@ -74,7 +74,7 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level, u64 addr, u64 end, struct xe_pt_walk *walk) { pgoff_t offset = xe_pt_offset(addr, level, walk); - struct xe_ptw **entries = parent->dir ? parent->dir->entries : NULL; + struct xe_ptw **entries = parent->children ? 
parent->children : NULL; const struct xe_pt_walk_ops *ops = walk->ops; enum page_walk_action action; struct xe_ptw *child; diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h index ec3d1e9efa6d..5ecc4d2f0f65 100644 --- a/drivers/gpu/drm/xe/xe_pt_walk.h +++ b/drivers/gpu/drm/xe/xe_pt_walk.h @@ -8,28 +8,15 @@ #include <linux/pagewalk.h> #include <linux/types.h> -struct xe_ptw_dir; - /** * struct xe_ptw - base class for driver pagetable subclassing. - * @dir: Pointer to an array of children if any. + * @children: Pointer to an array of children if any. * * Drivers could subclass this, and if it's a page-directory, typically - * embed the xe_ptw_dir::entries array in the same allocation. + * embed an array of xe_ptw pointers. */ struct xe_ptw { - struct xe_ptw_dir *dir; -}; - -/** - * struct xe_ptw_dir - page directory structure - * @entries: Array holding page directory children. - * - * It is the responsibility of the user to ensure @entries is - * correctly sized. - */ -struct xe_ptw_dir { - struct xe_ptw *entries[0]; + struct xe_ptw **children; }; /** diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 9b35673b286c..7e924faeeea0 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -459,21 +459,21 @@ static size_t calc_topo_query_size(struct xe_device *xe) sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss)); } -static void __user *copy_mask(void __user *ptr, - struct drm_xe_query_topology_mask *topo, - void *mask, size_t mask_size) +static int copy_mask(void __user **ptr, + struct drm_xe_query_topology_mask *topo, + void *mask, size_t mask_size) { topo->num_bytes = mask_size; - if (copy_to_user(ptr, topo, sizeof(*topo))) - return ERR_PTR(-EFAULT); - ptr += sizeof(topo); + if (copy_to_user(*ptr, topo, sizeof(*topo))) + return -EFAULT; + *ptr += sizeof(topo); - if (copy_to_user(ptr, mask, mask_size)) - return ERR_PTR(-EFAULT); - ptr += mask_size; + if (copy_to_user(*ptr, mask, mask_size)) + return -EFAULT; + *ptr += mask_size; - return ptr; + return 0; } static int query_gt_topology(struct xe_device *xe, @@ -493,28 +493,28 @@ static int query_gt_topology(struct xe_device *xe, } for_each_gt(gt, xe, id) { + int err; + topo.gt_id = id; topo.type = DRM_XE_TOPO_DSS_GEOMETRY; - query_ptr = copy_mask(query_ptr, &topo, - gt->fuse_topo.g_dss_mask, - sizeof(gt->fuse_topo.g_dss_mask)); - if (IS_ERR(query_ptr)) - return PTR_ERR(query_ptr); + err = copy_mask(&query_ptr, &topo, gt->fuse_topo.g_dss_mask, + sizeof(gt->fuse_topo.g_dss_mask)); + if (err) + return err; topo.type = DRM_XE_TOPO_DSS_COMPUTE; - query_ptr = copy_mask(query_ptr, &topo, - gt->fuse_topo.c_dss_mask, - sizeof(gt->fuse_topo.c_dss_mask)); - if (IS_ERR(query_ptr)) - return PTR_ERR(query_ptr); + err = copy_mask(&query_ptr, &topo, gt->fuse_topo.c_dss_mask, + sizeof(gt->fuse_topo.c_dss_mask)); + if (err) + return err; topo.type = DRM_XE_TOPO_EU_PER_DSS; - query_ptr = copy_mask(query_ptr, &topo, - gt->fuse_topo.eu_mask_per_dss, - sizeof(gt->fuse_topo.eu_mask_per_dss)); - if (IS_ERR(query_ptr)) - return PTR_ERR(query_ptr); + err = copy_mask(&query_ptr, &topo, + gt->fuse_topo.eu_mask_per_dss, + sizeof(gt->fuse_topo.eu_mask_per_dss)); + if (err) + return err; } return 0; diff --git a/drivers/gpu/drm/xe/xe_range_fence.c b/drivers/gpu/drm/xe/xe_range_fence.c index d35d9ec58e86..372378e89e98 100644 --- a/drivers/gpu/drm/xe/xe_range_fence.c +++ b/drivers/gpu/drm/xe/xe_range_fence.c @@ -151,6 +151,11 @@ xe_range_fence_tree_next(struct xe_range_fence *rfence, u64 
start, u64 last) return xe_range_fence_tree_iter_next(rfence, start, last); } +static void xe_range_fence_free(struct xe_range_fence *rfence) +{ + kfree(rfence); +} + const struct xe_range_fence_ops xe_range_fence_kfree_ops = { - .free = (void (*)(struct xe_range_fence *rfence)) kfree, + .free = xe_range_fence_free, }; diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c index 01106a1156ad..4e2ccad0e52f 100644 --- a/drivers/gpu/drm/xe/xe_sched_job.c +++ b/drivers/gpu/drm/xe/xe_sched_job.c @@ -274,7 +274,6 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm) struct dma_fence *fence; fence = xe_exec_queue_last_fence_get(job->q, vm); - dma_fence_get(fence); return drm_sched_job_add_dependency(&job->drm, fence); } diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c index e4c220cf9115..02c9577fe418 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -19,7 +19,7 @@ #include "xe_macros.h" #include "xe_sched_job_types.h" -struct user_fence { +struct xe_user_fence { struct xe_device *xe; struct kref refcount; struct dma_fence_cb cb; @@ -27,31 +27,32 @@ struct user_fence { struct mm_struct *mm; u64 __user *addr; u64 value; + int signalled; }; static void user_fence_destroy(struct kref *kref) { - struct user_fence *ufence = container_of(kref, struct user_fence, + struct xe_user_fence *ufence = container_of(kref, struct xe_user_fence, refcount); mmdrop(ufence->mm); kfree(ufence); } -static void user_fence_get(struct user_fence *ufence) +static void user_fence_get(struct xe_user_fence *ufence) { kref_get(&ufence->refcount); } -static void user_fence_put(struct user_fence *ufence) +static void user_fence_put(struct xe_user_fence *ufence) { kref_put(&ufence->refcount, user_fence_destroy); } -static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr, - u64 value) +static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr, + u64 value) { - struct user_fence *ufence; + struct xe_user_fence *ufence; ufence = kmalloc(sizeof(*ufence), GFP_KERNEL); if (!ufence) @@ -69,7 +70,7 @@ static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr, static void user_fence_worker(struct work_struct *w) { - struct user_fence *ufence = container_of(w, struct user_fence, worker); + struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker); if (mmget_not_zero(ufence->mm)) { kthread_use_mm(ufence->mm); @@ -80,10 +81,11 @@ static void user_fence_worker(struct work_struct *w) } wake_up_all(&ufence->xe->ufence_wq); + WRITE_ONCE(ufence->signalled, 1); user_fence_put(ufence); } -static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence) +static void kick_ufence(struct xe_user_fence *ufence, struct dma_fence *fence) { INIT_WORK(&ufence->worker, user_fence_worker); queue_work(ufence->xe->ordered_wq, &ufence->worker); @@ -92,7 +94,7 @@ static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence) static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { - struct user_fence *ufence = container_of(cb, struct user_fence, cb); + struct xe_user_fence *ufence = container_of(cb, struct xe_user_fence, cb); kick_ufence(ufence, fence); } @@ -307,7 +309,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync, /* Easy case... 
*/ if (!num_in_fence) { fence = xe_exec_queue_last_fence_get(q, vm); - dma_fence_get(fence); return fence; } @@ -322,7 +323,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync, } } fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm); - dma_fence_get(fences[current_fence - 1]); cf = dma_fence_array_create(num_in_fence, fences, vm->composite_fence_ctx, vm->composite_fence_seqno++, @@ -342,3 +342,39 @@ err_out: return ERR_PTR(-ENOMEM); } + +/** + * xe_sync_ufence_get() - Get user fence from sync + * @sync: input sync + * + * Get a user fence reference from sync. + * + * Return: xe_user_fence pointer with reference + */ +struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync) +{ + user_fence_get(sync->ufence); + + return sync->ufence; +} + +/** + * xe_sync_ufence_put() - Put user fence reference + * @ufence: user fence reference + * + */ +void xe_sync_ufence_put(struct xe_user_fence *ufence) +{ + user_fence_put(ufence); +} + +/** + * xe_sync_ufence_get_status() - Get user fence status + * @ufence: user fence + * + * Return: 1 if signalled, 0 not signalled, <0 on error + */ +int xe_sync_ufence_get_status(struct xe_user_fence *ufence) +{ + return READ_ONCE(ufence->signalled); +} diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h index d284afbe917c..0fd0d51208e6 100644 --- a/drivers/gpu/drm/xe/xe_sync.h +++ b/drivers/gpu/drm/xe/xe_sync.h @@ -33,4 +33,13 @@ struct dma_fence * xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync, struct xe_exec_queue *q, struct xe_vm *vm); +static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync) +{ + return !!sync->ufence; +} + +struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync); +void xe_sync_ufence_put(struct xe_user_fence *ufence); +int xe_sync_ufence_get_status(struct xe_user_fence *ufence); + #endif diff --git a/drivers/gpu/drm/xe/xe_sync_types.h b/drivers/gpu/drm/xe/xe_sync_types.h index 852db5e7884f..30ac3f51993b 100644 --- a/drivers/gpu/drm/xe/xe_sync_types.h +++ b/drivers/gpu/drm/xe/xe_sync_types.h @@ -18,7 +18,7 @@ struct xe_sync_entry { struct drm_syncobj *syncobj; struct dma_fence *fence; struct dma_fence_chain *chain_fence; - struct user_fence *ufence; + struct xe_user_fence *ufence; u64 addr; u64 timeline_value; u32 type; diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c index 044c20881de7..0650b2fa75ef 100644 --- a/drivers/gpu/drm/xe/xe_tile.c +++ b/drivers/gpu/drm/xe/xe_tile.c @@ -167,9 +167,10 @@ int xe_tile_init_noalloc(struct xe_tile *tile) goto err_mem_access; tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16); - if (IS_ERR(tile->mem.kernel_bb_pool)) + if (IS_ERR(tile->mem.kernel_bb_pool)) { err = PTR_ERR(tile->mem.kernel_bb_pool); - + goto err_mem_access; + } xe_wa_apply_tile_workarounds(tile); xe_tile_sysfs_init(tile); diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h index 95163c303f3e..4ddc55527f9a 100644 --- a/drivers/gpu/drm/xe/xe_trace.h +++ b/drivers/gpu/drm/xe/xe_trace.h @@ -12,6 +12,7 @@ #include <linux/tracepoint.h> #include <linux/types.h> +#include "xe_bo.h" #include "xe_bo_types.h" #include "xe_exec_queue_types.h" #include "xe_gpu_scheduler_types.h" @@ -26,16 +27,16 @@ DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence, TP_ARGS(fence), TP_STRUCT__entry( - __field(u64, fence) + __field(struct xe_gt_tlb_invalidation_fence *, fence) __field(int, seqno) ), TP_fast_assign( - __entry->fence = (u64)fence; + __entry->fence = fence; __entry->seqno = fence->seqno; ), - 
TP_printk("fence=0x%016llx, seqno=%d", + TP_printk("fence=%p, seqno=%d", __entry->fence, __entry->seqno) ); @@ -82,16 +83,16 @@ DECLARE_EVENT_CLASS(xe_bo, TP_STRUCT__entry( __field(size_t, size) __field(u32, flags) - __field(u64, vm) + __field(struct xe_vm *, vm) ), TP_fast_assign( __entry->size = bo->size; __entry->flags = bo->flags; - __entry->vm = (unsigned long)bo->vm; + __entry->vm = bo->vm; ), - TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx", + TP_printk("size=%zu, flags=0x%02x, vm=%p", __entry->size, __entry->flags, __entry->vm) ); @@ -100,9 +101,31 @@ DEFINE_EVENT(xe_bo, xe_bo_cpu_fault, TP_ARGS(bo) ); -DEFINE_EVENT(xe_bo, xe_bo_move, - TP_PROTO(struct xe_bo *bo), - TP_ARGS(bo) +TRACE_EVENT(xe_bo_move, + TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement, + bool move_lacks_source), + TP_ARGS(bo, new_placement, old_placement, move_lacks_source), + TP_STRUCT__entry( + __field(struct xe_bo *, bo) + __field(size_t, size) + __field(u32, new_placement) + __field(u32, old_placement) + __array(char, device_id, 12) + __field(bool, move_lacks_source) + ), + + TP_fast_assign( + __entry->bo = bo; + __entry->size = bo->size; + __entry->new_placement = new_placement; + __entry->old_placement = old_placement; + strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12); + __entry->move_lacks_source = move_lacks_source; + ), + TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s", + __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size, + xe_mem_type_to_name[__entry->old_placement], + xe_mem_type_to_name[__entry->new_placement], __entry->device_id) ); DECLARE_EVENT_CLASS(xe_exec_queue, @@ -327,16 +350,16 @@ DECLARE_EVENT_CLASS(xe_hw_fence, TP_STRUCT__entry( __field(u64, ctx) __field(u32, seqno) - __field(u64, fence) + __field(struct xe_hw_fence *, fence) ), TP_fast_assign( __entry->ctx = fence->dma.context; __entry->seqno = fence->dma.seqno; - __entry->fence = (unsigned long)fence; + __entry->fence = fence; ), - TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u", + TP_printk("ctx=0x%016llx, fence=%p, seqno=%u", __entry->ctx, __entry->fence, __entry->seqno) ); @@ -365,7 +388,7 @@ DECLARE_EVENT_CLASS(xe_vma, TP_ARGS(vma), TP_STRUCT__entry( - __field(u64, vma) + __field(struct xe_vma *, vma) __field(u32, asid) __field(u64, start) __field(u64, end) @@ -373,14 +396,14 @@ DECLARE_EVENT_CLASS(xe_vma, ), TP_fast_assign( - __entry->vma = (unsigned long)vma; + __entry->vma = vma; __entry->asid = xe_vma_vm(vma)->usm.asid; __entry->start = xe_vma_start(vma); __entry->end = xe_vma_end(vma) - 1; __entry->ptr = xe_vma_userptr(vma); ), - TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx,", + TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,", __entry->vma, __entry->asid, __entry->start, __entry->end, __entry->ptr) ) @@ -465,16 +488,16 @@ DECLARE_EVENT_CLASS(xe_vm, TP_ARGS(vm), TP_STRUCT__entry( - __field(u64, vm) + __field(struct xe_vm *, vm) __field(u32, asid) ), TP_fast_assign( - __entry->vm = (unsigned long)vm; + __entry->vm = vm; __entry->asid = vm->usm.asid; ), - TP_printk("vm=0x%016llx, asid=0x%05x", __entry->vm, + TP_printk("vm=%p, asid=0x%05x", __entry->vm, __entry->asid) ); diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 53833ab81424..3b21afe5b488 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -37,8 +37,6 @@ #include "generated/xe_wa_oob.h" #include "xe_wa.h" -#define 
TEST_VM_ASYNC_OPS_ERROR - static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) { return vm->gpuvm.r_obj; @@ -46,7 +44,7 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) /** * xe_vma_userptr_check_repin() - Advisory check for repin needed - * @vma: The userptr vma + * @uvma: The userptr vma * * Check if the userptr vma has been invalidated since last successful * repin. The check is advisory only and can the function can be called @@ -56,15 +54,17 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) * * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended. */ -int xe_vma_userptr_check_repin(struct xe_vma *vma) +int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma) { - return mmu_interval_check_retry(&vma->userptr.notifier, - vma->userptr.notifier_seq) ? + return mmu_interval_check_retry(&uvma->userptr.notifier, + uvma->userptr.notifier_seq) ? -EAGAIN : 0; } -int xe_vma_userptr_pin_pages(struct xe_vma *vma) +int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma) { + struct xe_userptr *userptr = &uvma->userptr; + struct xe_vma *vma = &uvma->vma; struct xe_vm *vm = xe_vma_vm(vma); struct xe_device *xe = vm->xe; const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT; @@ -80,30 +80,30 @@ retry: if (vma->gpuva.flags & XE_VMA_DESTROYED) return 0; - notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier); - if (notifier_seq == vma->userptr.notifier_seq) + notifier_seq = mmu_interval_read_begin(&userptr->notifier); + if (notifier_seq == userptr->notifier_seq) return 0; pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL); if (!pages) return -ENOMEM; - if (vma->userptr.sg) { + if (userptr->sg) { dma_unmap_sgtable(xe->drm.dev, - vma->userptr.sg, + userptr->sg, read_only ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL, 0); - sg_free_table(vma->userptr.sg); - vma->userptr.sg = NULL; + sg_free_table(userptr->sg); + userptr->sg = NULL; } pinned = ret = 0; if (in_kthread) { - if (!mmget_not_zero(vma->userptr.notifier.mm)) { + if (!mmget_not_zero(userptr->notifier.mm)) { ret = -EFAULT; goto mm_closed; } - kthread_use_mm(vma->userptr.notifier.mm); + kthread_use_mm(userptr->notifier.mm); } while (pinned < num_pages) { @@ -112,43 +112,40 @@ retry: num_pages - pinned, read_only ? 0 : FOLL_WRITE, &pages[pinned]); - if (ret < 0) { - if (in_kthread) - ret = 0; + if (ret < 0) break; - } pinned += ret; ret = 0; } if (in_kthread) { - kthread_unuse_mm(vma->userptr.notifier.mm); - mmput(vma->userptr.notifier.mm); + kthread_unuse_mm(userptr->notifier.mm); + mmput(userptr->notifier.mm); } mm_closed: if (ret) goto out; - ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages, + ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages, pinned, 0, (u64)pinned << PAGE_SHIFT, xe_sg_segment_size(xe->drm.dev), GFP_KERNEL); if (ret) { - vma->userptr.sg = NULL; + userptr->sg = NULL; goto out; } - vma->userptr.sg = &vma->userptr.sgt; + userptr->sg = &userptr->sgt; - ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg, + ret = dma_map_sgtable(xe->drm.dev, userptr->sg, read_only ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING); if (ret) { - sg_free_table(vma->userptr.sg); - vma->userptr.sg = NULL; + sg_free_table(userptr->sg); + userptr->sg = NULL; goto out; } @@ -167,8 +164,8 @@ out: kvfree(pages); if (!(ret < 0)) { - vma->userptr.notifier_seq = notifier_seq; - if (xe_vma_userptr_check_repin(vma) == -EAGAIN) + userptr->notifier_seq = notifier_seq; + if (xe_vma_userptr_check_repin(uvma) == -EAGAIN) goto retry; } @@ -635,7 +632,9 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq) { - struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier); + struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier); + struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr); + struct xe_vma *vma = &uvma->vma; struct xe_vm *vm = xe_vma_vm(vma); struct dma_resv_iter cursor; struct dma_fence *fence; @@ -651,7 +650,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni, mmu_interval_set_seq(mni, cur_seq); /* No need to stop gpu access if the userptr is not yet bound. */ - if (!vma->userptr.initial_bind) { + if (!userptr->initial_bind) { up_write(&vm->userptr.notifier_lock); return true; } @@ -663,7 +662,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni, if (!xe_vm_in_fault_mode(vm) && !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) { spin_lock(&vm->userptr.invalidated_lock); - list_move_tail(&vma->userptr.invalidate_link, + list_move_tail(&userptr->invalidate_link, &vm->userptr.invalidated); spin_unlock(&vm->userptr.invalidated_lock); } @@ -703,7 +702,7 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = { int xe_vm_userptr_pin(struct xe_vm *vm) { - struct xe_vma *vma, *next; + struct xe_userptr_vma *uvma, *next; int err = 0; LIST_HEAD(tmp_evict); @@ -711,22 +710,23 @@ int xe_vm_userptr_pin(struct xe_vm *vm) /* Collect invalidated userptrs */ spin_lock(&vm->userptr.invalidated_lock); - list_for_each_entry_safe(vma, next, &vm->userptr.invalidated, + list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated, userptr.invalidate_link) { - list_del_init(&vma->userptr.invalidate_link); - list_move_tail(&vma->combined_links.userptr, + list_del_init(&uvma->userptr.invalidate_link); + list_move_tail(&uvma->userptr.repin_link, &vm->userptr.repin_list); } spin_unlock(&vm->userptr.invalidated_lock); /* Pin and move to temporary list */ - list_for_each_entry_safe(vma, next, &vm->userptr.repin_list, - combined_links.userptr) { - err = xe_vma_userptr_pin_pages(vma); + list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, + userptr.repin_link) { + err = xe_vma_userptr_pin_pages(uvma); if (err < 0) return err; - list_move_tail(&vma->combined_links.userptr, &vm->rebind_list); + list_del_init(&uvma->userptr.repin_link); + list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list); } return 0; @@ -782,6 +782,14 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) return fence; } +static void xe_vma_free(struct xe_vma *vma) +{ + if (xe_vma_is_userptr(vma)) + kfree(to_userptr_vma(vma)); + else + kfree(vma); +} + #define VMA_CREATE_FLAG_READ_ONLY BIT(0) #define VMA_CREATE_FLAG_IS_NULL BIT(1) @@ -800,14 +808,26 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, xe_assert(vm->xe, start < end); xe_assert(vm->xe, end < vm->size); - if (!bo && !is_null) /* userptr */ + /* + * Allocate and ensure that the 
xe_vma_is_userptr() return + * matches what was allocated. + */ + if (!bo && !is_null) { + struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL); + + if (!uvma) + return ERR_PTR(-ENOMEM); + + vma = &uvma->vma; + } else { vma = kzalloc(sizeof(*vma), GFP_KERNEL); - else - vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr), - GFP_KERNEL); - if (!vma) { - vma = ERR_PTR(-ENOMEM); - return vma; + if (!vma) + return ERR_PTR(-ENOMEM); + + if (is_null) + vma->gpuva.flags |= DRM_GPUVA_SPARSE; + if (bo) + vma->gpuva.gem.obj = &bo->ttm.base; } INIT_LIST_HEAD(&vma->combined_links.rebind); @@ -818,8 +838,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, vma->gpuva.va.range = end - start + 1; if (read_only) vma->gpuva.flags |= XE_VMA_READ_ONLY; - if (is_null) - vma->gpuva.flags |= DRM_GPUVA_SPARSE; for_each_tile(tile, vm->xe, id) vma->tile_mask |= 0x1 << id; @@ -836,35 +854,35 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base); if (IS_ERR(vm_bo)) { - kfree(vma); + xe_vma_free(vma); return ERR_CAST(vm_bo); } drm_gpuvm_bo_extobj_add(vm_bo); drm_gem_object_get(&bo->ttm.base); - vma->gpuva.gem.obj = &bo->ttm.base; vma->gpuva.gem.offset = bo_offset_or_userptr; drm_gpuva_link(&vma->gpuva, vm_bo); drm_gpuvm_bo_put(vm_bo); } else /* userptr or null */ { if (!is_null) { + struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr; u64 size = end - start + 1; int err; - INIT_LIST_HEAD(&vma->userptr.invalidate_link); + INIT_LIST_HEAD(&userptr->invalidate_link); + INIT_LIST_HEAD(&userptr->repin_link); vma->gpuva.gem.offset = bo_offset_or_userptr; - err = mmu_interval_notifier_insert(&vma->userptr.notifier, + err = mmu_interval_notifier_insert(&userptr->notifier, current->mm, xe_vma_userptr(vma), size, &vma_userptr_notifier_ops); if (err) { - kfree(vma); - vma = ERR_PTR(err); - return vma; + xe_vma_free(vma); + return ERR_PTR(err); } - vma->userptr.notifier_seq = LONG_MAX; + userptr->notifier_seq = LONG_MAX; } xe_vm_get(vm); @@ -879,14 +897,21 @@ static void xe_vma_destroy_late(struct xe_vma *vma) struct xe_device *xe = vm->xe; bool read_only = xe_vma_read_only(vma); + if (vma->ufence) { + xe_sync_ufence_put(vma->ufence); + vma->ufence = NULL; + } + if (xe_vma_is_userptr(vma)) { - if (vma->userptr.sg) { + struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr; + + if (userptr->sg) { dma_unmap_sgtable(xe->drm.dev, - vma->userptr.sg, + userptr->sg, read_only ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL, 0); - sg_free_table(vma->userptr.sg); - vma->userptr.sg = NULL; + sg_free_table(userptr->sg); + userptr->sg = NULL; } /* @@ -894,7 +919,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma) * the notifer until we're sure the GPU is not accessing * them anymore */ - mmu_interval_notifier_remove(&vma->userptr.notifier); + mmu_interval_notifier_remove(&userptr->notifier); xe_vm_put(vm); } else if (xe_vma_is_null(vma)) { xe_vm_put(vm); @@ -902,7 +927,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma) xe_bo_put(xe_vma_bo(vma)); } - kfree(vma); + xe_vma_free(vma); } static void vma_destroy_work_func(struct work_struct *w) @@ -933,7 +958,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence) xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED); spin_lock(&vm->userptr.invalidated_lock); - list_del(&vma->userptr.invalidate_link); + list_del(&to_userptr_vma(vma)->userptr.invalidate_link); spin_unlock(&vm->userptr.invalidated_lock); } else if (!xe_vma_is_null(vma)) { xe_bo_assert_held(xe_vma_bo(vma)); @@ -975,9 +1000,16 @@ int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma, int err; XE_WARN_ON(!vm); - err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared); - if (!err && bo && !bo->vm) - err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared); + if (num_shared) + err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared); + else + err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); + if (!err && bo && !bo->vm) { + if (num_shared) + err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared); + else + err = drm_exec_lock_obj(exec, &bo->ttm.base); + } return err; } @@ -1581,6 +1613,16 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q, trace_xe_vma_unbind(vma); + if (vma->ufence) { + struct xe_user_fence * const f = vma->ufence; + + if (!xe_sync_ufence_get_status(f)) + return ERR_PTR(-EBUSY); + + vma->ufence = NULL; + xe_sync_ufence_put(f); + } + if (number_tiles > 1) { fences = kmalloc_array(number_tiles, sizeof(*fences), GFP_KERNEL); @@ -1714,6 +1756,21 @@ err_fences: return ERR_PTR(err); } +static struct xe_user_fence * +find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs) +{ + unsigned int i; + + for (i = 0; i < num_syncs; i++) { + struct xe_sync_entry *e = &syncs[i]; + + if (xe_sync_is_ufence(e)) + return xe_sync_ufence_get(e); + } + + return NULL; +} + static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, struct xe_sync_entry *syncs, u32 num_syncs, bool immediate, bool first_op, @@ -1721,9 +1778,16 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, { struct dma_fence *fence; struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); + struct xe_user_fence *ufence; xe_vm_assert_held(vm); + ufence = find_ufence_get(syncs, num_syncs); + if (vma->ufence && ufence) + xe_sync_ufence_put(vma->ufence); + + vma->ufence = ufence ?: vma->ufence; + if (immediate) { fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op, last_op); @@ -1959,6 +2023,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, xe_exec_queue_last_fence_get(wait_exec_queue, vm); xe_sync_entry_signal(&syncs[i], NULL, fence); + dma_fence_put(fence); } } @@ -2039,7 +2104,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, struct drm_gem_object *obj = bo ? 
&bo->ttm.base : NULL; struct drm_gpuva_ops *ops; struct drm_gpuva_op *__op; - struct xe_vma_op *op; struct drm_gpuvm_bo *vm_bo; int err; @@ -2086,23 +2150,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, if (IS_ERR(ops)) return ops; -#ifdef TEST_VM_ASYNC_OPS_ERROR - if (operation & FORCE_ASYNC_OP_ERROR) { - op = list_first_entry_or_null(&ops->list, struct xe_vma_op, - base.entry); - if (op) - op->inject_error = true; - } -#endif - drm_gpuva_for_each_op(__op, ops) { struct xe_vma_op *op = gpuva_op_to_vma_op(__op); if (__op->op == DRM_GPUVA_OP_MAP) { - op->map.immediate = - flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE; - op->map.read_only = - flags & DRM_XE_VM_BIND_FLAG_READONLY; op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL; op->map.pat_index = pat_index; } else if (__op->op == DRM_GPUVA_OP_PREFETCH) { @@ -2150,7 +2201,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, drm_exec_fini(&exec); if (xe_vma_is_userptr(vma)) { - err = xe_vma_userptr_pin_pages(vma); + err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); if (err) { prep_vma_destroy(vm, vma, false); xe_vma_destroy_unlocked(vma); @@ -2172,13 +2223,17 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma) { if (vma->gpuva.flags & XE_VMA_PTE_1G) return SZ_1G; - else if (vma->gpuva.flags & XE_VMA_PTE_2M) + else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT)) return SZ_2M; + else if (vma->gpuva.flags & XE_VMA_PTE_64K) + return SZ_64K; + else if (vma->gpuva.flags & XE_VMA_PTE_4K) + return SZ_4K; - return SZ_4K; + return SZ_1G; /* Uninitialized, used max size */ } -static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size) +static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size) { switch (size) { case SZ_1G: @@ -2187,9 +2242,13 @@ static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size) case SZ_2M: vma->gpuva.flags |= XE_VMA_PTE_2M; break; + case SZ_64K: + vma->gpuva.flags |= XE_VMA_PTE_64K; + break; + case SZ_4K: + vma->gpuva.flags |= XE_VMA_PTE_4K; + break; } - - return SZ_4K; } static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) @@ -2287,8 +2346,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, switch (op->base.op) { case DRM_GPUVA_OP_MAP: { - flags |= op->map.read_only ? - VMA_CREATE_FLAG_READ_ONLY : 0; flags |= op->map.is_null ? 
VMA_CREATE_FLAG_IS_NULL : 0; @@ -2419,7 +2476,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm, case DRM_GPUVA_OP_MAP: err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma), op->syncs, op->num_syncs, - op->map.immediate || !xe_vm_in_fault_mode(vm), + !xe_vm_in_fault_mode(vm), op->flags & XE_VMA_OP_FIRST, op->flags & XE_VMA_OP_LAST); break; @@ -2505,13 +2562,25 @@ retry_userptr: } drm_exec_fini(&exec); - if (err == -EAGAIN && xe_vma_is_userptr(vma)) { + if (err == -EAGAIN) { lockdep_assert_held_write(&vm->lock); - err = xe_vma_userptr_pin_pages(vma); - if (!err) - goto retry_userptr; - trace_xe_vma_fail(vma); + if (op->base.op == DRM_GPUVA_OP_REMAP) { + if (!op->remap.unmap_done) + vma = gpuva_to_vma(op->base.remap.unmap->va); + else if (op->remap.prev) + vma = op->remap.prev; + else + vma = op->remap.next; + } + + if (xe_vma_is_userptr(vma)) { + err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); + if (!err) + goto retry_userptr; + + trace_xe_vma_fail(vma); + } } return err; @@ -2523,13 +2592,6 @@ static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op) lockdep_assert_held_write(&vm->lock); -#ifdef TEST_VM_ASYNC_OPS_ERROR - if (op->inject_error) { - op->inject_error = false; - return -ENOMEM; - } -#endif - switch (op->base.op) { case DRM_GPUVA_OP_MAP: ret = __xe_vma_op_execute(vm, op->map.vma, op); @@ -2644,7 +2706,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm, { int i; - for (i = num_ops_list - 1; i; ++i) { + for (i = num_ops_list - 1; i >= 0; --i) { struct drm_gpuva_ops *__ops = ops[i]; struct drm_gpuva_op *__op; @@ -2689,21 +2751,11 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm, return 0; } -#ifdef TEST_VM_ASYNC_OPS_ERROR -#define SUPPORTED_FLAGS \ - (FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_READONLY | \ - DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | 0xffff) -#else -#define SUPPORTED_FLAGS \ - (DRM_XE_VM_BIND_FLAG_READONLY | \ - DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \ - 0xffff) -#endif +#define SUPPORTED_FLAGS (DRM_XE_VM_BIND_FLAG_NULL | \ + DRM_XE_VM_BIND_FLAG_DUMPABLE) #define XE_64K_PAGE_MASK 0xffffull #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP) -#define MAX_BINDS 512 /* FIXME: Picking random upper limit */ - static int vm_bind_ioctl_check_args(struct xe_device *xe, struct drm_xe_vm_bind *args, struct drm_xe_vm_bind_op **bind_ops) @@ -2715,16 +2767,16 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) return -EINVAL; - if (XE_IOCTL_DBG(xe, args->extensions) || - XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS)) + if (XE_IOCTL_DBG(xe, args->extensions)) return -EINVAL; if (args->num_binds > 1) { u64 __user *bind_user = u64_to_user_ptr(args->vector_of_binds); - *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) * - args->num_binds, GFP_KERNEL); + *bind_ops = kvmalloc_array(args->num_binds, + sizeof(struct drm_xe_vm_bind_op), + GFP_KERNEL | __GFP_ACCOUNT); if (!*bind_ops) return -ENOMEM; @@ -2814,7 +2866,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, free_bind_ops: if (args->num_binds > 1) - kfree(*bind_ops); + kvfree(*bind_ops); return err; } @@ -2851,7 +2903,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) struct drm_gpuva_ops **ops = NULL; struct xe_vm *vm; struct xe_exec_queue *q = NULL; - u32 num_syncs; + u32 num_syncs, num_ufence = 0; struct xe_sync_entry *syncs = NULL; struct drm_xe_vm_bind_op *bind_ops; LIST_HEAD(ops_list); @@ -2902,13 +2954,15 @@ int 
xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) } if (args->num_binds) { - bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL); + bos = kvcalloc(args->num_binds, sizeof(*bos), + GFP_KERNEL | __GFP_ACCOUNT); if (!bos) { err = -ENOMEM; goto release_vm_lock; } - ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL); + ops = kvcalloc(args->num_binds, sizeof(*ops), + GFP_KERNEL | __GFP_ACCOUNT); if (!ops) { err = -ENOMEM; goto release_vm_lock; @@ -2988,6 +3042,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0)); if (err) goto free_syncs; + + if (xe_sync_is_ufence(&syncs[num_syncs])) + num_ufence++; + } + + if (XE_IOCTL_DBG(xe, num_ufence > 1)) { + err = -EINVAL; + goto free_syncs; } if (!args->num_binds) { @@ -3041,10 +3103,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) for (i = 0; bos && i < args->num_binds; ++i) xe_bo_put(bos[i]); - kfree(bos); - kfree(ops); + kvfree(bos); + kvfree(ops); if (args->num_binds > 1) - kfree(bind_ops); + kvfree(bind_ops); return err; @@ -3068,10 +3130,10 @@ put_exec_queue: if (q) xe_exec_queue_put(q); free_objs: - kfree(bos); - kfree(ops); + kvfree(bos); + kvfree(ops); if (args->num_binds > 1) - kfree(bind_ops); + kvfree(bind_ops); return err; } @@ -3130,8 +3192,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma) if (IS_ENABLED(CONFIG_PROVE_LOCKING)) { if (xe_vma_is_userptr(vma)) { WARN_ON_ONCE(!mmu_interval_check_retry - (&vma->userptr.notifier, - vma->userptr.notifier_seq)); + (&to_userptr_vma(vma)->userptr.notifier, + to_userptr_vma(vma)->userptr.notifier_seq)); WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)), DMA_RESV_USAGE_BOOKKEEP)); @@ -3192,11 +3254,11 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id) if (is_null) { addr = 0; } else if (is_userptr) { + struct sg_table *sg = to_userptr_vma(vma)->userptr.sg; struct xe_res_cursor cur; - if (vma->userptr.sg) { - xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE, - &cur); + if (sg) { + xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur); addr = xe_res_dma(&cur); } else { addr = 0; diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h index cf2f96e8c1ab..9654a0612fc2 100644 --- a/drivers/gpu/drm/xe/xe_vm.h +++ b/drivers/gpu/drm/xe/xe_vm.h @@ -160,6 +160,18 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma) return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma); } +/** + * to_userptr_vma() - Return a pointer to an embedding userptr vma + * @vma: Pointer to the embedded struct xe_vma + * + * Return: Pointer to the embedding userptr vma + */ +static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma) +{ + xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma)); + return container_of(vma, struct xe_userptr_vma, vma); +} + u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile); int xe_vm_create_ioctl(struct drm_device *dev, void *data, @@ -224,9 +236,9 @@ static inline void xe_vm_reactivate_rebind(struct xe_vm *vm) } } -int xe_vma_userptr_pin_pages(struct xe_vma *vma); +int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma); -int xe_vma_userptr_check_repin(struct xe_vma *vma); +int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma); bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end); diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index 63e8a50b88e9..7300eea5394b 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ 
b/drivers/gpu/drm/xe/xe_vm_types.h @@ -19,11 +19,9 @@ struct xe_bo; struct xe_sync_entry; +struct xe_user_fence; struct xe_vm; -#define TEST_VM_ASYNC_OPS_ERROR -#define FORCE_ASYNC_OP_ERROR BIT(31) - #define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS #define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1) #define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2) @@ -32,11 +30,15 @@ struct xe_vm; #define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5) #define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6) #define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7) +#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8) +#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9) /** struct xe_userptr - User pointer */ struct xe_userptr { /** @invalidate_link: Link for the vm::userptr.invalidated list */ struct list_head invalidate_link; + /** @userptr: link into VM repin list if userptr. */ + struct list_head repin_link; /** * @notifier: MMU notifier for user pointer (invalidation call back) */ @@ -68,8 +70,6 @@ struct xe_vma { * resv. */ union { - /** @userptr: link into VM repin list if userptr. */ - struct list_head userptr; /** @rebind: link into VM if this VMA needs rebinding. */ struct list_head rebind; /** @destroy: link to contested list when VM is being closed. */ @@ -107,9 +107,19 @@ struct xe_vma { u16 pat_index; /** - * @userptr: user pointer state, only allocated for VMAs that are - * user pointers + * @ufence: The user fence that was provided with MAP. + * Needs to be signalled before UNMAP can be processed. */ + struct xe_user_fence *ufence; +}; + +/** + * struct xe_userptr_vma - A userptr vma subclass + * @vma: The vma. + * @userptr: Additional userptr information. + */ +struct xe_userptr_vma { + struct xe_vma vma; struct xe_userptr userptr; }; @@ -285,10 +295,6 @@ struct xe_vm { struct xe_vma_op_map { /** @vma: VMA to map */ struct xe_vma *vma; - /** @immediate: Immediate bind */ - bool immediate; - /** @read_only: Read only */ - bool read_only; /** @is_null: is NULL binding */ bool is_null; /** @pat_index: The pat index to use for this operation. 
*/ @@ -356,11 +362,6 @@ struct xe_vma_op { /** @flags: operation flags */ enum xe_vma_op_flags flags; -#ifdef TEST_VM_ASYNC_OPS_ERROR - /** @inject_error: inject error to test async op error handling */ - bool inject_error; -#endif - union { /** @map: VMA map operation specific data */ struct xe_vma_op_map map; diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 42fd504abbcd..89983d7d73ca 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -169,6 +169,7 @@ static const struct host1x_info host1x06_info = { .num_sid_entries = ARRAY_SIZE(tegra186_sid_table), .sid_table = tegra186_sid_table, .reserve_vblank_syncpts = false, + .skip_reset_assert = true, }; static const struct host1x_sid_entry tegra194_sid_table[] = { @@ -680,13 +681,15 @@ static int __maybe_unused host1x_runtime_suspend(struct device *dev) host1x_intr_stop(host); host1x_syncpt_save(host); - err = reset_control_bulk_assert(host->nresets, host->resets); - if (err) { - dev_err(dev, "failed to assert reset: %d\n", err); - goto resume_host1x; - } + if (!host->info->skip_reset_assert) { + err = reset_control_bulk_assert(host->nresets, host->resets); + if (err) { + dev_err(dev, "failed to assert reset: %d\n", err); + goto resume_host1x; + } - usleep_range(1000, 2000); + usleep_range(1000, 2000); + } clk_disable_unprepare(host->clk); reset_control_bulk_release(host->nresets, host->resets); diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h index c8e302de7625..925a118db23f 100644 --- a/drivers/gpu/host1x/dev.h +++ b/drivers/gpu/host1x/dev.h @@ -116,6 +116,12 @@ struct host1x_info { * the display driver disables VBLANK increments. */ bool reserve_vblank_syncpts; + /* + * On Tegra186, secure world applications may require access to + * host1x during suspend/resume. To allow this, we need to leave + * host1x not in reset. 
+ */ + bool skip_reset_assert; }; struct host1x { diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c index d9ef45fcaeab..470ae2c29c94 100644 --- a/drivers/hid/bpf/hid_bpf_dispatch.c +++ b/drivers/hid/bpf/hid_bpf_dispatch.c @@ -143,6 +143,9 @@ u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *s } EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup); +/* Disables missing prototype warnings */ +__bpf_kfunc_start_defs(); + /** * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx * @@ -152,7 +155,7 @@ EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup); * * @returns %NULL on error, an %__u8 memory pointer on success */ -noinline __u8 * +__bpf_kfunc __u8 * hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size) { struct hid_bpf_ctx_kern *ctx_kern; @@ -167,6 +170,7 @@ hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr return ctx_kern->data + offset; } +__bpf_kfunc_end_defs(); /* * The following set contains all functions we agree BPF programs @@ -241,6 +245,42 @@ int hid_bpf_reconnect(struct hid_device *hdev) return 0; } +static int do_hid_bpf_attach_prog(struct hid_device *hdev, int prog_fd, struct bpf_prog *prog, + __u32 flags) +{ + int fd, err, prog_type; + + prog_type = hid_bpf_get_prog_attach_type(prog); + if (prog_type < 0) + return prog_type; + + if (prog_type >= HID_BPF_PROG_TYPE_MAX) + return -EINVAL; + + if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) { + err = hid_bpf_allocate_event_data(hdev); + if (err) + return err; + } + + fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, prog, flags); + if (fd < 0) + return fd; + + if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) { + err = hid_bpf_reconnect(hdev); + if (err) { + close_fd(fd); + return err; + } + } + + return fd; +} + +/* Disables missing prototype warnings */ +__bpf_kfunc_start_defs(); + /** * hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device * @@ -253,22 +293,17 @@ int hid_bpf_reconnect(struct hid_device *hdev) * is pinned to the BPF file system). 
*/ /* called from syscall */ -noinline int +__bpf_kfunc int hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags) { struct hid_device *hdev; + struct bpf_prog *prog; struct device *dev; - int fd, err, prog_type = hid_bpf_get_prog_attach_type(prog_fd); + int err, fd; if (!hid_bpf_ops) return -EINVAL; - if (prog_type < 0) - return prog_type; - - if (prog_type >= HID_BPF_PROG_TYPE_MAX) - return -EINVAL; - if ((flags & ~HID_BPF_FLAG_MASK)) return -EINVAL; @@ -278,25 +313,29 @@ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags) hdev = to_hid_device(dev); - if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) { - err = hid_bpf_allocate_event_data(hdev); - if (err) - return err; + /* + * take a ref on the prog itself, it will be released + * on errors or when it'll be detached + */ + prog = bpf_prog_get(prog_fd); + if (IS_ERR(prog)) { + err = PTR_ERR(prog); + goto out_dev_put; } - fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, flags); - if (fd < 0) - return fd; - - if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) { - err = hid_bpf_reconnect(hdev); - if (err) { - close_fd(fd); - return err; - } + fd = do_hid_bpf_attach_prog(hdev, prog_fd, prog, flags); + if (fd < 0) { + err = fd; + goto out_prog_put; } return fd; + + out_prog_put: + bpf_prog_put(prog); + out_dev_put: + put_device(dev); + return err; } /** @@ -306,7 +345,7 @@ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags) * * @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error. */ -noinline struct hid_bpf_ctx * +__bpf_kfunc struct hid_bpf_ctx * hid_bpf_allocate_context(unsigned int hid_id) { struct hid_device *hdev; @@ -323,8 +362,10 @@ hid_bpf_allocate_context(unsigned int hid_id) hdev = to_hid_device(dev); ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL); - if (!ctx_kern) + if (!ctx_kern) { + put_device(dev); return NULL; + } ctx_kern->ctx.hid = hdev; @@ -337,14 +378,19 @@ hid_bpf_allocate_context(unsigned int hid_id) * @ctx: the HID-BPF context to release * */ -noinline void +__bpf_kfunc void hid_bpf_release_context(struct hid_bpf_ctx *ctx) { struct hid_bpf_ctx_kern *ctx_kern; + struct hid_device *hid; ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx); + hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */ kfree(ctx_kern); + + /* get_device() is called by bus_find_device() */ + put_device(&hid->dev); } /** @@ -358,7 +404,7 @@ hid_bpf_release_context(struct hid_bpf_ctx *ctx) * * @returns %0 on success, a negative error code otherwise. 
*/ -noinline int +__bpf_kfunc int hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz, enum hid_report_type rtype, enum hid_class_request reqtype) { @@ -426,6 +472,7 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz, kfree(dma_data); return ret; } +__bpf_kfunc_end_defs(); /* our HID-BPF entrypoints */ BTF_SET8_START(hid_bpf_fmodret_ids) diff --git a/drivers/hid/bpf/hid_bpf_dispatch.h b/drivers/hid/bpf/hid_bpf_dispatch.h index 63dfc8605cd2..fbe0639d09f2 100644 --- a/drivers/hid/bpf/hid_bpf_dispatch.h +++ b/drivers/hid/bpf/hid_bpf_dispatch.h @@ -12,9 +12,9 @@ struct hid_bpf_ctx_kern { int hid_bpf_preload_skel(void); void hid_bpf_free_links_and_skel(void); -int hid_bpf_get_prog_attach_type(int prog_fd); +int hid_bpf_get_prog_attach_type(struct bpf_prog *prog); int __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type, int prog_fd, - __u32 flags); + struct bpf_prog *prog, __u32 flags); void __hid_bpf_destroy_device(struct hid_device *hdev); int hid_bpf_prog_run(struct hid_device *hdev, enum hid_bpf_prog_type type, struct hid_bpf_ctx_kern *ctx_kern); diff --git a/drivers/hid/bpf/hid_bpf_jmp_table.c b/drivers/hid/bpf/hid_bpf_jmp_table.c index eca34b7372f9..aa8e1c79cdf5 100644 --- a/drivers/hid/bpf/hid_bpf_jmp_table.c +++ b/drivers/hid/bpf/hid_bpf_jmp_table.c @@ -196,6 +196,7 @@ static void __hid_bpf_do_release_prog(int map_fd, unsigned int idx) static void hid_bpf_release_progs(struct work_struct *work) { int i, j, n, map_fd = -1; + bool hdev_destroyed; if (!jmp_table.map) return; @@ -220,6 +221,12 @@ static void hid_bpf_release_progs(struct work_struct *work) if (entry->hdev) { hdev = entry->hdev; type = entry->type; + /* + * hdev is still valid, even if we are called after hid_destroy_device(): + * when hid_bpf_attach() gets called, it takes a ref on the dev through + * bus_find_device() + */ + hdev_destroyed = hdev->bpf.destroyed; hid_bpf_populate_hdev(hdev, type); @@ -232,12 +239,19 @@ static void hid_bpf_release_progs(struct work_struct *work) if (test_bit(next->idx, jmp_table.enabled)) continue; - if (next->hdev == hdev && next->type == type) + if (next->hdev == hdev && next->type == type) { + /* + * clear the hdev reference and decrement the device ref + * that was taken during bus_find_device() while calling + * hid_bpf_attach() + */ next->hdev = NULL; + put_device(&hdev->dev); + } } - /* if type was rdesc fixup, reconnect device */ - if (type == HID_BPF_PROG_TYPE_RDESC_FIXUP) + /* if type was rdesc fixup and the device is not gone, reconnect device */ + if (type == HID_BPF_PROG_TYPE_RDESC_FIXUP && !hdev_destroyed) hid_bpf_reconnect(hdev); } } @@ -333,15 +347,10 @@ static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog) return err; } -int hid_bpf_get_prog_attach_type(int prog_fd) +int hid_bpf_get_prog_attach_type(struct bpf_prog *prog) { - struct bpf_prog *prog = NULL; - int i; int prog_type = HID_BPF_PROG_TYPE_UNDEF; - - prog = bpf_prog_get(prog_fd); - if (IS_ERR(prog)) - return PTR_ERR(prog); + int i; for (i = 0; i < HID_BPF_PROG_TYPE_MAX; i++) { if (hid_bpf_btf_ids[i] == prog->aux->attach_btf_id) { @@ -350,8 +359,6 @@ int hid_bpf_get_prog_attach_type(int prog_fd) } } - bpf_prog_put(prog); - return prog_type; } @@ -388,19 +395,13 @@ static const struct bpf_link_ops hid_bpf_link_lops = { /* called from syscall */ noinline int __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type, - int prog_fd, __u32 flags) + int prog_fd, struct bpf_prog *prog, __u32 flags) { struct bpf_link_primer 
link_primer; struct hid_bpf_link *link; - struct bpf_prog *prog = NULL; struct hid_bpf_prog_entry *prog_entry; int cnt, err = -EINVAL, prog_table_idx = -1; - /* take a ref on the prog itself */ - prog = bpf_prog_get(prog_fd); - if (IS_ERR(prog)) - return PTR_ERR(prog); - mutex_lock(&hid_bpf_attach_lock); link = kzalloc(sizeof(*link), GFP_USER); @@ -467,7 +468,6 @@ __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type, err_unlock: mutex_unlock(&hid_bpf_attach_lock); - bpf_prog_put(prog); kfree(link); return err; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index fb30e228d35f..828a5c022c64 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -298,6 +298,9 @@ #define USB_VENDOR_ID_CIDC 0x1677 +#define I2C_VENDOR_ID_CIRQUE 0x0488 +#define I2C_PRODUCT_ID_CIRQUE_1063 0x1063 + #define USB_VENDOR_ID_CJTOUCH 0x24b8 #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020 #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040 diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index fd6d8f1d9b8f..d2f3f234f29d 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -203,6 +203,8 @@ struct hidpp_device { struct hidpp_scroll_counter vertical_wheel_counter; u8 wireless_feature_index; + + bool connected_once; }; /* HID++ 1.0 error codes */ @@ -988,8 +990,13 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp) hidpp->protocol_minor = response.rap.params[1]; print_version: - hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n", - hidpp->protocol_major, hidpp->protocol_minor); + if (!hidpp->connected_once) { + hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n", + hidpp->protocol_major, hidpp->protocol_minor); + hidpp->connected_once = true; + } else + hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n", + hidpp->protocol_major, hidpp->protocol_minor); return 0; } @@ -4184,7 +4191,7 @@ static void hidpp_connect_event(struct work_struct *work) /* Get device version to check if it is connected */ ret = hidpp_root_get_protocol_version(hidpp); if (ret) { - hid_info(hidpp->hid_dev, "Disconnected\n"); + hid_dbg(hidpp->hid_dev, "Disconnected\n"); if (hidpp->battery.ps) { hidpp->battery.online = false; hidpp->battery.status = POWER_SUPPLY_STATUS_UNKNOWN; @@ -4610,6 +4617,8 @@ static const struct hid_device_id hidpp_devices[] = { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) }, { /* Logitech G Pro X Superlight Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) }, + { /* Logitech G Pro X Superlight 2 Gaming Mouse over USB */ + HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC09b) }, { /* G935 Gaming Headset */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0x0a87), diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index fd5b0637dad6..3e91e4d6ba6f 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -2153,6 +2153,10 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_SYNAPTICS, 0xcddc) }, + + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_SYNAPTICS, 0xce08) }, { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c index 82d0a77359c4..58b15750dbb0 100644 --- a/drivers/hid/hid-nvidia-shield.c +++ b/drivers/hid/hid-nvidia-shield.c @@ -800,6 +800,8 @@ static inline int 
thunderstrike_led_create(struct thunderstrike *ts) led->name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL, "thunderstrike%d:blue:led", ts->id); + if (!led->name) + return -ENOMEM; led->max_brightness = 1; led->flags = LED_CORE_SUSPENDRESUME | LED_RETAIN_AT_SHUTDOWN; led->brightness_get = &thunderstrike_led_get_brightness; @@ -831,6 +833,8 @@ static inline int thunderstrike_psy_create(struct shield_device *shield_dev) shield_dev->battery_dev.desc.name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL, "thunderstrike_%d", ts->id); + if (!shield_dev->battery_dev.desc.name) + return -ENOMEM; shield_dev->battery_dev.psy = power_supply_register( &hdev->dev, &shield_dev->battery_dev.desc, &psy_cfg); diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index b3c4e50e248a..b08a5ab58528 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -1109,10 +1109,9 @@ static int steam_probe(struct hid_device *hdev, return hid_hw_start(hdev, HID_CONNECT_DEFAULT); steam = devm_kzalloc(&hdev->dev, sizeof(*steam), GFP_KERNEL); - if (!steam) { - ret = -ENOMEM; - goto steam_alloc_fail; - } + if (!steam) + return -ENOMEM; + steam->hdev = hdev; hid_set_drvdata(hdev, steam); spin_lock_init(&steam->lock); @@ -1129,14 +1128,14 @@ static int steam_probe(struct hid_device *hdev, */ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDRAW); if (ret) - goto hid_hw_start_fail; + goto err_cancel_work; ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "%s:hid_hw_open\n", __func__); - goto hid_hw_open_fail; + goto err_hw_stop; } if (steam->quirks & STEAM_QUIRK_WIRELESS) { @@ -1152,36 +1151,37 @@ static int steam_probe(struct hid_device *hdev, hid_err(hdev, "%s:steam_register failed with error %d\n", __func__, ret); - goto input_register_fail; + goto err_hw_close; } } steam->client_hdev = steam_create_client_hid(hdev); if (IS_ERR(steam->client_hdev)) { ret = PTR_ERR(steam->client_hdev); - goto client_hdev_fail; + goto err_stream_unregister; } steam->client_hdev->driver_data = steam; ret = hid_add_device(steam->client_hdev); if (ret) - goto client_hdev_add_fail; + goto err_destroy; return 0; -client_hdev_add_fail: - hid_hw_stop(hdev); -client_hdev_fail: +err_destroy: hid_destroy_device(steam->client_hdev); -input_register_fail: -hid_hw_open_fail: -hid_hw_start_fail: +err_stream_unregister: + if (steam->connected) + steam_unregister(steam); +err_hw_close: + hid_hw_close(hdev); +err_hw_stop: + hid_hw_stop(hdev); +err_cancel_work: cancel_work_sync(&steam->work_connect); cancel_delayed_work_sync(&steam->mode_switch); cancel_work_sync(&steam->rumble_work); -steam_alloc_fail: - hid_err(hdev, "%s: failed with error %d\n", - __func__, ret); + return ret; } diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 13c8dd8cd350..2bc762d31ac7 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -357,8 +357,11 @@ static int hidraw_release(struct inode * inode, struct file * file) down_write(&minors_rwsem); spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags); - for (int i = list->tail; i < list->head; i++) - kfree(list->buffer[i].value); + while (list->tail != list->head) { + kfree(list->buffer[list->tail].value); + list->buffer[list->tail].value = NULL; + list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1); + } list_del(&list->node); spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags); kfree(list); diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 90f316ae9819..2df1ab3c31cc 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ 
b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -49,6 +49,7 @@ #define I2C_HID_QUIRK_RESET_ON_RESUME BIT(2) #define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(3) #define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(4) +#define I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND BIT(5) /* Command opcodes */ #define I2C_HID_OPCODE_RESET 0x01 @@ -131,6 +132,8 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_RESET_ON_RESUME }, { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720, I2C_HID_QUIRK_BAD_INPUT_SIZE }, + { I2C_VENDOR_ID_CIRQUE, I2C_PRODUCT_ID_CIRQUE_1063, + I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND }, /* * Sending the wakeup after reset actually break ELAN touchscreen controller */ @@ -956,7 +959,8 @@ static int i2c_hid_core_suspend(struct i2c_hid *ihid, bool force_poweroff) return ret; /* Save some power */ - i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP); + if (!(ihid->quirks & I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND)) + i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP); disable_irq(client->irq); diff --git a/drivers/hid/i2c-hid/i2c-hid-of.c b/drivers/hid/i2c-hid/i2c-hid-of.c index c4e1fa0273c8..8be4d576da77 100644 --- a/drivers/hid/i2c-hid/i2c-hid-of.c +++ b/drivers/hid/i2c-hid/i2c-hid-of.c @@ -87,6 +87,7 @@ static int i2c_hid_of_probe(struct i2c_client *client) if (!ihid_of) return -ENOMEM; + ihid_of->client = client; ihid_of->ops.power_up = i2c_hid_of_power_up; ihid_of->ops.power_down = i2c_hid_of_power_down; diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c index aa6cb033bb06..03d5601ce807 100644 --- a/drivers/hid/intel-ish-hid/ishtp/bus.c +++ b/drivers/hid/intel-ish-hid/ishtp/bus.c @@ -722,6 +722,8 @@ void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev, spin_lock_irqsave(&ishtp_dev->cl_list_lock, flags); list_for_each_entry(cl, &ishtp_dev->cl_list, link) { cl->state = ISHTP_CL_DISCONNECTED; + if (warm_reset && cl->device->reference_count) + continue; /* * Wake any pending process. 
The waiter would check dev->state diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c index 82c907f01bd3..8a7f2f6a4f86 100644 --- a/drivers/hid/intel-ish-hid/ishtp/client.c +++ b/drivers/hid/intel-ish-hid/ishtp/client.c @@ -49,7 +49,9 @@ static void ishtp_read_list_flush(struct ishtp_cl *cl) list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list) if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) { list_del(&rb->list); - ishtp_io_rb_free(rb); + spin_lock(&cl->free_list_spinlock); + list_add_tail(&rb->list, &cl->free_rb_list.list); + spin_unlock(&cl->free_list_spinlock); } spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags); } diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index b613f11ed949..2bc45b24075c 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -2087,7 +2087,7 @@ static int wacom_allocate_inputs(struct wacom *wacom) return 0; } -static int wacom_register_inputs(struct wacom *wacom) +static int wacom_setup_inputs(struct wacom *wacom) { struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev; struct wacom_wac *wacom_wac = &(wacom->wacom_wac); @@ -2106,10 +2106,6 @@ static int wacom_register_inputs(struct wacom *wacom) input_free_device(pen_input_dev); wacom_wac->pen_input = NULL; pen_input_dev = NULL; - } else { - error = input_register_device(pen_input_dev); - if (error) - goto fail; } error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac); @@ -2118,10 +2114,6 @@ static int wacom_register_inputs(struct wacom *wacom) input_free_device(touch_input_dev); wacom_wac->touch_input = NULL; touch_input_dev = NULL; - } else { - error = input_register_device(touch_input_dev); - if (error) - goto fail; } error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac); @@ -2130,7 +2122,34 @@ static int wacom_register_inputs(struct wacom *wacom) input_free_device(pad_input_dev); wacom_wac->pad_input = NULL; pad_input_dev = NULL; - } else { + } + + return 0; +} + +static int wacom_register_inputs(struct wacom *wacom) +{ + struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev; + struct wacom_wac *wacom_wac = &(wacom->wacom_wac); + int error = 0; + + pen_input_dev = wacom_wac->pen_input; + touch_input_dev = wacom_wac->touch_input; + pad_input_dev = wacom_wac->pad_input; + + if (pen_input_dev) { + error = input_register_device(pen_input_dev); + if (error) + goto fail; + } + + if (touch_input_dev) { + error = input_register_device(touch_input_dev); + if (error) + goto fail; + } + + if (pad_input_dev) { error = input_register_device(pad_input_dev); if (error) goto fail; @@ -2383,6 +2402,20 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) if (error) goto fail; + error = wacom_setup_inputs(wacom); + if (error) + goto fail; + + if (features->type == HID_GENERIC) + connect_mask |= HID_CONNECT_DRIVER; + + /* Regular HID work starts now */ + error = hid_hw_start(hdev, connect_mask); + if (error) { + hid_err(hdev, "hw start failed\n"); + goto fail; + } + error = wacom_register_inputs(wacom); if (error) goto fail; @@ -2397,16 +2430,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) goto fail; } - if (features->type == HID_GENERIC) - connect_mask |= HID_CONNECT_DRIVER; - - /* Regular HID work starts now */ - error = hid_hw_start(hdev, connect_mask); - if (error) { - hid_err(hdev, "hw start failed\n"); - goto fail; - } - if (!wireless) { /* Note that if query fails it is not a hard failure */ wacom_query_tablet_data(wacom); 
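The wacom_sys.c hunks just above split input handling into a capability-setup phase (wacom_setup_inputs) and a registration phase (wacom_register_inputs), and move hid_hw_start() so the HID transport is running before any input device is registered. As a rough illustration of that two-phase probe ordering — a minimal, self-contained userspace sketch, not the driver code itself; setup_inputs(), start_hw(), register_inputs() and stop_hw() are hypothetical stand-ins for the real calls:

#include <stdio.h>

/* Hypothetical stand-ins: each phase returns 0 on success or a negative error code. */
static int setup_inputs(void)    { puts("setup: allocate and describe input capabilities"); return 0; }
static int start_hw(void)        { puts("start: bring up the underlying HID transport");    return 0; }
static int register_inputs(void) { puts("register: expose the input devices to userspace"); return 0; }
static void stop_hw(void)        { puts("unwind: stop the HID transport"); }

/* Probe-style ordering: describe capabilities, start the hardware, then register. */
static int probe(void)
{
	int err;

	err = setup_inputs();        /* phase 1: nothing visible to userspace yet */
	if (err)
		return err;

	err = start_hw();            /* phase 2: transport must be up ...        */
	if (err)
		return err;

	err = register_inputs();     /* phase 3: ... before inputs go live       */
	if (err)
		goto err_stop_hw;

	return 0;

err_stop_hw:
	stop_hw();                   /* unwind only what was actually started */
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}

Registering the input devices only once the transport is started means userspace never opens an input node whose hardware path is not yet live, and the error path only has to unwind the stages that actually completed.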
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index da8a01fedd39..fbe10fbc5769 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2575,7 +2575,14 @@ static void wacom_wac_pen_report(struct hid_device *hdev, wacom_wac->hid_data.tipswitch); input_report_key(input, wacom_wac->tool[0], sense); if (wacom_wac->serial[0]) { - input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]); + /* + * xf86-input-wacom does not accept a serial number + * of '0'. Report the low 32 bits if possible, but + * if they are zero, report the upper ones instead. + */ + __u32 serial_lo = wacom_wac->serial[0] & 0xFFFFFFFFu; + __u32 serial_hi = wacom_wac->serial[0] >> 32; + input_event(input, EV_MSC, MSC_SERIAL, (int)(serial_lo ? serial_lo : serial_hi)); input_report_abs(input, ABS_MISC, sense ? id : 0); } diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 56f7e06c673e..adbf674355b2 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -322,125 +322,89 @@ static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer, pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT; - /* do we need a gpadl body msg */ pfnsize = MAX_SIZE_CHANNEL_MESSAGE - sizeof(struct vmbus_channel_gpadl_header) - sizeof(struct gpa_range); + pfncount = umin(pagecount, pfnsize / sizeof(u64)); + + msgsize = sizeof(struct vmbus_channel_msginfo) + + sizeof(struct vmbus_channel_gpadl_header) + + sizeof(struct gpa_range) + pfncount * sizeof(u64); + msgheader = kzalloc(msgsize, GFP_KERNEL); + if (!msgheader) + return -ENOMEM; + + INIT_LIST_HEAD(&msgheader->submsglist); + msgheader->msgsize = msgsize; + + gpadl_header = (struct vmbus_channel_gpadl_header *) + msgheader->msg; + gpadl_header->rangecount = 1; + gpadl_header->range_buflen = sizeof(struct gpa_range) + + pagecount * sizeof(u64); + gpadl_header->range[0].byte_offset = 0; + gpadl_header->range[0].byte_count = hv_gpadl_size(type, size); + for (i = 0; i < pfncount; i++) + gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn( + type, kbuffer, size, send_offset, i); + *msginfo = msgheader; + + pfnsum = pfncount; + pfnleft = pagecount - pfncount; + + /* how many pfns can we fit in a body message */ + pfnsize = MAX_SIZE_CHANNEL_MESSAGE - + sizeof(struct vmbus_channel_gpadl_body); pfncount = pfnsize / sizeof(u64); - if (pagecount > pfncount) { - /* we need a gpadl body */ - /* fill in the header */ + /* + * If pfnleft is zero, everything fits in the header and no body + * messages are needed + */ + while (pfnleft) { + pfncurr = umin(pfncount, pfnleft); msgsize = sizeof(struct vmbus_channel_msginfo) + - sizeof(struct vmbus_channel_gpadl_header) + - sizeof(struct gpa_range) + pfncount * sizeof(u64); - msgheader = kzalloc(msgsize, GFP_KERNEL); - if (!msgheader) - goto nomem; - - INIT_LIST_HEAD(&msgheader->submsglist); - msgheader->msgsize = msgsize; - - gpadl_header = (struct vmbus_channel_gpadl_header *) - msgheader->msg; - gpadl_header->rangecount = 1; - gpadl_header->range_buflen = sizeof(struct gpa_range) + - pagecount * sizeof(u64); - gpadl_header->range[0].byte_offset = 0; - gpadl_header->range[0].byte_count = hv_gpadl_size(type, size); - for (i = 0; i < pfncount; i++) - gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn( - type, kbuffer, size, send_offset, i); - *msginfo = msgheader; - - pfnsum = pfncount; - pfnleft = pagecount - pfncount; - - /* how many pfns can we fit */ - pfnsize = MAX_SIZE_CHANNEL_MESSAGE - - sizeof(struct vmbus_channel_gpadl_body); - pfncount = pfnsize / sizeof(u64); - - /* fill in the body */ - 
while (pfnleft) { - if (pfnleft > pfncount) - pfncurr = pfncount; - else - pfncurr = pfnleft; - - msgsize = sizeof(struct vmbus_channel_msginfo) + - sizeof(struct vmbus_channel_gpadl_body) + - pfncurr * sizeof(u64); - msgbody = kzalloc(msgsize, GFP_KERNEL); - - if (!msgbody) { - struct vmbus_channel_msginfo *pos = NULL; - struct vmbus_channel_msginfo *tmp = NULL; - /* - * Free up all the allocated messages. - */ - list_for_each_entry_safe(pos, tmp, - &msgheader->submsglist, - msglistentry) { - - list_del(&pos->msglistentry); - kfree(pos); - } - - goto nomem; - } - - msgbody->msgsize = msgsize; - gpadl_body = - (struct vmbus_channel_gpadl_body *)msgbody->msg; + sizeof(struct vmbus_channel_gpadl_body) + + pfncurr * sizeof(u64); + msgbody = kzalloc(msgsize, GFP_KERNEL); + if (!msgbody) { + struct vmbus_channel_msginfo *pos = NULL; + struct vmbus_channel_msginfo *tmp = NULL; /* - * Gpadl is u32 and we are using a pointer which could - * be 64-bit - * This is governed by the guest/host protocol and - * so the hypervisor guarantees that this is ok. + * Free up all the allocated messages. */ - for (i = 0; i < pfncurr; i++) - gpadl_body->pfn[i] = hv_gpadl_hvpfn(type, - kbuffer, size, send_offset, pfnsum + i); - - /* add to msg header */ - list_add_tail(&msgbody->msglistentry, - &msgheader->submsglist); - pfnsum += pfncurr; - pfnleft -= pfncurr; + list_for_each_entry_safe(pos, tmp, + &msgheader->submsglist, + msglistentry) { + + list_del(&pos->msglistentry); + kfree(pos); + } + kfree(msgheader); + return -ENOMEM; } - } else { - /* everything fits in a header */ - msgsize = sizeof(struct vmbus_channel_msginfo) + - sizeof(struct vmbus_channel_gpadl_header) + - sizeof(struct gpa_range) + pagecount * sizeof(u64); - msgheader = kzalloc(msgsize, GFP_KERNEL); - if (msgheader == NULL) - goto nomem; - - INIT_LIST_HEAD(&msgheader->submsglist); - msgheader->msgsize = msgsize; - - gpadl_header = (struct vmbus_channel_gpadl_header *) - msgheader->msg; - gpadl_header->rangecount = 1; - gpadl_header->range_buflen = sizeof(struct gpa_range) + - pagecount * sizeof(u64); - gpadl_header->range[0].byte_offset = 0; - gpadl_header->range[0].byte_count = hv_gpadl_size(type, size); - for (i = 0; i < pagecount; i++) - gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn( - type, kbuffer, size, send_offset, i); - - *msginfo = msgheader; + + msgbody->msgsize = msgsize; + gpadl_body = (struct vmbus_channel_gpadl_body *)msgbody->msg; + + /* + * Gpadl is u32 and we are using a pointer which could + * be 64-bit + * This is governed by the guest/host protocol and + * so the hypervisor guarantees that this is ok. 
+ */ + for (i = 0; i < pfncurr; i++) + gpadl_body->pfn[i] = hv_gpadl_hvpfn(type, + kbuffer, size, send_offset, pfnsum + i); + + /* add to msg header */ + list_add_tail(&msgbody->msglistentry, &msgheader->submsglist); + pfnsum += pfncurr; + pfnleft -= pfncurr; } return 0; -nomem: - kfree(msgheader); - kfree(msgbody); - return -ENOMEM; } /* diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index 42aec2c5606a..9c97c4065fe7 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c @@ -296,6 +296,11 @@ static struct { spinlock_t lock; } host_ts; +static bool timesync_implicit; + +module_param(timesync_implicit, bool, 0644); +MODULE_PARM_DESC(timesync_implicit, "If set treat SAMPLE as SYNC when clock is behind"); + static inline u64 reftime_to_ns(u64 reftime) { return (reftime - WLTIMEDELTA) * 100; @@ -345,6 +350,29 @@ static void hv_set_host_time(struct work_struct *work) } /* + * Due to a bug on Hyper-V hosts, the sync flag may not always be sent on resume. + * Force a sync if the guest is behind. + */ +static inline bool hv_implicit_sync(u64 host_time) +{ + struct timespec64 new_ts; + struct timespec64 threshold_ts; + + new_ts = ns_to_timespec64(reftime_to_ns(host_time)); + ktime_get_real_ts64(&threshold_ts); + + threshold_ts.tv_sec += 5; + + /* + * If guest behind the host by 5 or more seconds. + */ + if (timespec64_compare(&new_ts, &threshold_ts) >= 0) + return true; + + return false; +} + +/* * Synchronize time with host after reboot, restore, etc. * * ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM. @@ -384,7 +412,8 @@ static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags) spin_unlock_irqrestore(&host_ts.lock, flags); /* Schedule work to do do_settimeofday64() */ - if (adj_flags & ICTIMESYNCFLAG_SYNC) + if ((adj_flags & ICTIMESYNCFLAG_SYNC) || + (timesync_implicit && hv_implicit_sync(host_ts.host_time))) schedule_work(&adj_time_work); } diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index b33d5abd9beb..7f7965f3d187 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -988,7 +988,7 @@ static const struct dev_pm_ops vmbus_pm = { }; /* The one and only one */ -static struct bus_type hv_bus = { +static const struct bus_type hv_bus = { .name = "vmbus", .match = vmbus_match, .shutdown = vmbus_shutdown, diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c index f6e1e55e8292..4acc1858d8ac 100644 --- a/drivers/hwmon/aspeed-pwm-tacho.c +++ b/drivers/hwmon/aspeed-pwm-tacho.c @@ -195,6 +195,8 @@ struct aspeed_pwm_tacho_data { u8 fan_tach_ch_source[MAX_ASPEED_FAN_TACH_CHANNELS]; struct aspeed_cooling_device *cdev[8]; const struct attribute_group *groups[3]; + /* protects access to shared ASPEED_PTCR_RESULT */ + struct mutex tach_lock; }; enum type { TYPEM, TYPEN, TYPEO }; @@ -529,6 +531,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv, u8 fan_tach_ch_source, type, mode, both; int ret; + mutex_lock(&priv->tach_lock); + regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0); regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0x1 << fan_tach_ch); @@ -546,6 +550,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv, ASPEED_RPM_STATUS_SLEEP_USEC, usec); + mutex_unlock(&priv->tach_lock); + /* return -ETIMEDOUT if we didn't get an answer. 
*/ if (ret) return ret; @@ -915,6 +921,7 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev) priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + mutex_init(&priv->tach_lock); priv->regmap = devm_regmap_init(dev, NULL, (__force void *)regs, &aspeed_pwm_tacho_regmap_config); if (IS_ERR(priv->regmap)) diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index ba82d1e79c13..b0991dde2e59 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -41,7 +41,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */ #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ -#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */ +#define NUM_REAL_CORES 512 /* Number of Real cores per cpu */ #define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) @@ -419,7 +419,7 @@ static ssize_t show_temp(struct device *dev, } static int create_core_attrs(struct temp_data *tdata, struct device *dev, - int attr_no) + int index) { int i; static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev, @@ -431,13 +431,20 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev, }; for (i = 0; i < tdata->attr_size; i++) { + /* + * We map the attr number to core id of the CPU + * The attr number is always core id + 2 + * The Pkgtemp will always show up as temp1_*, if available + */ + int attr_no = tdata->is_pkg_data ? 1 : tdata->cpu_core_id + 2; + snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, "temp%d_%s", attr_no, suffixes[i]); sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; tdata->sd_attrs[i].dev_attr.attr.mode = 0444; tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; - tdata->sd_attrs[i].index = attr_no; + tdata->sd_attrs[i].index = index; tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr; } tdata->attr_group.attrs = tdata->attrs; @@ -495,30 +502,25 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, struct platform_data *pdata = platform_get_drvdata(pdev); struct cpuinfo_x86 *c = &cpu_data(cpu); u32 eax, edx; - int err, index, attr_no; + int err, index; if (!housekeeping_cpu(cpu, HK_TYPE_MISC)) return 0; /* - * Find attr number for sysfs: - * We map the attr number to core id of the CPU - * The attr number is always core id + 2 - * The Pkgtemp will always show up as temp1_*, if available + * Get the index of tdata in pdata->core_data[] + * tdata for package: pdata->core_data[1] + * tdata for core: pdata->core_data[2] .. 
pdata->core_data[NUM_REAL_CORES + 1] */ if (pkg_flag) { - attr_no = PKG_SYSFS_ATTR_NO; + index = PKG_SYSFS_ATTR_NO; } else { - index = ida_alloc(&pdata->ida, GFP_KERNEL); + index = ida_alloc_max(&pdata->ida, NUM_REAL_CORES - 1, GFP_KERNEL); if (index < 0) return index; - pdata->cpu_map[index] = topology_core_id(cpu); - attr_no = index + BASE_SYSFS_ATTR_NO; - } - if (attr_no > MAX_CORE_DATA - 1) { - err = -ERANGE; - goto ida_free; + pdata->cpu_map[index] = topology_core_id(cpu); + index += BASE_SYSFS_ATTR_NO; } tdata = init_temp_data(cpu, pkg_flag); @@ -544,20 +546,20 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, if (get_ttarget(tdata, &pdev->dev) >= 0) tdata->attr_size++; - pdata->core_data[attr_no] = tdata; + pdata->core_data[index] = tdata; /* Create sysfs interfaces */ - err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no); + err = create_core_attrs(tdata, pdata->hwmon_dev, index); if (err) goto exit_free; return 0; exit_free: - pdata->core_data[attr_no] = NULL; + pdata->core_data[index] = NULL; kfree(tdata); ida_free: if (!pkg_flag) - ida_free(&pdata->ida, index); + ida_free(&pdata->ida, index - BASE_SYSFS_ATTR_NO); return err; } @@ -780,7 +782,7 @@ static int __init coretemp_init(void) if (!x86_match_cpu(coretemp_ids)) return -ENODEV; - max_zones = topology_max_packages() * topology_max_die_per_package(); + max_zones = topology_max_packages() * topology_max_dies_per_package(); zone_devices = kcalloc(max_zones, sizeof(struct platform_device *), GFP_KERNEL); if (!zone_devices) diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index 6307112c2c0c..9ed2c4b6734e 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c @@ -209,7 +209,7 @@ static ssize_t power1_average_show(struct device *dev, * With the new x86 topology modelling, x86_max_cores is the * compute unit number. 
*/ - cu_num = boot_cpu_data.x86_max_cores; + cu_num = topology_num_cores_per_package(); ret = read_registers(data); if (ret) diff --git a/drivers/hwmon/gigabyte_waterforce.c b/drivers/hwmon/gigabyte_waterforce.c index 85e523775714..8129d7b3ceaf 100644 --- a/drivers/hwmon/gigabyte_waterforce.c +++ b/drivers/hwmon/gigabyte_waterforce.c @@ -146,7 +146,7 @@ static int waterforce_get_status(struct waterforce_data *priv) /* Send command for getting status */ ret = waterforce_write_expanded(priv, get_status_cmd, GET_STATUS_CMD_LENGTH); if (ret < 0) - return ret; + goto unlock_and_return; ret = wait_for_completion_interruptible_timeout(&priv->status_report_received, msecs_to_jiffies(STATUS_VALIDITY)); diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c index 8d2ef3145bca..9fbab8f02334 100644 --- a/drivers/hwmon/nct6775-core.c +++ b/drivers/hwmon/nct6775-core.c @@ -3512,6 +3512,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data, const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit; const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL; int num_reg_temp, num_reg_temp_mon, num_reg_tsi_temp; + int num_reg_temp_config; struct device *hwmon_dev; struct sensor_template_group tsi_temp_tg; @@ -3594,6 +3595,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data, reg_temp_over = NCT6106_REG_TEMP_OVER; reg_temp_hyst = NCT6106_REG_TEMP_HYST; reg_temp_config = NCT6106_REG_TEMP_CONFIG; + num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG); reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE; reg_temp_crit = NCT6106_REG_TEMP_CRIT; reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L; @@ -3669,6 +3671,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data, reg_temp_over = NCT6106_REG_TEMP_OVER; reg_temp_hyst = NCT6106_REG_TEMP_HYST; reg_temp_config = NCT6106_REG_TEMP_CONFIG; + num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG); reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE; reg_temp_crit = NCT6106_REG_TEMP_CRIT; reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L; @@ -3746,6 +3749,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data, reg_temp_over = NCT6775_REG_TEMP_OVER; reg_temp_hyst = NCT6775_REG_TEMP_HYST; reg_temp_config = NCT6775_REG_TEMP_CONFIG; + num_reg_temp_config = ARRAY_SIZE(NCT6775_REG_TEMP_CONFIG); reg_temp_alternate = NCT6775_REG_TEMP_ALTERNATE; reg_temp_crit = NCT6775_REG_TEMP_CRIT; @@ -3821,6 +3825,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data, reg_temp_over = NCT6775_REG_TEMP_OVER; reg_temp_hyst = NCT6775_REG_TEMP_HYST; reg_temp_config = NCT6776_REG_TEMP_CONFIG; + num_reg_temp_config = ARRAY_SIZE(NCT6776_REG_TEMP_CONFIG); reg_temp_alternate = NCT6776_REG_TEMP_ALTERNATE; reg_temp_crit = NCT6776_REG_TEMP_CRIT; @@ -3900,6 +3905,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data, reg_temp_over = NCT6779_REG_TEMP_OVER; reg_temp_hyst = NCT6779_REG_TEMP_HYST; reg_temp_config = NCT6779_REG_TEMP_CONFIG; + num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG); reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE; reg_temp_crit = NCT6779_REG_TEMP_CRIT; @@ -4034,6 +4040,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data, reg_temp_over = NCT6779_REG_TEMP_OVER; reg_temp_hyst = NCT6779_REG_TEMP_HYST; reg_temp_config = NCT6779_REG_TEMP_CONFIG; + num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG); reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE; reg_temp_crit = NCT6779_REG_TEMP_CRIT; @@ -4123,6 +4130,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data 
*data, reg_temp_over = NCT6798_REG_TEMP_OVER; reg_temp_hyst = NCT6798_REG_TEMP_HYST; reg_temp_config = NCT6779_REG_TEMP_CONFIG; + num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG); reg_temp_alternate = NCT6798_REG_TEMP_ALTERNATE; reg_temp_crit = NCT6798_REG_TEMP_CRIT; @@ -4204,7 +4212,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data, = reg_temp_crit[src - 1]; if (reg_temp_crit_l && reg_temp_crit_l[i]) data->reg_temp[4][src - 1] = reg_temp_crit_l[i]; - data->reg_temp_config[src - 1] = reg_temp_config[i]; + if (i < num_reg_temp_config) + data->reg_temp_config[src - 1] = reg_temp_config[i]; data->temp_src[src - 1] = src; continue; } @@ -4217,7 +4226,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data, data->reg_temp[0][s] = reg_temp[i]; data->reg_temp[1][s] = reg_temp_over[i]; data->reg_temp[2][s] = reg_temp_hyst[i]; - data->reg_temp_config[s] = reg_temp_config[i]; + if (i < num_reg_temp_config) + data->reg_temp_config[s] = reg_temp_config[i]; if (reg_temp_crit_h && reg_temp_crit_h[i]) data->reg_temp[3][s] = reg_temp_crit_h[i]; else if (reg_temp_crit[src - 1]) diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c index b9bb469e2d8f..e5fa10b3b8bc 100644 --- a/drivers/hwmon/pmbus/mp2975.c +++ b/drivers/hwmon/pmbus/mp2975.c @@ -126,6 +126,21 @@ static const struct regulator_desc __maybe_unused mp2975_reg_desc[] = { #define to_mp2975_data(x) container_of(x, struct mp2975_data, info) +static int mp2975_read_byte_data(struct i2c_client *client, int page, int reg) +{ + switch (reg) { + case PMBUS_VOUT_MODE: + /* + * Report direct format as configured by MFR_DC_LOOP_CTRL. + * Unlike on MP2971/MP2973 the reported VOUT_MODE isn't automatically + * internally updated, but always reads as PB_VOUT_MODE_VID. 
+ */ + return PB_VOUT_MODE_DIRECT; + default: + return -ENODATA; + } +} + static int mp2975_read_word_helper(struct i2c_client *client, int page, int phase, u8 reg, u16 mask) @@ -869,6 +884,7 @@ static struct pmbus_driver_info mp2975_info = { PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT | PMBUS_PHASE_VIRTUAL, + .read_byte_data = mp2975_read_byte_data, .read_word_data = mp2975_read_word_data, #if IS_ENABLED(CONFIG_SENSORS_MP2975_REGULATOR) .num_regulators = 1, diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 3757b9391e60..aa0ee8ecd6f2 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -90,10 +90,8 @@ obj-$(CONFIG_I2C_NPCM) += i2c-npcm7xx.o obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o obj-$(CONFIG_I2C_OMAP) += i2c-omap.o obj-$(CONFIG_I2C_OWL) += i2c-owl.o -i2c-pasemi-objs := i2c-pasemi-core.o i2c-pasemi-pci.o -obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o -i2c-apple-objs := i2c-pasemi-core.o i2c-pasemi-platform.o -obj-$(CONFIG_I2C_APPLE) += i2c-apple.o +obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi-core.o i2c-pasemi-pci.o +obj-$(CONFIG_I2C_APPLE) += i2c-pasemi-core.o i2c-pasemi-platform.o obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o obj-$(CONFIG_I2C_PNX) += i2c-pnx.o obj-$(CONFIG_I2C_PXA) += i2c-pxa.o diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index 5511fd46a65e..ce8c4846b7fa 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -445,6 +445,7 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status) irq_status); irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS); if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) { + irq_handled = irq_status; bus->cmd_err = ret; bus->master_state = ASPEED_I2C_MASTER_INACTIVE; goto out_complete; diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 3932e8d96a17..274e987e4cfa 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -498,11 +498,10 @@ static int i801_block_transaction_by_block(struct i801_priv *priv, /* Set block buffer mode */ outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv)); - inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */ - if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; outb_p(len, SMBHSTDAT0(priv)); + inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */ for (i = 0; i < len; i++) outb_p(data->block[i+1], SMBBLKDAT(priv)); } @@ -520,6 +519,7 @@ static int i801_block_transaction_by_block(struct i801_priv *priv, } data->block[0] = len; + inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */ for (i = 0; i < len; i++) data->block[i + 1] = inb_p(SMBBLKDAT(priv)); } @@ -1416,7 +1416,6 @@ static void i801_add_mux(struct i801_priv *priv) lookup->table[i] = GPIO_LOOKUP(mux_config->gpio_chip, mux_config->gpios[i], "mux", 0); gpiod_add_lookup_table(lookup); - priv->lookup = lookup; /* * Register the mux device, we use PLATFORM_DEVID_NONE here @@ -1430,7 +1429,10 @@ static void i801_add_mux(struct i801_priv *priv) sizeof(struct i2c_mux_gpio_platform_data)); if (IS_ERR(priv->mux_pdev)) { gpiod_remove_lookup_table(lookup); + devm_kfree(dev, lookup); dev_err(dev, "Failed to register i2c-mux-gpio device\n"); + } else { + priv->lookup = lookup; } } @@ -1742,9 +1744,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) i801_enable_host_notify(&priv->adapter); - 
i801_probe_optional_slaves(priv); /* We ignore errors - multiplexing is optional */ i801_add_mux(priv); + i801_probe_optional_slaves(priv); pci_set_drvdata(dev, priv); diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 88a053987403..60e813137f84 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -803,6 +803,11 @@ static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx, ctl &= ~I2CR_MTX; imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR); imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); + + /* flag the last byte as processed */ + i2c_imx_slave_event(i2c_imx, + I2C_SLAVE_READ_PROCESSED, &value); + i2c_imx_slave_finish_op(i2c_imx); return IRQ_HANDLED; } diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c index 7d54a9f34c74..bd8becbdeeb2 100644 --- a/drivers/i2c/busses/i2c-pasemi-core.c +++ b/drivers/i2c/busses/i2c-pasemi-core.c @@ -369,6 +369,7 @@ int pasemi_i2c_common_probe(struct pasemi_smbus *smbus) return 0; } +EXPORT_SYMBOL_GPL(pasemi_i2c_common_probe); irqreturn_t pasemi_irq_handler(int irq, void *dev_id) { @@ -378,3 +379,8 @@ irqreturn_t pasemi_irq_handler(int irq, void *dev_id) complete(&smbus->irq_completion); return IRQ_HANDLED; } +EXPORT_SYMBOL_GPL(pasemi_irq_handler); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Olof Johansson <olof@lixom.net>"); +MODULE_DESCRIPTION("PA Semi PWRficient SMBus driver"); diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 0d2e7171e3a6..da94df466e83 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -613,20 +613,20 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i peripheral.addr = msgs[i].addr; + ret = geni_i2c_gpi(gi2c, &msgs[i], &config, + &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c); + if (ret) + goto err; + if (msgs[i].flags & I2C_M_RD) { ret = geni_i2c_gpi(gi2c, &msgs[i], &config, &rx_addr, &rx_buf, I2C_READ, gi2c->rx_c); if (ret) goto err; - } - - ret = geni_i2c_gpi(gi2c, &msgs[i], &config, - &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c); - if (ret) - goto err; - if (msgs[i].flags & I2C_M_RD) dma_async_issue_pending(gi2c->rx_c); + } + dma_async_issue_pending(gi2c->tx_c); timeout = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT); diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c index ec2a8da134e5..198afee5233c 100644 --- a/drivers/i2c/busses/i2c-wmt.c +++ b/drivers/i2c/busses/i2c-wmt.c @@ -378,11 +378,15 @@ static int wmt_i2c_probe(struct platform_device *pdev) err = i2c_add_adapter(adap); if (err) - return err; + goto err_disable_clk; platform_set_drvdata(pdev, i2c_dev); return 0; + +err_disable_clk: + clk_disable_unprepare(i2c_dev->clk); + return err; } static void wmt_i2c_remove(struct platform_device *pdev) diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig index 91adcac875a4..c9d7afe489e8 100644 --- a/drivers/iio/accel/Kconfig +++ b/drivers/iio/accel/Kconfig @@ -219,10 +219,12 @@ config BMA400 config BMA400_I2C tristate + select REGMAP_I2C depends on BMA400 config BMA400_SPI tristate + select REGMAP_SPI depends on BMA400 config BMC150_ACCEL diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c index 90b7ae6d42b7..484fe2e9fb17 100644 --- a/drivers/iio/accel/adxl367.c +++ b/drivers/iio/accel/adxl367.c @@ -1429,9 +1429,11 @@ static int adxl367_verify_devid(struct adxl367_state *st) unsigned int val; int ret; - ret = regmap_read_poll_timeout(st->regmap, ADXL367_REG_DEVID, val, - val == 
ADXL367_DEVID_AD, 1000, 10000); + ret = regmap_read(st->regmap, ADXL367_REG_DEVID, &val); if (ret) + return dev_err_probe(st->dev, ret, "Failed to read dev id\n"); + + if (val != ADXL367_DEVID_AD) return dev_err_probe(st->dev, -ENODEV, "Invalid dev id 0x%02X, expected 0x%02X\n", val, ADXL367_DEVID_AD); @@ -1510,6 +1512,8 @@ int adxl367_probe(struct device *dev, const struct adxl367_ops *ops, if (ret) return ret; + fsleep(15000); + ret = adxl367_verify_devid(st); if (ret) return ret; diff --git a/drivers/iio/accel/adxl367_i2c.c b/drivers/iio/accel/adxl367_i2c.c index b595fe94f3a3..62c74bdc0d77 100644 --- a/drivers/iio/accel/adxl367_i2c.c +++ b/drivers/iio/accel/adxl367_i2c.c @@ -11,7 +11,7 @@ #include "adxl367.h" -#define ADXL367_I2C_FIFO_DATA 0x42 +#define ADXL367_I2C_FIFO_DATA 0x18 struct adxl367_i2c_state { struct regmap *regmap; diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c index feb86fe6c422..62490424b6ae 100644 --- a/drivers/iio/adc/ad4130.c +++ b/drivers/iio/adc/ad4130.c @@ -1821,7 +1821,7 @@ static int ad4130_setup_int_clk(struct ad4130_state *st) { struct device *dev = &st->spi->dev; struct device_node *of_node = dev_of_node(dev); - struct clk_init_data init; + struct clk_init_data init = {}; const char *clk_name; int ret; @@ -1891,10 +1891,14 @@ static int ad4130_setup(struct iio_dev *indio_dev) return ret; /* - * Configure all GPIOs for output. If configured, the interrupt function - * of P2 takes priority over the GPIO out function. + * Configure unused GPIOs for output. If configured, the interrupt + * function of P2 takes priority over the GPIO out function. */ - val = AD4130_IO_CONTROL_GPIO_CTRL_MASK; + val = 0; + for (i = 0; i < AD4130_MAX_GPIOS; i++) + if (st->pins_fn[i + AD4130_AIN2_P1] == AD4130_PIN_FN_NONE) + val |= FIELD_PREP(AD4130_IO_CONTROL_GPIO_CTRL_MASK, BIT(i)); + val |= FIELD_PREP(AD4130_IO_CONTROL_INT_PIN_SEL_MASK, st->int_pin_sel); ret = regmap_write(st->regmap, AD4130_IO_CONTROL_REG, val); diff --git a/drivers/iio/adc/ad7091r8.c b/drivers/iio/adc/ad7091r8.c index 57700f124803..700564305057 100644 --- a/drivers/iio/adc/ad7091r8.c +++ b/drivers/iio/adc/ad7091r8.c @@ -195,7 +195,7 @@ static int ad7091r8_gpio_setup(struct ad7091r_state *st) st->reset_gpio = devm_gpiod_get_optional(st->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(st->reset_gpio)) - return dev_err_probe(st->dev, PTR_ERR(st->convst_gpio), + return dev_err_probe(st->dev, PTR_ERR(st->reset_gpio), "Error on requesting reset GPIO\n"); if (st->reset_gpio) { diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig index 2de5494e7c22..b15b7a3b66d5 100644 --- a/drivers/iio/humidity/Kconfig +++ b/drivers/iio/humidity/Kconfig @@ -48,6 +48,18 @@ config HDC2010 To compile this driver as a module, choose M here: the module will be called hdc2010. +config HDC3020 + tristate "TI HDC3020 relative humidity and temperature sensor" + depends on I2C + select CRC8 + help + Say yes here to build support for the Texas Instruments + HDC3020, HDC3021 and HDC3022 relative humidity and temperature + sensors. + + To compile this driver as a module, choose M here: the module + will be called hdc3020. 
+ config HID_SENSOR_HUMIDITY tristate "HID Environmental humidity sensor" depends on HID_SENSOR_HUB diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile index f19ff3de97c5..5fbeef299f61 100644 --- a/drivers/iio/humidity/Makefile +++ b/drivers/iio/humidity/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_AM2315) += am2315.o obj-$(CONFIG_DHT11) += dht11.o obj-$(CONFIG_HDC100X) += hdc100x.o obj-$(CONFIG_HDC2010) += hdc2010.o +obj-$(CONFIG_HDC3020) += hdc3020.o obj-$(CONFIG_HID_SENSOR_HUMIDITY) += hid-sensor-humidity.o hts221-y := hts221_core.o \ diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c index 4e3311170725..ed70415512f6 100644 --- a/drivers/iio/humidity/hdc3020.c +++ b/drivers/iio/humidity/hdc3020.c @@ -322,7 +322,7 @@ static int hdc3020_read_raw(struct iio_dev *indio_dev, if (chan->type != IIO_TEMP) return -EINVAL; - *val = 16852; + *val = -16852; return IIO_VAL_INT; default: diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig index 83e53acfbe88..c7f5866a177d 100644 --- a/drivers/iio/imu/bno055/Kconfig +++ b/drivers/iio/imu/bno055/Kconfig @@ -8,6 +8,7 @@ config BOSCH_BNO055 config BOSCH_BNO055_SERIAL tristate "Bosch BNO055 attached via UART" depends on SERIAL_DEV_BUS + select REGMAP select BOSCH_BNO055 help Enable this to support Bosch BNO055 IMUs attached via UART. diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c index 66d4ba088e70..d4f9b5d8d28d 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c @@ -109,6 +109,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) /* compute and process only all complete datum */ nb = fifo_count / bytes_per_datum; fifo_count = nb * bytes_per_datum; + if (nb == 0) + goto end_session; /* Each FIFO data contains all sensors, so same number for FIFO and sensor data */ fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider); inv_sensors_timestamp_interrupt(&st->timestamp, fifo_period, nb, nb, pf->timestamp); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c index 676704f9151f..e6e6e94452a3 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c @@ -111,6 +111,7 @@ int inv_mpu6050_prepare_fifo(struct inv_mpu6050_state *st, bool enable) if (enable) { /* reset timestamping */ inv_sensors_timestamp_reset(&st->timestamp); + inv_sensors_timestamp_apply_odr(&st->timestamp, 0, 0, 0); /* reset FIFO */ d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_RST; ret = regmap_write(st->map, st->reg->user_ctrl, d); @@ -184,6 +185,10 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable) if (result) goto error_power_off; } else { + st->chip_config.gyro_fifo_enable = 0; + st->chip_config.accl_fifo_enable = 0; + st->chip_config.temp_fifo_enable = 0; + st->chip_config.magn_fifo_enable = 0; result = inv_mpu6050_prepare_fifo(st, false); if (result) goto error_power_off; diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 9a85752124dd..173dc00762a1 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -1584,10 +1584,13 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev) ret = iio_device_register_sysfs_group(indio_dev, &iio_dev_opaque->chan_attr_group); if (ret) - goto error_clear_attrs; + goto error_free_chan_attrs; return 0; +error_free_chan_attrs: + 
kfree(iio_dev_opaque->chan_attr_group.attrs); + iio_dev_opaque->chan_attr_group.attrs = NULL; error_clear_attrs: iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c index 5cd27f04b45e..b6c4bef2a7bb 100644 --- a/drivers/iio/light/hid-sensor-als.c +++ b/drivers/iio/light/hid-sensor-als.c @@ -226,6 +226,7 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev, case HID_USAGE_SENSOR_TIME_TIMESTAMP: als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes, *(s64 *)raw_data); + ret = 0; break; default: break; diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c index 69938204456f..42b70cd42b39 100644 --- a/drivers/iio/magnetometer/rm3100-core.c +++ b/drivers/iio/magnetometer/rm3100-core.c @@ -530,6 +530,7 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq) struct rm3100_data *data; unsigned int tmp; int ret; + int samp_rate_index; indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); if (!indio_dev) @@ -586,9 +587,14 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq) ret = regmap_read(regmap, RM3100_REG_TMRC, &tmp); if (ret < 0) return ret; + + samp_rate_index = tmp - RM3100_TMRC_OFFSET; + if (samp_rate_index < 0 || samp_rate_index >= RM3100_SAMP_NUM) { + dev_err(dev, "The value read from RM3100_REG_TMRC is invalid!\n"); + return -EINVAL; + } /* Initializing max wait time, which is double conversion time. */ - data->conversion_time = rm3100_samp_rates[tmp - RM3100_TMRC_OFFSET][2] - * 2; + data->conversion_time = rm3100_samp_rates[samp_rate_index][2] * 2; /* Cycle count values may not be what we want. */ if ((tmp - RM3100_TMRC_OFFSET) == 0) diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c index 433d6fac83c4..a444d4b2978b 100644 --- a/drivers/iio/pressure/bmp280-spi.c +++ b/drivers/iio/pressure/bmp280-spi.c @@ -4,6 +4,7 @@ * * Inspired by the older BMP085 driver drivers/misc/bmp085-spi.c */ +#include <linux/bits.h> #include <linux/module.h> #include <linux/spi/spi.h> #include <linux/err.h> @@ -35,6 +36,34 @@ static int bmp280_regmap_spi_read(void *context, const void *reg, return spi_write_then_read(spi, reg, reg_size, val, val_size); } +static int bmp380_regmap_spi_read(void *context, const void *reg, + size_t reg_size, void *val, size_t val_size) +{ + struct spi_device *spi = to_spi_device(context); + u8 rx_buf[4]; + ssize_t status; + + /* + * Maximum number of consecutive bytes read for a temperature or + * pressure measurement is 3. + */ + if (val_size > 3) + return -EINVAL; + + /* + * According to the BMP3xx datasheets, for a basic SPI read operation, + the first byte needs to be dropped and the rest are the requested + data. 
+ */ + status = spi_write_then_read(spi, reg, 1, rx_buf, val_size + 1); + if (status) + return status; + + memcpy(val, rx_buf + 1, val_size); + + return 0; +} + static struct regmap_bus bmp280_regmap_bus = { .write = bmp280_regmap_spi_write, .read = bmp280_regmap_spi_read, @@ -42,10 +71,19 @@ static struct regmap_bus bmp280_regmap_bus = { .val_format_endian_default = REGMAP_ENDIAN_BIG, }; +static struct regmap_bus bmp380_regmap_bus = { + .write = bmp280_regmap_spi_write, + .read = bmp380_regmap_spi_read, + .read_flag_mask = BIT(7), + .reg_format_endian_default = REGMAP_ENDIAN_BIG, + .val_format_endian_default = REGMAP_ENDIAN_BIG, +}; + static int bmp280_spi_probe(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); const struct bmp280_chip_info *chip_info; + struct regmap_bus *bmp_regmap_bus; struct regmap *regmap; int ret; @@ -58,8 +96,18 @@ static int bmp280_spi_probe(struct spi_device *spi) chip_info = spi_get_device_match_data(spi); + switch (chip_info->chip_id[0]) { + case BMP380_CHIP_ID: + case BMP390_CHIP_ID: + bmp_regmap_bus = &bmp380_regmap_bus; + break; + default: + bmp_regmap_bus = &bmp280_regmap_bus; + break; + } + regmap = devm_regmap_init(&spi->dev, - &bmp280_regmap_bus, + bmp_regmap_bus, &spi->dev, chip_info->regmap_config); if (IS_ERR(regmap)) { @@ -87,6 +135,7 @@ static const struct of_device_id bmp280_of_spi_match[] = { MODULE_DEVICE_TABLE(of, bmp280_of_spi_match); static const struct spi_device_id bmp280_spi_id[] = { + { "bmp085", (kernel_ulong_t)&bmp180_chip_info }, { "bmp180", (kernel_ulong_t)&bmp180_chip_info }, { "bmp181", (kernel_ulong_t)&bmp180_chip_info }, { "bmp280", (kernel_ulong_t)&bmp280_chip_info }, diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c index 28c8269ba65d..0bba4c5a8d40 100644 --- a/drivers/iio/pressure/dlhl60d.c +++ b/drivers/iio/pressure/dlhl60d.c @@ -250,18 +250,17 @@ static irqreturn_t dlh_trigger_handler(int irq, void *private) struct dlh_state *st = iio_priv(indio_dev); int ret; unsigned int chn, i = 0; - __be32 tmp_buf[2]; + __be32 tmp_buf[2] = { }; ret = dlh_start_capture_and_read(st); if (ret) goto out; for_each_set_bit(chn, indio_dev->active_scan_mask, - indio_dev->masklength) { - memcpy(tmp_buf + i, + indio_dev->masklength) { + memcpy(&tmp_buf[i++], &st->rx_buf[1] + chn * DLH_NUM_DATA_BYTES, DLH_NUM_DATA_BYTES); - i++; } iio_push_to_buffers(indio_dev, tmp_buf); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 824349659d69..ce9c5bae83bf 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -401,6 +401,10 @@ static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd) struct bnxt_re_fence_data *fence = &pd->fence; struct ib_mr *ib_mr = &fence->mr->ib_mr; struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; + struct bnxt_re_dev *rdev = pd->rdev; + + if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) + return; memset(wqe, 0, sizeof(*wqe)); wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; @@ -455,6 +459,9 @@ static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd) struct device *dev = &rdev->en_dev->pdev->dev; struct bnxt_re_mr *mr = fence->mr; + if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) + return; + if (fence->mw) { bnxt_re_dealloc_mw(fence->mw); fence->mw = NULL; @@ -486,6 +493,9 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) struct ib_mw *mw; int rc; + if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) + return 0; + dma_addr = dma_map_single(dev, fence->va, 
BNXT_RE_FENCE_BYTES, DMA_BIDIRECTIONAL); rc = dma_mapping_error(dev, dma_addr); @@ -1817,7 +1827,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, switch (srq_attr_mask) { case IB_SRQ_MAX_WR: /* SRQ resize is not supported */ - break; + return -EINVAL; case IB_SRQ_LIMIT: /* Change the SRQ threshold */ if (srq_attr->srq_limit > srq->qplib_srq.max_wqe) @@ -1832,13 +1842,12 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, /* On success, update the shadow */ srq->srq_limit = srq_attr->srq_limit; /* No need to Build and send response back to udata */ - break; + return 0; default: ibdev_err(&rdev->ibdev, "Unsupported srq_attr_mask 0x%x", srq_attr_mask); return -EINVAL; } - return 0; } int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr) @@ -2556,11 +2565,6 @@ static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr, wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; - /* Need unconditional fence for local invalidate - * opcode to work as expected. - */ - wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; - if (wr->send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; if (wr->send_flags & IB_SEND_SOLICITED) @@ -2583,12 +2587,6 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr, wqe->frmr.levels = qplib_frpl->hwq.level; wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; - /* Need unconditional fence for reg_mr - * opcode to function as expected. - */ - - wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; - if (wr->wr.send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; @@ -2719,6 +2717,18 @@ bad: return rc; } +static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe) +{ + /* Need unconditional fence for non-wire memory opcode + * to work as expected. 
+ */ + if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV || + wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR || + wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR || + wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; +} + int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) { @@ -2798,8 +2808,11 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, rc = -EINVAL; goto bad; } - if (!rc) + if (!rc) { + if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx)) + bnxt_re_legacy_set_uc_fence(&wqe); rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); + } bad: if (rc) { ibdev_err(&qp->rdev->ibdev, diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index f022c922fae5..54b4d2f3a5d8 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -280,9 +280,6 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev) static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev) { - - if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) - return; rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev); if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) { bnxt_re_set_resource_limits(rdev); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index c98e04fe2ddd..439d0c7c5d0c 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -744,7 +744,8 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res, bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req), sizeof(resp), 0); rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); - srq->threshold = le16_to_cpu(sb->srq_limit); + if (!rc) + srq->threshold = le16_to_cpu(sb->srq_limit); dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr); diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 68c621ff59d0..5a91cbda4aee 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -2086,7 +2086,7 @@ int init_credit_return(struct hfi1_devdata *dd) "Unable to allocate credit return DMA range for NUMA %d\n", i); ret = -ENOMEM; - goto done; + goto free_cr_base; } } set_dev_node(&dd->pcidev->dev, dd->node); @@ -2094,6 +2094,10 @@ int init_credit_return(struct hfi1_devdata *dd) ret = 0; done: return ret; + +free_cr_base: + free_credit_return(dd); + goto done; } void free_credit_return(struct hfi1_devdata *dd) diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 6e5ac2023328..b67d23b1f286 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -3158,7 +3158,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) { int rval = 0; - if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) { + if ((unlikely(tx->num_desc == tx->desc_limit))) { rval = _extend_sdma_tx_descs(dd, tx); if (rval) { __sdma_txclean(dd, tx); diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h index 8fb752f2eda2..2cb4b96db721 100644 --- a/drivers/infiniband/hw/irdma/defs.h +++ b/drivers/infiniband/hw/irdma/defs.h @@ -346,6 +346,7 @@ enum irdma_cqp_op_type { #define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b #define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c #define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e +#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f #define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520 #define IRDMA_AE_RESET_SENT 0x0601 #define 
IRDMA_AE_TERMINATE_SENT 0x0602 diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c index bd4b2b896444..ad50b77282f8 100644 --- a/drivers/infiniband/hw/irdma/hw.c +++ b/drivers/infiniband/hw/irdma/hw.c @@ -387,6 +387,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf) case IRDMA_AE_LLP_TOO_MANY_RETRIES: case IRDMA_AE_LCE_QP_CATASTROPHIC: case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC: + case IRDMA_AE_LLP_TOO_MANY_RNRS: case IRDMA_AE_LCE_CQ_CATASTROPHIC: case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: default: @@ -570,6 +571,13 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf, dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx); irq_update_affinity_hint(msix_vec->irq, NULL); free_irq(msix_vec->irq, dev_id); + if (rf == dev_id) { + tasklet_kill(&rf->dpc_tasklet); + } else { + struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id; + + tasklet_kill(&iwceq->dpc_tasklet); + } } /** diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index b5eb8d421988..0b046c061742 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -839,7 +839,9 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr, if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline || init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags || - init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags) + init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags || + init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta || + init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta) return -EINVAL; if (rdma_protocol_roce(&iwdev->ibdev, 1)) { @@ -2184,9 +2186,8 @@ static int irdma_create_cq(struct ib_cq *ibcq, info.cq_base_pa = iwcq->kmem.pa; } - if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) - info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2, - (u32)IRDMA_MAX_CQ_READ_THRESH); + info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2, + (u32)IRDMA_MAX_CQ_READ_THRESH); if (irdma_sc_cq_init(cq, &info)) { ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n"); diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c index f87531318feb..a78a067e3ce7 100644 --- a/drivers/infiniband/hw/mlx5/cong.c +++ b/drivers/infiniband/hw/mlx5/cong.c @@ -458,6 +458,12 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num) dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev)); for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) { + if ((i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID || + i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP)) + if (!MLX5_CAP_GEN(mdev, roce) || + !MLX5_CAP_ROCE(mdev, roce_cc_general)) + continue; + dbg_cc_params->params[i].offset = i; dbg_cc_params->params[i].dev = dev; dbg_cc_params->params[i].port_num = port_num; diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 869369cb5b5f..253fea374a72 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -2949,7 +2949,7 @@ DECLARE_UVERBS_NAMED_METHOD( MLX5_IB_METHOD_DEVX_OBJ_MODIFY, UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE, UVERBS_IDR_ANY_OBJECT, - UVERBS_ACCESS_WRITE, + UVERBS_ACCESS_READ, UA_MANDATORY), UVERBS_ATTR_PTR_IN( MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN, diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c index df1d1b0a3ef7..9947feb7fb8a 100644 --- a/drivers/infiniband/hw/mlx5/wr.c +++ b/drivers/infiniband/hw/mlx5/wr.c @@ -78,7 +78,7 @@ static void set_eth_seg(const struct 
ib_send_wr *wr, struct mlx5_ib_qp *qp, */ copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start, left); - memcpy(eseg->inline_hdr.start, pdata, copysz); + memcpy(eseg->inline_hdr.data, pdata, copysz); stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) - sizeof(eseg->inline_hdr.start) + copysz, 16); *size += stride / 16; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 7887a6786ed4..f118ce0a9a61 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1879,8 +1879,17 @@ static int qedr_create_user_qp(struct qedr_dev *dev, /* RQ - read access only (0) */ rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr, ureq.rq_len, true, 0, alloc_and_init); - if (rc) + if (rc) { + ib_umem_release(qp->usq.umem); + qp->usq.umem = NULL; + if (rdma_protocol_roce(&dev->ibdev, 1)) { + qedr_free_pbl(dev, &qp->usq.pbl_info, + qp->usq.pbl_tbl); + } else { + kfree(qp->usq.pbl_tbl); + } return rc; + } } memset(&in_params, 0, sizeof(in_params)); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 58f70cfec45a..040234c01be4 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -79,12 +79,16 @@ module_param(srpt_srq_size, int, 0444); MODULE_PARM_DESC(srpt_srq_size, "Shared receive queue (SRQ) size."); +static int srpt_set_u64_x(const char *buffer, const struct kernel_param *kp) +{ + return kstrtou64(buffer, 16, (u64 *)kp->arg); +} static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp) { return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg); } -module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid, - 0444); +module_param_call(srpt_service_guid, srpt_set_u64_x, srpt_get_u64_x, + &srpt_service_guid, 0444); MODULE_PARM_DESC(srpt_service_guid, "Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA."); @@ -210,10 +214,12 @@ static const char *get_ch_state_name(enum rdma_ch_state s) /** * srpt_qp_event - QP event callback function * @event: Description of the event that occurred. - * @ch: SRPT RDMA channel. + * @ptr: SRPT RDMA channel. 
*/ -static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch) +static void srpt_qp_event(struct ib_event *event, void *ptr) { + struct srpt_rdma_ch *ch = ptr; + pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n", event->event, ch, ch->sess_name, ch->qp->qp_num, get_ch_state_name(ch->state)); @@ -1807,8 +1813,7 @@ retry: ch->cq_size = ch->rq_size + sq_size; qp_init->qp_context = (void *)ch; - qp_init->event_handler - = (void(*)(struct ib_event *, void*))srpt_qp_event; + qp_init->event_handler = srpt_qp_event; qp_init->send_cq = ch->cq; qp_init->recv_cq = ch->cq; qp_init->sq_sig_type = IB_SIGNAL_REQ_WR; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index b1244d7df6cc..14c828adebf7 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -130,7 +130,12 @@ static const struct xpad_device { { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 }, { 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 }, { 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 }, + { 0x03f0, 0x038D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wired */ + { 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wireless */ { 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, + { 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE }, + { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, /* v2 */ + { 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE }, { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, @@ -294,6 +299,7 @@ static const struct xpad_device { { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 }, { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 }, { 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 }, + { 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 }, { 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, @@ -462,6 +468,7 @@ static const struct usb_device_id xpad_table[] = { { USB_INTERFACE_INFO('X', 'B', 0) }, /* Xbox USB-IF not-approved class */ XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 controller */ XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */ + XPAD_XBOX360_VENDOR(0x03f0), /* HP HyperX Xbox 360 controllers */ XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */ XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster Xbox 360 controllers */ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */ @@ -491,6 +498,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x15e4), /* Numark Xbox 360 controllers */ XPAD_XBOX360_VENDOR(0x162e), /* Joytech Xbox 360 controllers */ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ + XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */ XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */ XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */ diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 13ef6284223d..7f67f9f2946b 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -811,7 +811,6 @@ static int atkbd_probe(struct atkbd *atkbd) { struct ps2dev *ps2dev = &atkbd->ps2dev; unsigned char param[2]; - bool skip_getid; /* * 
Some systems, where the bit-twiddling when testing the io-lines of the @@ -825,6 +824,11 @@ static int atkbd_probe(struct atkbd *atkbd) "keyboard reset failed on %s\n", ps2dev->serio->phys); + if (atkbd_skip_getid(atkbd)) { + atkbd->id = 0xab83; + goto deactivate_kbd; + } + /* * Then we check the keyboard ID. We should get 0xab83 under normal conditions. * Some keyboards report different values, but the first byte is always 0xab or @@ -833,18 +837,17 @@ static int atkbd_probe(struct atkbd *atkbd) */ param[0] = param[1] = 0xa5; /* initialize with invalid values */ - skip_getid = atkbd_skip_getid(atkbd); - if (skip_getid || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) { + if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) { /* - * If the get ID command was skipped or failed, we check if we can at least set + * If the get ID command failed, we check if we can at least set * the LEDs on the keyboard. This should work on every keyboard out there. * It also turns the LEDs off, which we want anyway. */ param[0] = 0; if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS)) return -1; - atkbd->id = skip_getid ? 0xab83 : 0xabba; + atkbd->id = 0xabba; return 0; } @@ -860,6 +863,7 @@ static int atkbd_probe(struct atkbd *atkbd) return -1; } +deactivate_kbd: /* * Make sure nothing is coming from the keyboard and disturbs our * internal state. diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c index ba00ecfbd343..b41fd1240f43 100644 --- a/drivers/input/keyboard/gpio_keys_polled.c +++ b/drivers/input/keyboard/gpio_keys_polled.c @@ -315,12 +315,10 @@ static int gpio_keys_polled_probe(struct platform_device *pdev) error = devm_gpio_request_one(dev, button->gpio, flags, button->desc ? : DRV_NAME); - if (error) { - dev_err(dev, - "unable to claim gpio %u, err=%d\n", - button->gpio, error); - return error; - } + if (error) + return dev_err_probe(dev, error, + "unable to claim gpio %u\n", + button->gpio); bdata->gpiod = gpio_to_desc(button->gpio); if (!bdata->gpiod) { diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index 258d5fe3d395..42eaebb3bf5c 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -978,12 +978,12 @@ static int rmi_driver_remove(struct device *dev) rmi_disable_irq(rmi_dev, false); - irq_domain_remove(data->irqdomain); - data->irqdomain = NULL; - rmi_f34_remove_sysfs(rmi_dev); rmi_free_function_list(rmi_dev); + irq_domain_remove(data->irqdomain); + data->irqdomain = NULL; + return 0; } diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h index b585b1dab870..dfc6c581873b 100644 --- a/drivers/input/serio/i8042-acpipnpio.h +++ b/drivers/input/serio/i8042-acpipnpio.h @@ -635,6 +635,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .driver_data = (void *)(SERIO_QUIRK_NOAUX) }, { + /* Fujitsu Lifebook U728 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U728"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOAUX) + }, + { /* Gigabyte M912 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), @@ -1210,6 +1218,12 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { }, { .matches = { + DMI_MATCH(DMI_BOARD_NAME, "NS5x_7xPU"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOAUX) + }, + { + .matches = { DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"), }, .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | diff --git a/drivers/input/touchscreen/goodix.c 
b/drivers/input/touchscreen/goodix.c index af32fbe57b63..b068ff8afbc9 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -884,7 +884,8 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts) } } - if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) { + /* Some devices with gpio_int_idx 0 list a third unused GPIO */ + if ((ts->gpio_count == 2 || ts->gpio_count == 3) && ts->gpio_int_idx == 0) { ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO; gpio_mapping = acpi_goodix_int_first_gpios; } else if (ts->gpio_count == 2 && ts->gpio_int_idx == 1) { diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c index 20331e119beb..03d626776ba1 100644 --- a/drivers/interconnect/qcom/sc8180x.c +++ b/drivers/interconnect/qcom/sc8180x.c @@ -1372,6 +1372,7 @@ static struct qcom_icc_bcm bcm_mm0 = { static struct qcom_icc_bcm bcm_co0 = { .name = "CO0", + .keepalive = true, .num_nodes = 1, .nodes = { &slv_qns_cdsp_mem_noc } }; diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c index 629faa4c9aae..fc22cecf650f 100644 --- a/drivers/interconnect/qcom/sm8550.c +++ b/drivers/interconnect/qcom/sm8550.c @@ -2223,6 +2223,7 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8550", .of_match_table = qnoc_of_match, + .sync_state = icc_sync_state, }, }; diff --git a/drivers/interconnect/qcom/sm8650.c b/drivers/interconnect/qcom/sm8650.c index b83de54577b6..b962e6c233ef 100644 --- a/drivers/interconnect/qcom/sm8650.c +++ b/drivers/interconnect/qcom/sm8650.c @@ -1160,7 +1160,7 @@ static struct qcom_icc_node qns_gemnoc_sf = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", - .enable_mask = BIT(3), + .enable_mask = BIT(0), .num_nodes = 1, .nodes = { &ebi }, }; diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c index d19501d913b3..cbaf4f9c41be 100644 --- a/drivers/interconnect/qcom/x1e80100.c +++ b/drivers/interconnect/qcom/x1e80100.c @@ -1586,6 +1586,7 @@ static struct qcom_icc_node qns_pcie_south_gem_noc_pcie = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = BIT(3), .num_nodes = 1, .nodes = { &ebi }, }; diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 9a29d742617e..0d6095290b6a 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -179,7 +179,7 @@ config FSL_PAMU config MSM_IOMMU bool "MSM IOMMU Support" depends on ARM - depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST + depends on ARCH_QCOM || COMPILE_TEST select IOMMU_API select IOMMU_IO_PGTABLE_ARMV7S help diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 8b3601f285fd..c970eae2313d 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -164,5 +164,4 @@ void amd_iommu_domain_set_pgtable(struct protection_domain *domain, u64 *root, int mode); struct dev_table_entry *get_dev_table(struct amd_iommu *iommu); -extern bool amd_iommu_snp_en; #endif diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index c83bd0c2a1c9..480e7681f4f3 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -30,6 +30,7 @@ #include <asm/io_apic.h> #include <asm/irq_remapping.h> #include <asm/set_memory.h> +#include <asm/sev.h> #include <linux/crash_dump.h> @@ -3221,6 +3222,36 @@ out: return true; } +static void iommu_snp_enable(void) +{ +#ifdef CONFIG_KVM_AMD_SEV + if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP)) + return; + /* + * The SNP support requires that IOMMU 
must be enabled, and is + * not configured in the passthrough mode. + */ + if (no_iommu || iommu_default_passthrough()) { + pr_err("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n"); + return; + } + + amd_iommu_snp_en = check_feature(FEATURE_SNP); + if (!amd_iommu_snp_en) { + pr_err("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n"); + return; + } + + pr_info("IOMMU SNP support enabled.\n"); + + /* Enforce IOMMU v1 pagetable when SNP is enabled. */ + if (amd_iommu_pgtable != AMD_IOMMU_V1) { + pr_warn("Forcing use of AMD IOMMU v1 page table due to SNP.\n"); + amd_iommu_pgtable = AMD_IOMMU_V1; + } +#endif +} + /**************************************************************************** * * AMD IOMMU Initialization State Machine @@ -3256,6 +3287,7 @@ static int __init state_next(void) break; case IOMMU_ENABLED: register_syscore_ops(&amd_iommu_syscore_ops); + iommu_snp_enable(); ret = amd_iommu_init_pci(); init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; break; @@ -3767,40 +3799,85 @@ int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true); } -#ifdef CONFIG_AMD_MEM_ENCRYPT -int amd_iommu_snp_enable(void) +#ifdef CONFIG_KVM_AMD_SEV +static int iommu_page_make_shared(void *page) { - /* - * The SNP support requires that IOMMU must be enabled, and is - * not configured in the passthrough mode. - */ - if (no_iommu || iommu_default_passthrough()) { - pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported"); - return -EINVAL; + unsigned long paddr, pfn; + + paddr = iommu_virt_to_phys(page); + /* Cbit maybe set in the paddr */ + pfn = __sme_clr(paddr) >> PAGE_SHIFT; + + if (!(pfn % PTRS_PER_PMD)) { + int ret, level; + bool assigned; + + ret = snp_lookup_rmpentry(pfn, &assigned, &level); + if (ret) { + pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n", pfn, ret); + return ret; + } + + if (!assigned) { + pr_warn("IOMMU PFN %lx not assigned in RMP table\n", pfn); + return -EINVAL; + } + + if (level > PG_LEVEL_4K) { + ret = psmash(pfn); + if (!ret) + goto done; + + pr_warn("PSMASH failed for IOMMU PFN %lx huge RMP entry, ret: %d, level: %d\n", + pfn, ret, level); + return ret; + } } - /* - * Prevent enabling SNP after IOMMU_ENABLED state because this process - * affect how IOMMU driver sets up data structures and configures - * IOMMU hardware. - */ - if (init_state > IOMMU_ENABLED) { - pr_err("SNP: Too late to enable SNP for IOMMU.\n"); - return -EINVAL; +done: + return rmp_make_shared(pfn, PG_LEVEL_4K); +} + +static int iommu_make_shared(void *va, size_t size) +{ + void *page; + int ret; + + if (!va) + return 0; + + for (page = va; page < (va + size); page += PAGE_SIZE) { + ret = iommu_page_make_shared(page); + if (ret) + return ret; } - amd_iommu_snp_en = check_feature(FEATURE_SNP); + return 0; +} + +int amd_iommu_snp_disable(void) +{ + struct amd_iommu *iommu; + int ret; + if (!amd_iommu_snp_en) - return -EINVAL; + return 0; + + for_each_iommu(iommu) { + ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE); + if (ret) + return ret; - pr_info("SNP enabled\n"); + ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE); + if (ret) + return ret; - /* Enforce IOMMU v1 pagetable when SNP is enabled. 
*/ - if (amd_iommu_pgtable != AMD_IOMMU_V1) { - pr_warn("Force to using AMD IOMMU v1 page table due to SNP\n"); - amd_iommu_pgtable = AMD_IOMMU_V1; + ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE); + if (ret) + return ret; } return 0; } +EXPORT_SYMBOL_GPL(amd_iommu_snp_disable); #endif diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 05722121f00e..4a27fbdb2d84 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -292,10 +292,8 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain, struct mm_struct *mm) { int ret; - unsigned long flags; struct arm_smmu_ctx_desc *cd; struct arm_smmu_mmu_notifier *smmu_mn; - struct arm_smmu_master *master; list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) { if (smmu_mn->mn.mm == mm) { @@ -325,28 +323,9 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain, goto err_free_cd; } - spin_lock_irqsave(&smmu_domain->devices_lock, flags); - list_for_each_entry(master, &smmu_domain->devices, domain_head) { - ret = arm_smmu_write_ctx_desc(master, mm_get_enqcmd_pasid(mm), - cd); - if (ret) { - list_for_each_entry_from_reverse( - master, &smmu_domain->devices, domain_head) - arm_smmu_write_ctx_desc( - master, mm_get_enqcmd_pasid(mm), NULL); - break; - } - } - spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); - if (ret) - goto err_put_notifier; - list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers); return smmu_mn; -err_put_notifier: - /* Frees smmu_mn */ - mmu_notifier_put(&smmu_mn->mn); err_free_cd: arm_smmu_free_shared_cd(cd); return ERR_PTR(ret); @@ -363,9 +342,6 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn) list_del(&smmu_mn->list); - arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm), - NULL); - /* * If we went through clear(), we've already invalidated, and no * new TLB entry can have been formed. 
@@ -381,7 +357,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn) arm_smmu_free_shared_cd(cd); } -static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm) +static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid, + struct mm_struct *mm) { int ret; struct arm_smmu_bond *bond; @@ -404,9 +381,15 @@ static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm) goto err_free_bond; } + ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd); + if (ret) + goto err_put_notifier; + list_add(&bond->list, &master->bonds); return 0; +err_put_notifier: + arm_smmu_mmu_notifier_put(bond->smmu_mn); err_free_bond: kfree(bond); return ret; @@ -568,6 +551,9 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain, struct arm_smmu_master *master = dev_iommu_priv_get(dev); mutex_lock(&sva_lock); + + arm_smmu_write_ctx_desc(master, id, NULL); + list_for_each_entry(t, &master->bonds, list) { if (t->mm == mm) { bond = t; @@ -590,7 +576,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain, struct mm_struct *mm = domain->mm; mutex_lock(&sva_lock); - ret = __arm_smmu_sva_bind(dev, mm); + ret = __arm_smmu_sva_bind(dev, id, mm); mutex_unlock(&sva_lock); return ret; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 0ffb1cf17e0b..a74a509bcd63 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3125,7 +3125,8 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr) static void arm_smmu_free_msis(void *data) { struct device *dev = data; - platform_msi_domain_free_irqs(dev); + + platform_device_msi_free_irqs_all(dev); } static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) @@ -3166,7 +3167,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) } /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */ - ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg); + ret = platform_device_msi_init_and_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg); if (ret) { dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n"); return; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 68b6bc5e7c71..6317aaf7b3ab 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -859,10 +859,14 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain) arm_smmu_rpm_put(smmu); } -static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev) +static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) { struct arm_smmu_domain *smmu_domain; + if (type != IOMMU_DOMAIN_UNMANAGED) { + if (using_legacy_binding || type != IOMMU_DOMAIN_DMA) + return NULL; + } /* * Allocate the domain and initialise some of its data structures. 
* We can't really do anything meaningful until we've added a @@ -875,15 +879,6 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev) mutex_init(&smmu_domain->init_mutex); spin_lock_init(&smmu_domain->cb_lock); - if (dev) { - struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev); - - if (arm_smmu_init_domain_context(smmu_domain, cfg->smmu, dev)) { - kfree(smmu_domain); - return NULL; - } - } - return &smmu_domain->domain; } @@ -1600,7 +1595,7 @@ static struct iommu_ops arm_smmu_ops = { .identity_domain = &arm_smmu_identity_domain, .blocked_domain = &arm_smmu_blocked_domain, .capable = arm_smmu_capable, - .domain_alloc_paging = arm_smmu_domain_alloc_paging, + .domain_alloc = arm_smmu_domain_alloc, .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, .probe_finalize = arm_smmu_probe_finalize, diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 6fb5f6fceea1..11652e0bcab3 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -396,8 +396,6 @@ static int domain_update_device_node(struct dmar_domain *domain) return nid; } -static void domain_update_iotlb(struct dmar_domain *domain); - /* Return the super pagesize bitmap if supported. */ static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain) { @@ -1218,7 +1216,7 @@ domain_lookup_dev_info(struct dmar_domain *domain, return NULL; } -static void domain_update_iotlb(struct dmar_domain *domain) +void domain_update_iotlb(struct dmar_domain *domain) { struct dev_pasid_info *dev_pasid; struct device_domain_info *info; @@ -1368,6 +1366,46 @@ static void domain_flush_pasid_iotlb(struct intel_iommu *iommu, spin_unlock_irqrestore(&domain->lock, flags); } +static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, + unsigned long pfn, unsigned int pages, + int ih) +{ + unsigned int aligned_pages = __roundup_pow_of_two(pages); + unsigned long bitmask = aligned_pages - 1; + unsigned int mask = ilog2(aligned_pages); + u64 addr = (u64)pfn << VTD_PAGE_SHIFT; + + /* + * PSI masks the low order bits of the base address. If the + * address isn't aligned to the mask, then compute a mask value + * needed to ensure the target range is flushed. + */ + if (unlikely(bitmask & pfn)) { + unsigned long end_pfn = pfn + pages - 1, shared_bits; + + /* + * Since end_pfn <= pfn + bitmask, the only way bits + * higher than bitmask can differ in pfn and end_pfn is + * by carrying. This means after masking out bitmask, + * high bits starting with the first set bit in + * shared_bits are all equal in both pfn and end_pfn. + */ + shared_bits = ~(pfn ^ end_pfn) & ~bitmask; + mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG; + } + + /* + * Fallback to domain selective flush if no PSI support or + * the size is too big. + */ + if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) + iommu->flush.flush_iotlb(iommu, did, 0, 0, + DMA_TLB_DSI_FLUSH); + else + iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, + DMA_TLB_PSI_FLUSH); +} + static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, struct dmar_domain *domain, unsigned long pfn, unsigned int pages, @@ -1384,42 +1422,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, if (ih) ih = 1 << 6; - if (domain->use_first_level) { + if (domain->use_first_level) domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih); - } else { - unsigned long bitmask = aligned_pages - 1; - - /* - * PSI masks the low order bits of the base address. 
If the - * address isn't aligned to the mask, then compute a mask value - * needed to ensure the target range is flushed. - */ - if (unlikely(bitmask & pfn)) { - unsigned long end_pfn = pfn + pages - 1, shared_bits; - - /* - * Since end_pfn <= pfn + bitmask, the only way bits - * higher than bitmask can differ in pfn and end_pfn is - * by carrying. This means after masking out bitmask, - * high bits starting with the first set bit in - * shared_bits are all equal in both pfn and end_pfn. - */ - shared_bits = ~(pfn ^ end_pfn) & ~bitmask; - mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG; - } - - /* - * Fallback to domain selective flush if no PSI support or - * the size is too big. - */ - if (!cap_pgsel_inv(iommu->cap) || - mask > cap_max_amask_val(iommu->cap)) - iommu->flush.flush_iotlb(iommu, did, 0, 0, - DMA_TLB_DSI_FLUSH); - else - iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, - DMA_TLB_PSI_FLUSH); - } + else + __iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih); /* * In caching mode, changes of pages from non-present to present require @@ -1443,6 +1449,46 @@ static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain * iommu_flush_write_buffer(iommu); } +/* + * Flush the relevant caches in nested translation if the domain + * also serves as a parent + */ +static void parent_domain_flush(struct dmar_domain *domain, + unsigned long pfn, + unsigned long pages, int ih) +{ + struct dmar_domain *s1_domain; + + spin_lock(&domain->s1_lock); + list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) { + struct device_domain_info *device_info; + struct iommu_domain_info *info; + unsigned long flags; + unsigned long i; + + xa_for_each(&s1_domain->iommu_array, i, info) + __iommu_flush_iotlb_psi(info->iommu, info->did, + pfn, pages, ih); + + if (!s1_domain->has_iotlb_device) + continue; + + spin_lock_irqsave(&s1_domain->lock, flags); + list_for_each_entry(device_info, &s1_domain->devices, link) + /* + * Address translation cache in device side caches the + * result of nested translation. There is no easy way + * to identify the exact set of nested translations + * affected by a change in S2. So just flush the entire + * device cache. 
+ */ + __iommu_flush_dev_iotlb(device_info, 0, + MAX_AGAW_PFN_WIDTH); + spin_unlock_irqrestore(&s1_domain->lock, flags); + } + spin_unlock(&domain->s1_lock); +} + static void intel_flush_iotlb_all(struct iommu_domain *domain) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); @@ -1462,6 +1508,9 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain) if (!cap_caching_mode(iommu->cap)) iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH); } + + if (dmar_domain->nested_parent) + parent_domain_flush(dmar_domain, 0, -1, 0); } static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) @@ -1985,6 +2034,9 @@ static void switch_to_super_page(struct dmar_domain *domain, iommu_flush_iotlb_psi(info->iommu, domain, start_pfn, lvl_pages, 0, 0); + if (domain->nested_parent) + parent_domain_flush(domain, start_pfn, + lvl_pages, 0); } pte++; @@ -3883,6 +3935,7 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags, bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; bool nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT; struct intel_iommu *iommu = info->iommu; + struct dmar_domain *dmar_domain; struct iommu_domain *domain; /* Must be NESTING domain */ @@ -3908,11 +3961,16 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags, if (!domain) return ERR_PTR(-ENOMEM); - if (nested_parent) - to_dmar_domain(domain)->nested_parent = true; + dmar_domain = to_dmar_domain(domain); + + if (nested_parent) { + dmar_domain->nested_parent = true; + INIT_LIST_HEAD(&dmar_domain->s1_domains); + spin_lock_init(&dmar_domain->s1_lock); + } if (dirty_tracking) { - if (to_dmar_domain(domain)->use_first_level) { + if (dmar_domain->use_first_level) { iommu_domain_free(domain); return ERR_PTR(-EOPNOTSUPP); } @@ -3924,8 +3982,12 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags, static void intel_iommu_domain_free(struct iommu_domain *domain) { + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + + WARN_ON(dmar_domain->nested_parent && + !list_empty(&dmar_domain->s1_domains)); if (domain != &si_domain->domain) - domain_exit(to_dmar_domain(domain)); + domain_exit(dmar_domain); } int prepare_domain_attach_device(struct iommu_domain *domain, @@ -4107,6 +4169,9 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain, start_pfn, nrpages, list_empty(&gather->freelist), 0); + if (dmar_domain->nested_parent) + parent_domain_flush(dmar_domain, start_pfn, nrpages, + list_empty(&gather->freelist)); put_pages_list(&gather->freelist); } @@ -4664,21 +4729,70 @@ static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type) return vtd; } +/* + * Set dirty tracking for the device list of a domain. The caller must + * hold the domain->lock when calling it. 
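
device_set_dirty_tracking() walks a device list and stops at the first failure, and its callers roll every list back to the previous setting when that happens. A minimal userspace sketch of that apply-then-unwind shape — the struct, the failure injection and the names are invented for illustration:

/* Illustrative apply-then-unwind loop; not the driver code. */
#include <stdio.h>
#include <stdbool.h>

struct fake_dev {
	bool dirty_tracking;
	bool rejects_change;	/* pretend this device fails the update */
};

static int dev_set_dirty(struct fake_dev *d, bool enable)
{
	if (d->rejects_change)
		return -1;
	d->dirty_tracking = enable;
	return 0;
}

/* Apply to every device; on failure restore the old value on those already done. */
static int set_all(struct fake_dev *devs, int n, bool enable, bool old)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = dev_set_dirty(&devs[i], enable);
		if (ret)
			break;
	}
	if (ret)
		while (i--)
			dev_set_dirty(&devs[i], old);
	return ret;
}

int main(void)
{
	struct fake_dev devs[] = {
		{ .dirty_tracking = false },
		{ .dirty_tracking = false, .rejects_change = true },
		{ .dirty_tracking = false },
	};

	if (set_all(devs, 3, true, false))
		printf("enable failed, dev0 rolled back to %d\n", devs[0].dirty_tracking);
	return 0;
}
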
+ */ +static int device_set_dirty_tracking(struct list_head *devices, bool enable) +{ + struct device_domain_info *info; + int ret = 0; + + list_for_each_entry(info, devices, link) { + ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev, + IOMMU_NO_PASID, enable); + if (ret) + break; + } + + return ret; +} + +static int parent_domain_set_dirty_tracking(struct dmar_domain *domain, + bool enable) +{ + struct dmar_domain *s1_domain; + unsigned long flags; + int ret; + + spin_lock(&domain->s1_lock); + list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) { + spin_lock_irqsave(&s1_domain->lock, flags); + ret = device_set_dirty_tracking(&s1_domain->devices, enable); + spin_unlock_irqrestore(&s1_domain->lock, flags); + if (ret) + goto err_unwind; + } + spin_unlock(&domain->s1_lock); + return 0; + +err_unwind: + list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) { + spin_lock_irqsave(&s1_domain->lock, flags); + device_set_dirty_tracking(&s1_domain->devices, + domain->dirty_tracking); + spin_unlock_irqrestore(&s1_domain->lock, flags); + } + spin_unlock(&domain->s1_lock); + return ret; +} + static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain, bool enable) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); - struct device_domain_info *info; int ret; spin_lock(&dmar_domain->lock); if (dmar_domain->dirty_tracking == enable) goto out_unlock; - list_for_each_entry(info, &dmar_domain->devices, link) { - ret = intel_pasid_setup_dirty_tracking(info->iommu, - info->domain, info->dev, - IOMMU_NO_PASID, enable); + ret = device_set_dirty_tracking(&dmar_domain->devices, enable); + if (ret) + goto err_unwind; + + if (dmar_domain->nested_parent) { + ret = parent_domain_set_dirty_tracking(dmar_domain, enable); if (ret) goto err_unwind; } @@ -4690,10 +4804,8 @@ out_unlock: return 0; err_unwind: - list_for_each_entry(info, &dmar_domain->devices, link) - intel_pasid_setup_dirty_tracking(info->iommu, dmar_domain, - info->dev, IOMMU_NO_PASID, - dmar_domain->dirty_tracking); + device_set_dirty_tracking(&dmar_domain->devices, + dmar_domain->dirty_tracking); spin_unlock(&dmar_domain->lock); return ret; } diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index d02f916d8e59..4145c04cb1c6 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -627,6 +627,10 @@ struct dmar_domain { int agaw; /* maximum mapped address */ u64 max_addr; + /* Protect the s1_domains list */ + spinlock_t s1_lock; + /* Track s1_domains nested on this domain */ + struct list_head s1_domains; }; /* Nested user domain */ @@ -637,6 +641,8 @@ struct dmar_domain { unsigned long s1_pgtbl; /* page table attributes */ struct iommu_hwpt_vtd_s1 s1_cfg; + /* link to parent domain siblings */ + struct list_head s2_link; }; }; @@ -1060,6 +1066,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc, */ #define QI_OPT_WAIT_DRAIN BIT(0) +void domain_update_iotlb(struct dmar_domain *domain); int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu); void device_block_translation(struct device *dev); diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c index f26c7f1c46cc..a7d68f3d518a 100644 --- a/drivers/iommu/intel/nested.c +++ b/drivers/iommu/intel/nested.c @@ -65,12 +65,20 @@ static int intel_nested_attach_dev(struct iommu_domain *domain, list_add(&info->link, &dmar_domain->devices); spin_unlock_irqrestore(&dmar_domain->lock, 
flags); + domain_update_iotlb(dmar_domain); + return 0; } static void intel_nested_domain_free(struct iommu_domain *domain) { - kfree(to_dmar_domain(domain)); + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + struct dmar_domain *s2_domain = dmar_domain->s2_domain; + + spin_lock(&s2_domain->s1_lock); + list_del(&dmar_domain->s2_link); + spin_unlock(&s2_domain->s1_lock); + kfree(dmar_domain); } static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr, @@ -95,7 +103,7 @@ static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr, } static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr, - unsigned long npages, bool ih) + u64 npages, bool ih) { struct iommu_domain_info *info; unsigned int mask; @@ -201,5 +209,9 @@ struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent, spin_lock_init(&domain->lock); xa_init(&domain->iommu_array); + spin_lock(&s2_domain->s1_lock); + list_add(&domain->s2_link, &s2_domain->s1_domains); + spin_unlock(&s2_domain->s1_lock); + return &domain->domain; } diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 3239cefa4c33..108158e2b907 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -428,7 +428,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, * Set up dirty tracking on a second only or nested translation type. */ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, - struct dmar_domain *domain, struct device *dev, u32 pasid, bool enabled) { @@ -445,7 +444,7 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, return -ENODEV; } - did = domain_id_iommu(domain, iommu); + did = pasid_get_domain_id(pte); pgtt = pasid_pte_get_pgtt(pte); if (pgtt != PASID_ENTRY_PGTT_SL_ONLY && pgtt != PASID_ENTRY_PGTT_NESTED) { @@ -658,6 +657,8 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev, pasid_set_domain_id(pte, did); pasid_set_address_width(pte, s2_domain->agaw); pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); + if (s2_domain->dirty_tracking) + pasid_set_ssade(pte); pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED); pasid_set_present(pte); spin_unlock(&iommu->lock); diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index 8d40d4c66e31..487ede039bdd 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -307,7 +307,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, struct dmar_domain *domain, struct device *dev, u32 pasid); int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, - struct dmar_domain *domain, struct device *dev, u32 pasid, bool enabled); int intel_pasid_setup_pass_through(struct intel_iommu *iommu, diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c index c3fc9201d0be..65814cbc8402 100644 --- a/drivers/iommu/iommu-sva.c +++ b/drivers/iommu/iommu-sva.c @@ -41,6 +41,7 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de } iommu_mm->pasid = pasid; INIT_LIST_HEAD(&iommu_mm->sva_domains); + INIT_LIST_HEAD(&iommu_mm->sva_handles); /* * Make sure the write to mm->iommu_mm is not reordered in front of * initialization to iommu_mm fields. 
If it does, readers may see a @@ -82,6 +83,14 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm goto out_unlock; } + list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) { + if (handle->dev == dev) { + refcount_inc(&handle->users); + mutex_unlock(&iommu_sva_lock); + return handle; + } + } + handle = kzalloc(sizeof(*handle), GFP_KERNEL); if (!handle) { ret = -ENOMEM; @@ -111,6 +120,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm list_add(&domain->next, &mm->iommu_mm->sva_domains); out: + refcount_set(&handle->users, 1); + list_add(&handle->handle_item, &mm->iommu_mm->sva_handles); mutex_unlock(&iommu_sva_lock); handle->dev = dev; handle->domain = domain; @@ -141,6 +152,12 @@ void iommu_sva_unbind_device(struct iommu_sva *handle) struct device *dev = handle->dev; mutex_lock(&iommu_sva_lock); + if (!refcount_dec_and_test(&handle->users)) { + mutex_unlock(&iommu_sva_lock); + return; + } + list_del(&handle->handle_item); + iommu_detach_device_pasid(domain, dev, iommu_mm->pasid); if (--domain->users == 0) { list_del(&domain->next); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 68e648b55767..d14413916f93 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1799,7 +1799,7 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) * domain. Do not use in new drivers. */ if (ops->default_domain) { - if (req_type) + if (req_type != ops->default_domain->type) return ERR_PTR(-EINVAL); return ops->default_domain; } @@ -1871,10 +1871,18 @@ static int iommu_get_def_domain_type(struct iommu_group *group, const struct iommu_ops *ops = dev_iommu_ops(dev); int type; - if (!ops->def_domain_type) - return cur_type; - - type = ops->def_domain_type(dev); + if (ops->default_domain) { + /* + * Drivers that declare a global static default_domain will + * always choose that. 
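
The new branch gives a driver-declared static default domain absolute priority over the per-device def_domain_type() hook, with disagreement across a group treated as an error. A loose userspace model of that negotiation — the type values, the ops layout and the -1 conflict marker are invented for illustration:

/* Userspace model of default-domain type negotiation (illustrative only). */
#include <stdio.h>

#define DEMO_DOMAIN_IDENTITY	1
#define DEMO_DOMAIN_DMA		2

struct fake_ops {
	int static_default_type;	/* 0 if the driver has no static default domain */
	int (*def_domain_type)(int dev);
};

/*
 * Fold one device's preference into the group's current choice: a static
 * default domain always wins, otherwise the per-device hook is consulted;
 * disagreement within the group is reported as -1.
 */
static int fold_type(const struct fake_ops *ops, int cur, int dev)
{
	int type;

	if (ops->static_default_type)
		type = ops->static_default_type;
	else if (ops->def_domain_type)
		type = ops->def_domain_type(dev);
	else
		return cur;

	if (!type || cur == type)
		return cur;
	return cur ? -1 : type;
}

static int always_identity(int dev)
{
	(void)dev;
	return DEMO_DOMAIN_IDENTITY;
}

int main(void)
{
	struct fake_ops with_static = { .static_default_type = DEMO_DOMAIN_IDENTITY };
	struct fake_ops per_device  = { .def_domain_type = always_identity };

	printf("static default wins:     %d\n", fold_type(&with_static, 0, 0));
	printf("conflicting preferences: %d\n", fold_type(&per_device, DEMO_DOMAIN_DMA, 0));
	return 0;
}
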
+ */ + type = ops->default_domain->type; + } else { + if (ops->def_domain_type) + type = ops->def_domain_type(dev); + else + return cur_type; + } if (!type || cur_type == type) return cur_type; if (!cur_type) diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index 3f3f1fa1a0a9..33d142f8057d 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -263,7 +263,8 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) if (cmd->__reserved) return -EOPNOTSUPP; - if (cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) + if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) || + (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len)) return -EINVAL; idev = iommufd_get_device(ucmd, cmd->dev_id); diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c index 504ac1b01b2d..05fd9d3abf1b 100644 --- a/drivers/iommu/iommufd/io_pagetable.c +++ b/drivers/iommu/iommufd/io_pagetable.c @@ -1330,20 +1330,23 @@ out_unlock: int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access) { + u32 new_id; int rc; down_write(&iopt->domains_rwsem); down_write(&iopt->iova_rwsem); - rc = xa_alloc(&iopt->access_list, &access->iopt_access_list_id, access, - xa_limit_16b, GFP_KERNEL_ACCOUNT); + rc = xa_alloc(&iopt->access_list, &new_id, access, xa_limit_16b, + GFP_KERNEL_ACCOUNT); + if (rc) goto out_unlock; rc = iopt_calculate_iova_alignment(iopt); if (rc) { - xa_erase(&iopt->access_list, access->iopt_access_list_id); + xa_erase(&iopt->access_list, new_id); goto out_unlock; } + access->iopt_access_list_id = new_id; out_unlock: up_write(&iopt->iova_rwsem); diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h index 482d4059f5db..e854d3f67205 100644 --- a/drivers/iommu/iommufd/iommufd_test.h +++ b/drivers/iommu/iommufd/iommufd_test.h @@ -45,6 +45,7 @@ enum { enum { MOCK_FLAGS_DEVICE_NO_DIRTY = 1 << 0, + MOCK_FLAGS_DEVICE_HUGE_IOVA = 1 << 1, }; enum { diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c index 0a92c9eeaf7f..db8c46bee155 100644 --- a/drivers/iommu/iommufd/iova_bitmap.c +++ b/drivers/iommu/iommufd/iova_bitmap.c @@ -100,7 +100,7 @@ struct iova_bitmap { struct iova_bitmap_map mapped; /* userspace address of the bitmap */ - u64 __user *bitmap; + u8 __user *bitmap; /* u64 index that @mapped points to */ unsigned long mapped_base_index; @@ -113,6 +113,9 @@ struct iova_bitmap { /* length of the IOVA range for the whole bitmap */ size_t length; + + /* length of the IOVA range set ahead the pinned pages */ + unsigned long set_ahead_length; }; /* @@ -162,7 +165,7 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap) { struct iova_bitmap_map *mapped = &bitmap->mapped; unsigned long npages; - u64 __user *addr; + u8 __user *addr; long ret; /* @@ -176,17 +179,18 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap) sizeof(*bitmap->bitmap), PAGE_SIZE); /* - * We always cap at max number of 'struct page' a base page can fit. - * This is, for example, on x86 means 2M of bitmap data max. - */ - npages = min(npages, PAGE_SIZE / sizeof(struct page *)); - - /* * Bitmap address to be pinned is calculated via pointer arithmetic * with bitmap u64 word index. */ addr = bitmap->bitmap + bitmap->mapped_base_index; + /* + * We always cap at max number of 'struct page' a base page can fit. + * This is, for example, on x86 means 2M of bitmap data max. 
+ */ + npages = min(npages + !!offset_in_page(addr), + PAGE_SIZE / sizeof(struct page *)); + ret = pin_user_pages_fast((unsigned long)addr, npages, FOLL_WRITE, mapped->pages); if (ret <= 0) @@ -247,7 +251,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, mapped = &bitmap->mapped; mapped->pgshift = __ffs(page_size); - bitmap->bitmap = data; + bitmap->bitmap = (u8 __user *)data; bitmap->mapped_total_index = iova_bitmap_offset_to_index(bitmap, length - 1) + 1; bitmap->iova = iova; @@ -304,7 +308,7 @@ static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap) remaining = bitmap->mapped_total_index - bitmap->mapped_base_index; remaining = min_t(unsigned long, remaining, - bytes / sizeof(*bitmap->bitmap)); + DIV_ROUND_UP(bytes, sizeof(*bitmap->bitmap))); return remaining; } @@ -341,6 +345,32 @@ static bool iova_bitmap_done(struct iova_bitmap *bitmap) return bitmap->mapped_base_index >= bitmap->mapped_total_index; } +static int iova_bitmap_set_ahead(struct iova_bitmap *bitmap, + size_t set_ahead_length) +{ + int ret = 0; + + while (set_ahead_length > 0 && !iova_bitmap_done(bitmap)) { + unsigned long length = iova_bitmap_mapped_length(bitmap); + unsigned long iova = iova_bitmap_mapped_iova(bitmap); + + ret = iova_bitmap_get(bitmap); + if (ret) + break; + + length = min(length, set_ahead_length); + iova_bitmap_set(bitmap, iova, length); + + set_ahead_length -= length; + bitmap->mapped_base_index += + iova_bitmap_offset_to_index(bitmap, length - 1) + 1; + iova_bitmap_put(bitmap); + } + + bitmap->set_ahead_length = 0; + return ret; +} + /* * Advances to the next range, releases the current pinned * pages and pins the next set of bitmap pages. @@ -357,6 +387,15 @@ static int iova_bitmap_advance(struct iova_bitmap *bitmap) if (iova_bitmap_done(bitmap)) return 0; + /* Iterate, set and skip any bits requested for next iteration */ + if (bitmap->set_ahead_length) { + int ret; + + ret = iova_bitmap_set_ahead(bitmap, bitmap->set_ahead_length); + if (ret) + return ret; + } + /* When advancing the index we pin the next set of bitmap pages */ return iova_bitmap_get(bitmap); } @@ -409,6 +448,7 @@ void iova_bitmap_set(struct iova_bitmap *bitmap, mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; unsigned long last_bit = (((iova + length - 1) - mapped->iova) >> mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; + unsigned long last_page_idx = mapped->npages - 1; do { unsigned int page_idx = cur_bit / BITS_PER_PAGE; @@ -417,10 +457,18 @@ void iova_bitmap_set(struct iova_bitmap *bitmap, last_bit - cur_bit + 1); void *kaddr; + if (unlikely(page_idx > last_page_idx)) + break; + kaddr = kmap_local_page(mapped->pages[page_idx]); bitmap_set(kaddr, offset, nbits); kunmap_local(kaddr); cur_bit += nbits; } while (cur_bit <= last_bit); + + if (unlikely(cur_bit <= last_bit)) { + bitmap->set_ahead_length = + ((last_bit - cur_bit + 1) << bitmap->mapped.pgshift); + } } EXPORT_SYMBOL_NS_GPL(iova_bitmap_set, IOMMUFD); diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index d9e9920c7eba..7a2199470f31 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -36,11 +36,12 @@ static struct mock_bus_type iommufd_mock_bus_type = { }, }; -static atomic_t mock_dev_num; +static DEFINE_IDA(mock_dev_ida); enum { MOCK_DIRTY_TRACK = 1, MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2, + MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE, /* * Like a real page table alignment requires the low bits of the address @@ -53,6 +54,7 @@ enum { MOCK_PFN_START_IOVA = 
_MOCK_PFN_START, MOCK_PFN_LAST_IOVA = _MOCK_PFN_START, MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1, + MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2, }; /* @@ -61,8 +63,8 @@ enum { * In syzkaller mode the 64 bit IOVA is converted into an nth area and offset * value. This has a much smaller randomization space and syzkaller can hit it. */ -static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt, - u64 *iova) +static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt, + u64 *iova) { struct syz_layout { __u32 nth_area; @@ -86,6 +88,21 @@ static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt, return 0; } +static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access, + u64 *iova) +{ + unsigned long ret; + + mutex_lock(&access->ioas_lock); + if (!access->ioas) { + mutex_unlock(&access->ioas_lock); + return 0; + } + ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova); + mutex_unlock(&access->ioas_lock); + return ret; +} + void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd, unsigned int ioas_id, u64 *iova, u32 *flags) { @@ -98,7 +115,7 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd, ioas = iommufd_get_ioas(ucmd->ictx, ioas_id); if (IS_ERR(ioas)) return; - *iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova); + *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova); iommufd_put_object(ucmd->ictx, &ioas->obj); } @@ -121,6 +138,7 @@ enum selftest_obj_type { struct mock_dev { struct device dev; unsigned long flags; + int id; }; struct selftest_obj { @@ -191,6 +209,34 @@ static int mock_domain_set_dirty_tracking(struct iommu_domain *domain, return 0; } +static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock, + unsigned long iova, size_t page_size, + unsigned long flags) +{ + unsigned long cur, end = iova + page_size - 1; + bool dirty = false; + void *ent, *old; + + for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) { + ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE); + if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) + continue; + + dirty = true; + /* Clear dirty */ + if (!(flags & IOMMU_DIRTY_NO_CLEAR)) { + unsigned long val; + + val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA; + old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE, + xa_mk_value(val), GFP_KERNEL); + WARN_ON_ONCE(ent != old); + } + } + + return dirty; +} + static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain, unsigned long iova, size_t size, unsigned long flags, @@ -198,31 +244,31 @@ static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain, { struct mock_iommu_domain *mock = container_of(domain, struct mock_iommu_domain, domain); - unsigned long i, max = size / MOCK_IO_PAGE_SIZE; - void *ent, *old; + unsigned long end = iova + size; + void *ent; if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap) return -EINVAL; - for (i = 0; i < max; i++) { - unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE; + do { + unsigned long pgsize = MOCK_IO_PAGE_SIZE; + unsigned long head; - ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE); - if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) { - /* Clear dirty */ - if (!(flags & IOMMU_DIRTY_NO_CLEAR)) { - unsigned long val; - - val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA; - old = xa_store(&mock->pfns, - cur / MOCK_IO_PAGE_SIZE, - xa_mk_value(val), GFP_KERNEL); - WARN_ON_ONCE(ent != old); - } - iommu_dirty_bitmap_record(dirty, cur, - MOCK_IO_PAGE_SIZE); + ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE); + if (!ent) { + iova += 
pgsize; + continue; } - } + + if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA) + pgsize = MOCK_HUGE_PAGE_SIZE; + head = iova & ~(pgsize - 1); + + /* Clear dirty */ + if (mock_test_and_clear_dirty(mock, head, pgsize, flags)) + iommu_dirty_bitmap_record(dirty, head, pgsize); + iova = head + pgsize; + } while (iova < end); return 0; } @@ -234,6 +280,7 @@ const struct iommu_dirty_ops dirty_ops = { static struct iommu_domain *mock_domain_alloc_paging(struct device *dev) { + struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); struct mock_iommu_domain *mock; mock = kzalloc(sizeof(*mock), GFP_KERNEL); @@ -242,6 +289,8 @@ static struct iommu_domain *mock_domain_alloc_paging(struct device *dev) mock->domain.geometry.aperture_start = MOCK_APERTURE_START; mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST; mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE; + if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA) + mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE; mock->domain.ops = mock_ops.default_domain_ops; mock->domain.type = IOMMU_DOMAIN_UNMANAGED; xa_init(&mock->pfns); @@ -287,7 +336,7 @@ mock_domain_alloc_user(struct device *dev, u32 flags, return ERR_PTR(-EOPNOTSUPP); if (user_data || (has_dirty_flag && no_dirty_ops)) return ERR_PTR(-EOPNOTSUPP); - domain = mock_domain_alloc_paging(NULL); + domain = mock_domain_alloc_paging(dev); if (!domain) return ERR_PTR(-ENOMEM); if (has_dirty_flag) @@ -350,6 +399,9 @@ static int mock_domain_map_pages(struct iommu_domain *domain, if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize) flags = MOCK_PFN_LAST_IOVA; + if (pgsize != MOCK_IO_PAGE_SIZE) { + flags |= MOCK_PFN_HUGE_IOVA; + } old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE, xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) | flags), @@ -394,20 +446,27 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain, /* * iommufd generates unmaps that must be a strict - * superset of the map's performend So every starting - * IOVA should have been an iova passed to map, and the + * superset of the map's performend So every + * starting/ending IOVA should have been an iova passed + * to map. * - * First IOVA must be present and have been a first IOVA - * passed to map_pages + * This simple logic doesn't work when the HUGE_PAGE is + * turned on since the core code will automatically + * switch between the two page sizes creating a break in + * the unmap calls. The break can land in the middle of + * contiguous IOVA. 
*/ - if (first) { - WARN_ON(ent && !(xa_to_value(ent) & - MOCK_PFN_START_IOVA)); - first = false; + if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) { + if (first) { + WARN_ON(ent && !(xa_to_value(ent) & + MOCK_PFN_START_IOVA)); + first = false; + } + if (pgcount == 1 && + cur + MOCK_IO_PAGE_SIZE == pgsize) + WARN_ON(ent && !(xa_to_value(ent) & + MOCK_PFN_LAST_IOVA)); } - if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize) - WARN_ON(ent && !(xa_to_value(ent) & - MOCK_PFN_LAST_IOVA)); iova += MOCK_IO_PAGE_SIZE; ret += MOCK_IO_PAGE_SIZE; @@ -595,7 +654,7 @@ static void mock_dev_release(struct device *dev) { struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); - atomic_dec(&mock_dev_num); + ida_free(&mock_dev_ida, mdev->id); kfree(mdev); } @@ -604,7 +663,8 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags) struct mock_dev *mdev; int rc; - if (dev_flags & ~(MOCK_FLAGS_DEVICE_NO_DIRTY)) + if (dev_flags & + ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA)) return ERR_PTR(-EINVAL); mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); @@ -616,8 +676,12 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags) mdev->dev.release = mock_dev_release; mdev->dev.bus = &iommufd_mock_bus_type.bus; - rc = dev_set_name(&mdev->dev, "iommufd_mock%u", - atomic_inc_return(&mock_dev_num)); + rc = ida_alloc(&mock_dev_ida, GFP_KERNEL); + if (rc < 0) + goto err_put; + mdev->id = rc; + + rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id); if (rc) goto err_put; @@ -1119,7 +1183,7 @@ static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd, } if (flags & MOCK_FLAGS_ACCESS_SYZ) - iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt, + iova = iommufd_test_syz_conv_iova(staccess->access, &cmd->access_pages.iova); npages = (ALIGN(iova + length, PAGE_SIZE) - @@ -1221,8 +1285,8 @@ static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd, } if (flags & MOCK_FLAGS_ACCESS_SYZ) - iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt, - &cmd->access_rw.iova); + iova = iommufd_test_syz_conv_iova(staccess->access, + &cmd->access_rw.iova); rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags); if (rc) diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index f7149d0f3d45..72c07a12f5e1 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -546,6 +546,17 @@ config SIFIVE_PLIC select IRQ_DOMAIN_HIERARCHY select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP +config STARFIVE_JH8100_INTC + bool "StarFive JH8100 External Interrupt Controller" + depends on ARCH_STARFIVE || COMPILE_TEST + default ARCH_STARFIVE + select IRQ_DOMAIN_HIERARCHY + help + This enables support for the INTC chip found in StarFive JH8100 + SoC. + + If you don't know what to do here, say Y. 
+ config EXYNOS_IRQ_COMBINER bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index ffd945fe71aa..ec4a18380998 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -96,6 +96,7 @@ obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o +obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o obj-$(CONFIG_IMX_MU_MSI) += irq-imx-mu-msi.o diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c index 9745a119d0e6..eb02d203c963 100644 --- a/drivers/irqchip/irq-bcm6345-l1.c +++ b/drivers/irqchip/irq-bcm6345-l1.c @@ -242,7 +242,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn, else if (intc->n_words != n_words) return -EINVAL; - cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32), + cpu = intc->cpus[idx] = kzalloc(struct_size(cpu, enable_cache, n_words), GFP_KERNEL); if (!cpu) return -ENOMEM; diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index 24ca1d656adc..36e71af054e9 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c @@ -249,7 +249,7 @@ static int __init bcm7038_l1_init_one(struct device_node *dn, return -EINVAL; } - cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32), + cpu = intc->cpus[idx] = kzalloc(struct_size(cpu, mask_cache, n_words), GFP_KERNEL); if (!cpu) return -ENOMEM; diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index 5559c943f03f..2b0b3175cea0 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -2,7 +2,7 @@ /* * Generic Broadcom Set Top Box Level 2 Interrupt controller driver * - * Copyright (C) 2014-2017 Broadcom + * Copyright (C) 2014-2024 Broadcom */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -112,6 +112,9 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) generic_handle_domain_irq(b->domain, irq); } while (status); out: + /* Don't ack parent before all device writes are done */ + wmb(); + chained_irq_exit(chip, desc); } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index d097001c1e3e..fca888b36680 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -207,6 +207,11 @@ static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its) return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]); } +static bool rdists_support_shareable(void) +{ + return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE); +} + static u16 get_its_list(struct its_vm *vm) { struct its_node *its; @@ -2710,10 +2715,12 @@ static u64 inherit_vpe_l1_table_from_its(void) break; } val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); - val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, - FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); - val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, - FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + if (rdists_support_shareable()) { + val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, + FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, + FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + } val |= 
FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); return val; @@ -2936,8 +2943,10 @@ static int allocate_vpe_l1_table(void) WARN_ON(!IS_ALIGNED(pa, psz)); val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12); - val |= GICR_VPROPBASER_RaWb; - val |= GICR_VPROPBASER_InnerShareable; + if (rdists_support_shareable()) { + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + } val |= GICR_VPROPBASER_4_1_Z; val |= GICR_VPROPBASER_4_1_VALID; @@ -3126,7 +3135,7 @@ static void its_cpu_init_lpis(void) gicr_write_propbaser(val, rbase + GICR_PROPBASER); tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); - if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE) + if (!rdists_support_shareable()) tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK; if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { @@ -3153,7 +3162,7 @@ static void its_cpu_init_lpis(void) gicr_write_pendbaser(val, rbase + GICR_PENDBASER); tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); - if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE) + if (!rdists_support_shareable()) tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK; if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { @@ -3172,6 +3181,7 @@ static void its_cpu_init_lpis(void) val |= GICR_CTLR_ENABLE_LPIS; writel_relaxed(val, rbase + GICR_CTLR); +out: if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); @@ -3207,7 +3217,6 @@ static void its_cpu_init_lpis(void) /* Make sure the GIC has seen the above */ dsb(sy); -out: gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED; pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n", smp_processor_id(), @@ -3817,8 +3826,9 @@ static int its_vpe_set_affinity(struct irq_data *d, bool force) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); - int from, cpu = cpumask_first(mask_val); + struct cpumask common, *table_mask; unsigned long flags; + int from, cpu; /* * Changing affinity is mega expensive, so let's be as lazy as @@ -3834,19 +3844,22 @@ static int its_vpe_set_affinity(struct irq_data *d, * taken on any vLPI handling path that evaluates vpe->col_idx. */ from = vpe_to_cpuid_lock(vpe, &flags); - if (from == cpu) - goto out; - - vpe->col_idx = cpu; + table_mask = gic_data_rdist_cpu(from)->vpe_table_mask; /* - * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD - * is sharing its VPE table with the current one. + * If we are offered another CPU in the same GICv4.1 ITS + * affinity, pick this one. Otherwise, any CPU will do. */ - if (gic_data_rdist_cpu(cpu)->vpe_table_mask && - cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) + if (table_mask && cpumask_and(&common, mask_val, table_mask)) + cpu = cpumask_test_cpu(from, &common) ? 
from : cpumask_first(&common); + else + cpu = cpumask_first(mask_val); + + if (from == cpu) goto out; + vpe->col_idx = cpu; + its_send_vmovp(vpe); its_vpe_db_proxy_move(vpe, from, cpu); @@ -3880,14 +3893,18 @@ static void its_vpe_schedule(struct its_vpe *vpe) val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & GENMASK_ULL(51, 12); val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; - val |= GICR_VPROPBASER_RaWb; - val |= GICR_VPROPBASER_InnerShareable; + if (rdists_support_shareable()) { + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + } gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); val = virt_to_phys(page_address(vpe->vpt_page)) & GENMASK_ULL(51, 16); - val |= GICR_VPENDBASER_RaWaWb; - val |= GICR_VPENDBASER_InnerShareable; + if (rdists_support_shareable()) { + val |= GICR_VPENDBASER_RaWaWb; + val |= GICR_VPENDBASER_InnerShareable; + } /* * There is no good way of finding out if the pending table is * empty as we can race against the doorbell interrupt very @@ -4419,12 +4436,12 @@ static const struct irq_domain_ops its_sgi_domain_ops = { static int its_vpe_id_alloc(void) { - return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); + return ida_alloc_max(&its_vpeid_ida, ITS_MAX_VPEID - 1, GFP_KERNEL); } static void its_vpe_id_free(u16 id) { - ida_simple_remove(&its_vpeid_ida, id); + ida_free(&its_vpeid_ida, id); } static int its_vpe_init(struct its_vpe *vpe) @@ -5078,6 +5095,8 @@ static int __init its_probe_one(struct its_node *its) u32 ctlr; int err; + its_enable_quirks(its); + if (is_v4(its)) { if (!(its->typer & GITS_TYPER_VMOVP)) { err = its_compute_its_list_map(its); @@ -5429,7 +5448,6 @@ static int __init its_of_probe(struct device_node *node) if (!its) return -ENOMEM; - its_enable_quirks(its); err = its_probe_one(its); if (err) { its_node_destroy(its); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 98b0329b7154..6fb276504bcc 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -19,6 +19,7 @@ #include <linux/percpu.h> #include <linux/refcount.h> #include <linux/slab.h> +#include <linux/iopoll.h> #include <linux/irqchip.h> #include <linux/irqchip/arm-gic-common.h> @@ -180,11 +181,6 @@ static enum gic_intid_range get_intid_range(struct irq_data *d) return __get_intid_range(d->hwirq); } -static inline unsigned int gic_irq(struct irq_data *d) -{ - return d->hwirq; -} - static inline bool gic_irq_in_rdist(struct irq_data *d) { switch (get_intid_range(d)) { @@ -251,17 +247,13 @@ static inline void __iomem *gic_dist_base(struct irq_data *d) static void gic_do_wait_for_rwp(void __iomem *base, u32 bit) { - u32 count = 1000000; /* 1s! */ + u32 val; + int ret; - while (readl_relaxed(base + GICD_CTLR) & bit) { - count--; - if (!count) { - pr_err_ratelimited("RWP timeout, gone fishing\n"); - return; - } - cpu_relax(); - udelay(1); - } + ret = readl_relaxed_poll_timeout_atomic(base + GICD_CTLR, val, !(val & bit), + 1, USEC_PER_SEC); + if (ret == -ETIMEDOUT) + pr_err_ratelimited("RWP timeout, gone fishing\n"); } /* Wait for completion of a distributor change */ @@ -279,8 +271,8 @@ static void gic_redist_wait_for_rwp(void) static void gic_enable_redist(bool enable) { void __iomem *rbase; - u32 count = 1000000; /* 1s! 
*/ u32 val; + int ret; if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996) return; @@ -301,16 +293,13 @@ static void gic_enable_redist(bool enable) return; /* No PM support in this redistributor */ } - while (--count) { - val = readl_relaxed(rbase + GICR_WAKER); - if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) - break; - cpu_relax(); - udelay(1); - } - if (!count) + ret = readl_relaxed_poll_timeout_atomic(rbase + GICR_WAKER, val, + enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep), + 1, USEC_PER_SEC); + if (ret == -ETIMEDOUT) { pr_err_ratelimited("redistributor failed to %s...\n", enable ? "wakeup" : "sleep"); + } } /* @@ -548,7 +537,7 @@ static int gic_irq_nmi_setup(struct irq_data *d) * A secondary irq_chip should be in charge of LPI request, * it should not be possible to get there */ - if (WARN_ON(gic_irq(d) >= 8192)) + if (WARN_ON(irqd_to_hwirq(d) >= 8192)) return -EINVAL; /* desc lock should already be held */ @@ -588,7 +577,7 @@ static void gic_irq_nmi_teardown(struct irq_data *d) * A secondary irq_chip should be in charge of LPI request, * it should not be possible to get there */ - if (WARN_ON(gic_irq(d) >= 8192)) + if (WARN_ON(irqd_to_hwirq(d) >= 8192)) return; /* desc lock should already be held */ @@ -626,7 +615,7 @@ static bool gic_arm64_erratum_2941627_needed(struct irq_data *d) static void gic_eoi_irq(struct irq_data *d) { - write_gicreg(gic_irq(d), ICC_EOIR1_EL1); + write_gicreg(irqd_to_hwirq(d), ICC_EOIR1_EL1); isb(); if (gic_arm64_erratum_2941627_needed(d)) { @@ -646,19 +635,19 @@ static void gic_eoimode1_eoi_irq(struct irq_data *d) * No need to deactivate an LPI, or an interrupt that * is is getting forwarded to a vcpu. */ - if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) + if (irqd_to_hwirq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) return; if (!gic_arm64_erratum_2941627_needed(d)) - gic_write_dir(gic_irq(d)); + gic_write_dir(irqd_to_hwirq(d)); else gic_poke_irq(d, GICD_ICACTIVER); } static int gic_set_type(struct irq_data *d, unsigned int type) { + irq_hw_number_t irq = irqd_to_hwirq(d); enum gic_intid_range range; - unsigned int irq = gic_irq(d); void __iomem *base; u32 offset, index; int ret; @@ -684,7 +673,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) ret = gic_configure_irq(index, type, base + offset, NULL); if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) { /* Misconfigured PPIs are usually not fatal */ - pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq); + pr_warn("GIC: PPI INTID%ld is secure or misconfigured\n", irq); ret = 0; } @@ -1702,9 +1691,13 @@ static int gic_irq_domain_select(struct irq_domain *d, irq_hw_number_t hwirq; /* Not for us */ - if (fwspec->fwnode != d->fwnode) + if (fwspec->fwnode != d->fwnode) return 0; + /* Handle pure domain searches */ + if (!fwspec->param_count) + return d->bus_token == bus_token; + /* If this is not DT, then we have a single domain */ if (!is_of_node(fwspec->fwnode)) return 1; diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 412196a7dad5..98aa383e39db 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -162,11 +162,6 @@ static inline void __iomem *gic_cpu_base(struct irq_data *d) return gic_data_cpu_base(gic_data); } -static inline unsigned int gic_irq(struct irq_data *d) -{ - return d->hwirq; -} - static inline bool cascading_gic_irq(struct irq_data *d) { void *data = irq_data_get_irq_handler_data(d); @@ -183,14 +178,16 @@ static inline bool cascading_gic_irq(struct irq_data *d) */ static void gic_poke_irq(struct 
irq_data *d, u32 offset) { - u32 mask = 1 << (gic_irq(d) % 32); - writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4); + u32 mask = 1 << (irqd_to_hwirq(d) % 32); + + writel_relaxed(mask, gic_dist_base(d) + offset + (irqd_to_hwirq(d) / 32) * 4); } static int gic_peek_irq(struct irq_data *d, u32 offset) { - u32 mask = 1 << (gic_irq(d) % 32); - return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask); + u32 mask = 1 << (irqd_to_hwirq(d) % 32); + + return !!(readl_relaxed(gic_dist_base(d) + offset + (irqd_to_hwirq(d) / 32) * 4) & mask); } static void gic_mask_irq(struct irq_data *d) @@ -220,7 +217,7 @@ static void gic_unmask_irq(struct irq_data *d) static void gic_eoi_irq(struct irq_data *d) { - u32 hwirq = gic_irq(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); if (hwirq < 16) hwirq = this_cpu_read(sgi_intid); @@ -230,7 +227,7 @@ static void gic_eoi_irq(struct irq_data *d) static void gic_eoimode1_eoi_irq(struct irq_data *d) { - u32 hwirq = gic_irq(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); /* Do not deactivate an IRQ forwarded to a vcpu. */ if (irqd_is_forwarded_to_vcpu(d)) @@ -293,8 +290,8 @@ static int gic_irq_get_irqchip_state(struct irq_data *d, static int gic_set_type(struct irq_data *d, unsigned int type) { + irq_hw_number_t gicirq = irqd_to_hwirq(d); void __iomem *base = gic_dist_base(d); - unsigned int gicirq = gic_irq(d); int ret; /* Interrupt configuration for SGIs can't be changed */ @@ -309,7 +306,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL); if (ret && gicirq < 32) { /* Misconfigured PPIs are usually not fatal */ - pr_warn("GIC: PPI%d is secure or misconfigured\n", gicirq - 16); + pr_warn("GIC: PPI%ld is secure or misconfigured\n", gicirq - 16); ret = 0; } @@ -319,7 +316,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) { /* Only interrupts on the primary GIC can be forwarded to a vcpu. 
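
gic_poke_irq() and gic_peek_irq() above index the distributor's per-interrupt bitmap registers with the usual word arithmetic: 32 interrupts per 32-bit register, 4 bytes per register. A standalone illustration of that indexing — the hwirq value is arbitrary and the helper name is invented:

/* hwirq -> distributor register offset + bit, as used by the bitmap registers. */
#include <stdio.h>

static void gic_bitmap_index(unsigned int hwirq, unsigned int *reg_off, unsigned int *bit)
{
	*reg_off = (hwirq / 32) * 4;	/* one 32-bit register per 32 interrupts */
	*bit = hwirq % 32;
}

int main(void)
{
	unsigned int off, bit;

	gic_bitmap_index(68, &off, &bit);
	printf("hwirq 68 -> register offset 0x%x, bit %u, mask 0x%08x\n",
	       off, bit, 1u << bit);
	return 0;
}
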
*/ - if (cascading_gic_irq(d) || gic_irq(d) < 16) + if (cascading_gic_irq(d) || irqd_to_hwirq(d) < 16) return -EINVAL; if (vcpu) @@ -796,7 +793,7 @@ static void rmw_writeb(u8 bval, void __iomem *addr) static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d); + void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + irqd_to_hwirq(d); struct gic_chip_data *gic = irq_data_get_irq_chip_data(d); unsigned int cpu; diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c index 5831be454673..b42ed68acfa6 100644 --- a/drivers/irqchip/irq-imgpdc.c +++ b/drivers/irqchip/irq-imgpdc.c @@ -461,12 +461,11 @@ err_generic: return ret; } -static int pdc_intc_remove(struct platform_device *pdev) +static void pdc_intc_remove(struct platform_device *pdev) { struct pdc_intc_priv *priv = platform_get_drvdata(pdev); irq_domain_remove(priv->domain); - return 0; } static const struct of_device_id pdc_intc_match[] = { @@ -479,8 +478,8 @@ static struct platform_driver pdc_intc_driver = { .name = "pdc-intc", .of_match_table = pdc_intc_match, }, - .probe = pdc_intc_probe, - .remove = pdc_intc_remove, + .probe = pdc_intc_probe, + .remove_new = pdc_intc_remove, }; static int __init pdc_intc_init(void) diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c index aa041e4dfee0..511adfaeec82 100644 --- a/drivers/irqchip/irq-imx-intmux.c +++ b/drivers/irqchip/irq-imx-intmux.c @@ -166,6 +166,10 @@ static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec if (fwspec->fwnode != d->fwnode) return false; + /* Handle pure domain searches */ + if (!fwspec->param_count) + return d->bus_token == bus_token; + return irqchip_data->chanidx == fwspec->param[1]; } @@ -282,7 +286,7 @@ out: return ret; } -static int imx_intmux_remove(struct platform_device *pdev) +static void imx_intmux_remove(struct platform_device *pdev) { struct intmux_data *data = platform_get_drvdata(pdev); int i; @@ -298,8 +302,6 @@ static int imx_intmux_remove(struct platform_device *pdev) } pm_runtime_disable(&pdev->dev); - - return 0; } #ifdef CONFIG_PM @@ -354,11 +356,11 @@ static const struct of_device_id imx_intmux_id[] = { static struct platform_driver imx_intmux_driver = { .driver = { - .name = "imx-intmux", - .of_match_table = imx_intmux_id, - .pm = &imx_intmux_pm_ops, + .name = "imx-intmux", + .of_match_table = imx_intmux_id, + .pm = &imx_intmux_pm_ops, }, - .probe = imx_intmux_probe, - .remove = imx_intmux_remove, + .probe = imx_intmux_probe, + .remove_new = imx_intmux_remove, }; builtin_platform_driver(imx_intmux_driver); diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c index bd9543314539..20cf7a9e9ece 100644 --- a/drivers/irqchip/irq-imx-irqsteer.c +++ b/drivers/irqchip/irq-imx-irqsteer.c @@ -231,7 +231,7 @@ out: return ret; } -static int imx_irqsteer_remove(struct platform_device *pdev) +static void imx_irqsteer_remove(struct platform_device *pdev) { struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev); int i; @@ -243,8 +243,6 @@ static int imx_irqsteer_remove(struct platform_device *pdev) irq_domain_remove(irqsteer_data->domain); clk_disable_unprepare(irqsteer_data->ipg_clk); - - return 0; } #ifdef CONFIG_PM @@ -307,11 +305,11 @@ static const struct of_device_id imx_irqsteer_dt_ids[] = { static struct platform_driver imx_irqsteer_driver = { .driver = { - .name = "imx-irqsteer", - .of_match_table = imx_irqsteer_dt_ids, - .pm = 
&imx_irqsteer_pm_ops, + .name = "imx-irqsteer", + .of_match_table = imx_irqsteer_dt_ids, + .pm = &imx_irqsteer_pm_ops, }, - .probe = imx_irqsteer_probe, - .remove = imx_irqsteer_remove, + .probe = imx_irqsteer_probe, + .remove_new = imx_irqsteer_remove, }; builtin_platform_driver(imx_irqsteer_driver); diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c index a36396db4b08..30f1979fa124 100644 --- a/drivers/irqchip/irq-keystone.c +++ b/drivers/irqchip/irq-keystone.c @@ -190,7 +190,7 @@ static int keystone_irq_probe(struct platform_device *pdev) return 0; } -static int keystone_irq_remove(struct platform_device *pdev) +static void keystone_irq_remove(struct platform_device *pdev) { struct keystone_irq_device *kirq = platform_get_drvdata(pdev); int hwirq; @@ -201,7 +201,6 @@ static int keystone_irq_remove(struct platform_device *pdev) irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq)); irq_domain_remove(kirq->irqd); - return 0; } static const struct of_device_id keystone_irq_dt_ids[] = { @@ -212,7 +211,7 @@ MODULE_DEVICE_TABLE(of, keystone_irq_dt_ids); static struct platform_driver keystone_irq_device_driver = { .probe = keystone_irq_probe, - .remove = keystone_irq_remove, + .remove_new = keystone_irq_remove, .driver = { .name = "keystone_irq", .of_match_table = of_match_ptr(keystone_irq_dt_ids), diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index 1623cd779175..b64cbe3052e8 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -198,6 +198,12 @@ static void eiointc_irq_dispatch(struct irq_desc *desc) for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) { pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3)); + + /* Skip handling if pending bitmap is zero */ + if (!pending) + continue; + + /* Clear the IRQs */ iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3)); while (pending) { int bit = __ffs(pending); @@ -241,7 +247,7 @@ static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq, int ret; unsigned int i, type; unsigned long hwirq = 0; - struct eiointc *priv = domain->host_data; + struct eiointc_priv *priv = domain->host_data; ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type); if (ret) @@ -304,23 +310,7 @@ static int eiointc_suspend(void) static void eiointc_resume(void) { - int i, j; - struct irq_desc *desc; - struct irq_data *irq_data; - eiointc_router_init(0); - - for (i = 0; i < nr_pics; i++) { - for (j = 0; j < eiointc_priv[0]->vec_count; j++) { - desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j); - if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) { - raw_spin_lock(&desc->lock); - irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc)); - eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0); - raw_spin_unlock(&desc->lock); - } - } - } } static struct syscore_ops eiointc_syscore_ops = { diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c index 15cf80b46322..1aef5c4d27c6 100644 --- a/drivers/irqchip/irq-ls-scfg-msi.c +++ b/drivers/irqchip/irq-ls-scfg-msi.c @@ -398,7 +398,7 @@ static int ls_scfg_msi_probe(struct platform_device *pdev) return 0; } -static int ls_scfg_msi_remove(struct platform_device *pdev) +static void ls_scfg_msi_remove(struct platform_device *pdev) { struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev); int i; @@ -410,17 +410,15 @@ static int ls_scfg_msi_remove(struct platform_device *pdev) 
irq_domain_remove(msi_data->parent); platform_set_drvdata(pdev, NULL); - - return 0; } static struct platform_driver ls_scfg_msi_driver = { .driver = { - .name = "ls-scfg-msi", - .of_match_table = ls_scfg_msi_id, + .name = "ls-scfg-msi", + .of_match_table = ls_scfg_msi_id, }, - .probe = ls_scfg_msi_probe, - .remove = ls_scfg_msi_remove, + .probe = ls_scfg_msi_probe, + .remove_new = ls_scfg_msi_remove, }; module_platform_driver(ls_scfg_msi_driver); diff --git a/drivers/irqchip/irq-madera.c b/drivers/irqchip/irq-madera.c index 3eb1f8cdf674..acceb6e7fa95 100644 --- a/drivers/irqchip/irq-madera.c +++ b/drivers/irqchip/irq-madera.c @@ -222,7 +222,7 @@ static int madera_irq_probe(struct platform_device *pdev) return 0; } -static int madera_irq_remove(struct platform_device *pdev) +static void madera_irq_remove(struct platform_device *pdev) { struct madera *madera = dev_get_drvdata(pdev->dev.parent); @@ -232,13 +232,11 @@ static int madera_irq_remove(struct platform_device *pdev) */ madera->irq_dev = NULL; regmap_del_irq_chip(madera->irq, madera->irq_data); - - return 0; } static struct platform_driver madera_irq_driver = { - .probe = &madera_irq_probe, - .remove = &madera_irq_remove, + .probe = madera_irq_probe, + .remove_new = madera_irq_remove, .driver = { .name = "madera-irq", .pm = &madera_irq_pm_ops, diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 5101a3fb11df..58881d313979 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -235,22 +235,17 @@ static const struct irq_domain_ops mbigen_domain_ops = { static int mbigen_of_create_domain(struct platform_device *pdev, struct mbigen_device *mgn_chip) { - struct device *parent; struct platform_device *child; struct irq_domain *domain; struct device_node *np; u32 num_pins; int ret = 0; - parent = bus_get_dev_root(&platform_bus_type); - if (!parent) - return -ENODEV; - for_each_child_of_node(pdev->dev.of_node, np) { if (!of_property_read_bool(np, "interrupt-controller")) continue; - child = of_platform_device_create(np, NULL, parent); + child = of_platform_device_create(np, NULL, NULL); if (!child) { ret = -ENOMEM; break; @@ -273,7 +268,6 @@ static int mbigen_of_create_domain(struct platform_device *pdev, } } - put_device(parent); if (ret) of_node_put(np); diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c index f88df39f4129..9a1791908598 100644 --- a/drivers/irqchip/irq-meson-gpio.c +++ b/drivers/irqchip/irq-meson-gpio.c @@ -154,6 +154,10 @@ static const struct meson_gpio_irq_params c3_params = { INIT_MESON_S4_COMMON_DATA(55) }; +static const struct meson_gpio_irq_params t7_params = { + INIT_MESON_S4_COMMON_DATA(157) +}; + static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = { { .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params }, { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params }, @@ -165,6 +169,7 @@ static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = { { .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params }, { .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params }, { .compatible = "amlogic,c3-gpio-intc", .data = &c3_params }, + { .compatible = "amlogic,t7-gpio-intc", .data = &t7_params }, { } }; diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c index ef3d3646ccc2..d17d9c0e2880 100644 --- a/drivers/irqchip/irq-mvebu-pic.c +++ b/drivers/irqchip/irq-mvebu-pic.c @@ -167,14 +167,12 @@ static int mvebu_pic_probe(struct platform_device 
*pdev) return 0; } -static int mvebu_pic_remove(struct platform_device *pdev) +static void mvebu_pic_remove(struct platform_device *pdev) { struct mvebu_pic *pic = platform_get_drvdata(pdev); on_each_cpu(mvebu_pic_disable_percpu_irq, pic, 1); irq_domain_remove(pic->domain); - - return 0; } static const struct of_device_id mvebu_pic_of_match[] = { @@ -184,11 +182,11 @@ static const struct of_device_id mvebu_pic_of_match[] = { MODULE_DEVICE_TABLE(of, mvebu_pic_of_match); static struct platform_driver mvebu_pic_driver = { - .probe = mvebu_pic_probe, - .remove = mvebu_pic_remove, + .probe = mvebu_pic_probe, + .remove_new = mvebu_pic_remove, .driver = { - .name = "mvebu-pic", - .of_match_table = mvebu_pic_of_match, + .name = "mvebu-pic", + .of_match_table = mvebu_pic_of_match, }, }; module_platform_driver(mvebu_pic_driver); diff --git a/drivers/irqchip/irq-pruss-intc.c b/drivers/irqchip/irq-pruss-intc.c index 0f64ecb9b1f4..060eb000e9d3 100644 --- a/drivers/irqchip/irq-pruss-intc.c +++ b/drivers/irqchip/irq-pruss-intc.c @@ -599,7 +599,7 @@ fail_irq: return ret; } -static int pruss_intc_remove(struct platform_device *pdev) +static void pruss_intc_remove(struct platform_device *pdev) { struct pruss_intc *intc = platform_get_drvdata(pdev); u8 max_system_events = intc->soc_config->num_system_events; @@ -616,8 +616,6 @@ static int pruss_intc_remove(struct platform_device *pdev) irq_dispose_mapping(irq_find_mapping(intc->domain, hwirq)); irq_domain_remove(intc->domain); - - return 0; } static const struct pruss_intc_match_data pruss_intc_data = { @@ -645,12 +643,12 @@ MODULE_DEVICE_TABLE(of, pruss_intc_of_match); static struct platform_driver pruss_intc_driver = { .driver = { - .name = "pruss-intc", - .of_match_table = pruss_intc_of_match, - .suppress_bind_attrs = true, + .name = "pruss-intc", + .of_match_table = pruss_intc_of_match, + .suppress_bind_attrs = true, }, - .probe = pruss_intc_probe, - .remove = pruss_intc_remove, + .probe = pruss_intc_probe, + .remove_new = pruss_intc_remove, }; module_platform_driver(pruss_intc_driver); diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c index cda5838d2232..7942d8eb3d00 100644 --- a/drivers/irqchip/irq-qcom-mpm.c +++ b/drivers/irqchip/irq-qcom-mpm.c @@ -389,8 +389,8 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent) /* Don't use devm_ioremap_resource, as we're accessing a shared region. */ priv->base = devm_ioremap(dev, res.start, resource_size(&res)); of_node_put(msgram_np); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); + if (!priv->base) + return -ENOMEM; } else { /* Otherwise, fall back to simple MMIO. 
*/ priv->base = devm_platform_ioremap_resource(pdev, 0); diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index fa19585f3dee..9ad37237ba95 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -561,14 +561,13 @@ err0: return ret; } -static int intc_irqpin_remove(struct platform_device *pdev) +static void intc_irqpin_remove(struct platform_device *pdev) { struct intc_irqpin_priv *p = platform_get_drvdata(pdev); irq_domain_remove(p->irq_domain); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); - return 0; } static int __maybe_unused intc_irqpin_suspend(struct device *dev) @@ -585,11 +584,11 @@ static SIMPLE_DEV_PM_OPS(intc_irqpin_pm_ops, intc_irqpin_suspend, NULL); static struct platform_driver intc_irqpin_device_driver = { .probe = intc_irqpin_probe, - .remove = intc_irqpin_remove, + .remove_new = intc_irqpin_remove, .driver = { - .name = "renesas_intc_irqpin", - .of_match_table = intc_irqpin_dt_ids, - .pm = &intc_irqpin_pm_ops, + .name = "renesas_intc_irqpin", + .of_match_table = intc_irqpin_dt_ids, + .pm = &intc_irqpin_pm_ops, } }; diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c index 49b446b396f9..76026e0b8e20 100644 --- a/drivers/irqchip/irq-renesas-irqc.c +++ b/drivers/irqchip/irq-renesas-irqc.c @@ -218,14 +218,13 @@ err_runtime_pm_disable: return ret; } -static int irqc_remove(struct platform_device *pdev) +static void irqc_remove(struct platform_device *pdev) { struct irqc_priv *p = platform_get_drvdata(pdev); irq_domain_remove(p->irq_domain); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); - return 0; } static int __maybe_unused irqc_suspend(struct device *dev) @@ -248,11 +247,11 @@ MODULE_DEVICE_TABLE(of, irqc_dt_ids); static struct platform_driver irqc_device_driver = { .probe = irqc_probe, - .remove = irqc_remove, + .remove_new = irqc_remove, .driver = { - .name = "renesas_irqc", + .name = "renesas_irqc", .of_match_table = irqc_dt_ids, - .pm = &irqc_pm_ops, + .pm = &irqc_pm_ops, } }; diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c index e4c99c2e0373..f05afe82db4d 100644 --- a/drivers/irqchip/irq-renesas-rza1.c +++ b/drivers/irqchip/irq-renesas-rza1.c @@ -244,12 +244,11 @@ out_put_node: return ret; } -static int rza1_irqc_remove(struct platform_device *pdev) +static void rza1_irqc_remove(struct platform_device *pdev) { struct rza1_irqc_priv *priv = platform_get_drvdata(pdev); irq_domain_remove(priv->irq_domain); - return 0; } static const struct of_device_id rza1_irqc_dt_ids[] = { @@ -260,9 +259,9 @@ MODULE_DEVICE_TABLE(of, rza1_irqc_dt_ids); static struct platform_driver rza1_irqc_device_driver = { .probe = rza1_irqc_probe, - .remove = rza1_irqc_remove, + .remove_new = rza1_irqc_remove, .driver = { - .name = "renesas_rza1_irqc", + .name = "renesas_rza1_irqc", .of_match_table = rza1_irqc_dt_ids, } }; diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c index e8d01b14ccdd..f87aeab460eb 100644 --- a/drivers/irqchip/irq-riscv-intc.c +++ b/drivers/irqchip/irq-riscv-intc.c @@ -17,17 +17,29 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/smp.h> +#include <linux/soc/andes/irq.h> + +#include <asm/hwcap.h> static struct irq_domain *intc_domain; +static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG; +static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG; +static unsigned int riscv_intc_custom_nr_irqs 
__ro_after_init; static asmlinkage void riscv_intc_irq(struct pt_regs *regs) { unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG; - if (unlikely(cause >= BITS_PER_LONG)) - panic("unexpected interrupt cause"); + if (generic_handle_domain_irq(intc_domain, cause)) + pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n", cause); +} + +static asmlinkage void riscv_intc_aia_irq(struct pt_regs *regs) +{ + unsigned long topi; - generic_handle_domain_irq(intc_domain, cause); + while ((topi = csr_read(CSR_TOPI))) + generic_handle_domain_irq(intc_domain, topi >> TOPI_IID_SHIFT); } /* @@ -39,12 +51,43 @@ static asmlinkage void riscv_intc_irq(struct pt_regs *regs) static void riscv_intc_irq_mask(struct irq_data *d) { - csr_clear(CSR_IE, BIT(d->hwirq)); + if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG) + csr_clear(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG)); + else + csr_clear(CSR_IE, BIT(d->hwirq)); } static void riscv_intc_irq_unmask(struct irq_data *d) { - csr_set(CSR_IE, BIT(d->hwirq)); + if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG) + csr_set(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG)); + else + csr_set(CSR_IE, BIT(d->hwirq)); +} + +static void andes_intc_irq_mask(struct irq_data *d) +{ + /* + * Andes specific S-mode local interrupt causes (hwirq) + * are defined as (256 + n) and controlled by n-th bit + * of SLIE. + */ + unsigned int mask = BIT(d->hwirq % BITS_PER_LONG); + + if (d->hwirq < ANDES_SLI_CAUSE_BASE) + csr_clear(CSR_IE, mask); + else + csr_clear(ANDES_CSR_SLIE, mask); +} + +static void andes_intc_irq_unmask(struct irq_data *d) +{ + unsigned int mask = BIT(d->hwirq % BITS_PER_LONG); + + if (d->hwirq < ANDES_SLI_CAUSE_BASE) + csr_set(CSR_IE, mask); + else + csr_set(ANDES_CSR_SLIE, mask); } static void riscv_intc_irq_eoi(struct irq_data *d) @@ -70,12 +113,21 @@ static struct irq_chip riscv_intc_chip = { .irq_eoi = riscv_intc_irq_eoi, }; +static struct irq_chip andes_intc_chip = { + .name = "RISC-V INTC", + .irq_mask = andes_intc_irq_mask, + .irq_unmask = andes_intc_irq_unmask, + .irq_eoi = riscv_intc_irq_eoi, +}; + static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { + struct irq_chip *chip = d->host_data; + irq_set_percpu_devid(irq); - irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data, - handle_percpu_devid_irq, NULL, NULL); + irq_domain_set_info(d, irq, hwirq, chip, NULL, handle_percpu_devid_irq, + NULL, NULL); return 0; } @@ -93,6 +145,14 @@ static int riscv_intc_domain_alloc(struct irq_domain *domain, if (ret) return ret; + /* + * Only allow hwirq for which we have corresponding standard or + * custom interrupt enable register. 
+ */ + if ((hwirq >= riscv_intc_nr_irqs && hwirq < riscv_intc_custom_base) || + (hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs)) + return -EINVAL; + for (i = 0; i < nr_irqs; i++) { ret = riscv_intc_domain_map(domain, virq + i, hwirq + i); if (ret) @@ -113,18 +173,20 @@ static struct fwnode_handle *riscv_intc_hwnode(void) return intc_domain->fwnode; } -static int __init riscv_intc_init_common(struct fwnode_handle *fn) +static int __init riscv_intc_init_common(struct fwnode_handle *fn, struct irq_chip *chip) { int rc; - intc_domain = irq_domain_create_linear(fn, BITS_PER_LONG, - &riscv_intc_domain_ops, NULL); + intc_domain = irq_domain_create_tree(fn, &riscv_intc_domain_ops, chip); if (!intc_domain) { pr_err("unable to add IRQ domain\n"); return -ENXIO; } - rc = set_handle_irq(&riscv_intc_irq); + if (riscv_isa_extension_available(NULL, SxAIA)) + rc = set_handle_irq(&riscv_intc_aia_irq); + else + rc = set_handle_irq(&riscv_intc_irq); if (rc) { pr_err("failed to set irq handler\n"); return rc; @@ -132,7 +194,11 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn) riscv_set_intc_hwnode_fn(riscv_intc_hwnode); - pr_info("%d local interrupts mapped\n", BITS_PER_LONG); + pr_info("%d local interrupts mapped%s\n", + riscv_isa_extension_available(NULL, SxAIA) ? 64 : riscv_intc_nr_irqs, + riscv_isa_extension_available(NULL, SxAIA) ? " using AIA" : ""); + if (riscv_intc_custom_nr_irqs) + pr_info("%d custom local interrupts mapped\n", riscv_intc_custom_nr_irqs); return 0; } @@ -140,8 +206,9 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn) static int __init riscv_intc_init(struct device_node *node, struct device_node *parent) { - int rc; + struct irq_chip *chip = &riscv_intc_chip; unsigned long hartid; + int rc; rc = riscv_of_parent_hartid(node, &hartid); if (rc < 0) { @@ -166,10 +233,17 @@ static int __init riscv_intc_init(struct device_node *node, return 0; } - return riscv_intc_init_common(of_node_to_fwnode(node)); + if (of_device_is_compatible(node, "andestech,cpu-intc")) { + riscv_intc_custom_base = ANDES_SLI_CAUSE_BASE; + riscv_intc_custom_nr_irqs = ANDES_RV_IRQ_LAST; + chip = &andes_intc_chip; + } + + return riscv_intc_init_common(of_node_to_fwnode(node), chip); } IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init); +IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init); #ifdef CONFIG_ACPI @@ -196,7 +270,7 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, return -ENOMEM; } - return riscv_intc_init_common(fn); + return riscv_intc_init_common(fn, &riscv_intc_chip); } IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL, diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 5b7bc4fd9517..f3d4cb9e34f7 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -3,7 +3,6 @@ * Copyright (C) 2017 SiFive * Copyright (C) 2018 Christoph Hellwig */ -#define pr_fmt(fmt) "plic: " fmt #include <linux/cpu.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -64,6 +63,7 @@ #define PLIC_QUIRK_EDGE_INTERRUPT 0 struct plic_priv { + struct device *dev; struct cpumask lmask; struct irq_domain *irqdomain; void __iomem *regs; @@ -103,9 +103,11 @@ static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable) static void plic_toggle(struct plic_handler *handler, int hwirq, int enable) { - raw_spin_lock(&handler->enable_lock); + unsigned long flags; + + raw_spin_lock_irqsave(&handler->enable_lock, flags); __plic_toggle(handler->enable_base, 
hwirq, enable); - raw_spin_unlock(&handler->enable_lock); + raw_spin_unlock_irqrestore(&handler->enable_lock, flags); } static inline void plic_irq_toggle(const struct cpumask *mask, @@ -148,7 +150,13 @@ static void plic_irq_eoi(struct irq_data *d) { struct plic_handler *handler = this_cpu_ptr(&plic_handlers); - writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); + if (unlikely(irqd_irq_disabled(d))) { + plic_toggle(handler, d->hwirq, 1); + writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); + plic_toggle(handler, d->hwirq, 0); + } else { + writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); + } } #ifdef CONFIG_SMP @@ -236,6 +244,7 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type) static int plic_irq_suspend(void) { unsigned int i, cpu; + unsigned long flags; u32 __iomem *reg; struct plic_priv *priv; @@ -253,12 +262,12 @@ static int plic_irq_suspend(void) if (!handler->present) continue; - raw_spin_lock(&handler->enable_lock); + raw_spin_lock_irqsave(&handler->enable_lock, flags); for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) { reg = handler->enable_base + i * sizeof(u32); handler->enable_save[i] = readl(reg); } - raw_spin_unlock(&handler->enable_lock); + raw_spin_unlock_irqrestore(&handler->enable_lock, flags); } return 0; @@ -267,6 +276,7 @@ static int plic_irq_suspend(void) static void plic_irq_resume(void) { unsigned int i, index, cpu; + unsigned long flags; u32 __iomem *reg; struct plic_priv *priv; @@ -284,12 +294,12 @@ static void plic_irq_resume(void) if (!handler->present) continue; - raw_spin_lock(&handler->enable_lock); + raw_spin_lock_irqsave(&handler->enable_lock, flags); for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) { reg = handler->enable_base + i * sizeof(u32); writel(handler->enable_save[i], reg); } - raw_spin_unlock(&handler->enable_lock); + raw_spin_unlock_irqrestore(&handler->enable_lock, flags); } } @@ -370,9 +380,10 @@ static void plic_handle_irq(struct irq_desc *desc) while ((hwirq = readl(claim))) { int err = generic_handle_domain_irq(handler->priv->irqdomain, hwirq); - if (unlikely(err)) - pr_warn_ratelimited("can't find mapping for hwirq %lu\n", - hwirq); + if (unlikely(err)) { + dev_warn_ratelimited(handler->priv->dev, + "can't find mapping for hwirq %lu\n", hwirq); + } } chained_irq_exit(chip, desc); @@ -400,63 +411,122 @@ static int plic_starting_cpu(unsigned int cpu) enable_percpu_irq(plic_parent_irq, irq_get_trigger_type(plic_parent_irq)); else - pr_warn("cpu%d: parent irq not available\n", cpu); + dev_warn(handler->priv->dev, "cpu%d: parent irq not available\n", cpu); plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD); return 0; } -static int __init __plic_init(struct device_node *node, - struct device_node *parent, - unsigned long plic_quirks) +static const struct of_device_id plic_match[] = { + { .compatible = "sifive,plic-1.0.0" }, + { .compatible = "riscv,plic0" }, + { .compatible = "andestech,nceplic100", + .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) }, + { .compatible = "thead,c900-plic", + .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) }, + {} +}; + +static int plic_parse_nr_irqs_and_contexts(struct platform_device *pdev, + u32 *nr_irqs, u32 *nr_contexts) { - int error = 0, nr_contexts, nr_handlers = 0, i; - u32 nr_irqs; - struct plic_priv *priv; - struct plic_handler *handler; - unsigned int cpu; + struct device *dev = &pdev->dev; + int rc; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + /* + * Currently, only OF fwnode is supported so extend this + * function for 
ACPI support. + */ + if (!is_of_node(dev->fwnode)) + return -EINVAL; - priv->plic_quirks = plic_quirks; + rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,ndev", nr_irqs); + if (rc) { + dev_err(dev, "riscv,ndev property not available\n"); + return rc; + } - priv->regs = of_iomap(node, 0); - if (WARN_ON(!priv->regs)) { - error = -EIO; - goto out_free_priv; + *nr_contexts = of_irq_count(to_of_node(dev->fwnode)); + if (WARN_ON(!(*nr_contexts))) { + dev_err(dev, "no PLIC context available\n"); + return -EINVAL; } - error = -EINVAL; - of_property_read_u32(node, "riscv,ndev", &nr_irqs); - if (WARN_ON(!nr_irqs)) - goto out_iounmap; + return 0; +} - priv->nr_irqs = nr_irqs; +static int plic_parse_context_parent(struct platform_device *pdev, u32 context, + u32 *parent_hwirq, int *parent_cpu) +{ + struct device *dev = &pdev->dev; + struct of_phandle_args parent; + unsigned long hartid; + int rc; - priv->prio_save = bitmap_alloc(nr_irqs, GFP_KERNEL); - if (!priv->prio_save) - goto out_free_priority_reg; + /* + * Currently, only OF fwnode is supported so extend this + * function for ACPI support. + */ + if (!is_of_node(dev->fwnode)) + return -EINVAL; - nr_contexts = of_irq_count(node); - if (WARN_ON(!nr_contexts)) - goto out_free_priority_reg; + rc = of_irq_parse_one(to_of_node(dev->fwnode), context, &parent); + if (rc) + return rc; - error = -ENOMEM; - priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1, - &plic_irqdomain_ops, priv); - if (WARN_ON(!priv->irqdomain)) - goto out_free_priority_reg; + rc = riscv_of_parent_hartid(parent.np, &hartid); + if (rc) + return rc; - for (i = 0; i < nr_contexts; i++) { - struct of_phandle_args parent; - irq_hw_number_t hwirq; - int cpu; - unsigned long hartid; + *parent_hwirq = parent.args[0]; + *parent_cpu = riscv_hartid_to_cpuid(hartid); + return 0; +} + +static int plic_probe(struct platform_device *pdev) +{ + int error = 0, nr_contexts, nr_handlers = 0, cpu, i; + struct device *dev = &pdev->dev; + unsigned long plic_quirks = 0; + struct plic_handler *handler; + u32 nr_irqs, parent_hwirq; + struct irq_domain *domain; + struct plic_priv *priv; + irq_hw_number_t hwirq; + bool cpuhp_setup; - if (of_irq_parse_one(node, i, &parent)) { - pr_err("failed to parse parent for context %d.\n", i); + if (is_of_node(dev->fwnode)) { + const struct of_device_id *id; + + id = of_match_node(plic_match, to_of_node(dev->fwnode)); + if (id) + plic_quirks = (unsigned long)id->data; + } + + error = plic_parse_nr_irqs_and_contexts(pdev, &nr_irqs, &nr_contexts); + if (error) + return error; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + priv->plic_quirks = plic_quirks; + priv->nr_irqs = nr_irqs; + + priv->regs = devm_platform_ioremap_resource(pdev, 0); + if (WARN_ON(!priv->regs)) + return -EIO; + + priv->prio_save = devm_bitmap_zalloc(dev, nr_irqs, GFP_KERNEL); + if (!priv->prio_save) + return -ENOMEM; + + for (i = 0; i < nr_contexts; i++) { + error = plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu); + if (error) { + dev_warn(dev, "hwirq for context%d not found\n", i); continue; } @@ -464,7 +534,7 @@ static int __init __plic_init(struct device_node *node, * Skip contexts other than external interrupts for our * privilege level. */ - if (parent.args[0] != RV_IRQ_EXT) { + if (parent_hwirq != RV_IRQ_EXT) { /* Disable S-mode enable bits if running in M-mode. 
*/ if (IS_ENABLED(CONFIG_RISCV_M_MODE)) { void __iomem *enable_base = priv->regs + @@ -477,24 +547,17 @@ static int __init __plic_init(struct device_node *node, continue; } - error = riscv_of_parent_hartid(parent.np, &hartid); - if (error < 0) { - pr_warn("failed to parse hart ID for context %d.\n", i); - continue; - } - - cpu = riscv_hartid_to_cpuid(hartid); if (cpu < 0) { - pr_warn("Invalid cpuid for context %d\n", i); + dev_warn(dev, "Invalid cpuid for context %d\n", i); continue; } /* Find parent domain and register chained handler */ - if (!plic_parent_irq && irq_find_host(parent.np)) { - plic_parent_irq = irq_of_parse_and_map(node, i); + domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); + if (!plic_parent_irq && domain) { + plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); if (plic_parent_irq) - irq_set_chained_handler(plic_parent_irq, - plic_handle_irq); + irq_set_chained_handler(plic_parent_irq, plic_handle_irq); } /* @@ -504,7 +567,7 @@ static int __init __plic_init(struct device_node *node, */ handler = per_cpu_ptr(&plic_handlers, cpu); if (handler->present) { - pr_warn("handler already present for context %d.\n", i); + dev_warn(dev, "handler already present for context %d.\n", i); plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD); goto done; } @@ -518,10 +581,10 @@ static int __init __plic_init(struct device_node *node, i * CONTEXT_ENABLE_SIZE; handler->priv = priv; - handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32), - sizeof(*handler->enable_save), GFP_KERNEL); + handler->enable_save = devm_kcalloc(dev, DIV_ROUND_UP(nr_irqs, 32), + sizeof(*handler->enable_save), GFP_KERNEL); if (!handler->enable_save) - goto out_free_enable_reg; + goto fail_cleanup_contexts; done: for (hwirq = 1; hwirq <= nr_irqs; hwirq++) { plic_toggle(handler, hwirq, 0); @@ -531,52 +594,60 @@ done: nr_handlers++; } + priv->irqdomain = irq_domain_add_linear(to_of_node(dev->fwnode), nr_irqs + 1, + &plic_irqdomain_ops, priv); + if (WARN_ON(!priv->irqdomain)) + goto fail_cleanup_contexts; + /* * We can have multiple PLIC instances so setup cpuhp state - * and register syscore operations only when context handler - * for current/boot CPU is present. + * and register syscore operations only once after context + * handlers of all online CPUs are initialized. 
*/ - handler = this_cpu_ptr(&plic_handlers); - if (handler->present && !plic_cpuhp_setup_done) { - cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, - "irqchip/sifive/plic:starting", - plic_starting_cpu, plic_dying_cpu); - register_syscore_ops(&plic_irq_syscore_ops); - plic_cpuhp_setup_done = true; + if (!plic_cpuhp_setup_done) { + cpuhp_setup = true; + for_each_online_cpu(cpu) { + handler = per_cpu_ptr(&plic_handlers, cpu); + if (!handler->present) { + cpuhp_setup = false; + break; + } + } + if (cpuhp_setup) { + cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, + "irqchip/sifive/plic:starting", + plic_starting_cpu, plic_dying_cpu); + register_syscore_ops(&plic_irq_syscore_ops); + plic_cpuhp_setup_done = true; + } } - pr_info("%pOFP: mapped %d interrupts with %d handlers for" - " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts); + dev_info(dev, "mapped %d interrupts with %d handlers for %d contexts.\n", + nr_irqs, nr_handlers, nr_contexts); return 0; -out_free_enable_reg: - for_each_cpu(cpu, cpu_present_mask) { +fail_cleanup_contexts: + for (i = 0; i < nr_contexts; i++) { + if (plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu)) + continue; + if (parent_hwirq != RV_IRQ_EXT || cpu < 0) + continue; + handler = per_cpu_ptr(&plic_handlers, cpu); - kfree(handler->enable_save); + handler->present = false; + handler->hart_base = NULL; + handler->enable_base = NULL; + handler->enable_save = NULL; + handler->priv = NULL; } -out_free_priority_reg: - kfree(priv->prio_save); -out_iounmap: - iounmap(priv->regs); -out_free_priv: - kfree(priv); - return error; + return -ENOMEM; } -static int __init plic_init(struct device_node *node, - struct device_node *parent) -{ - return __plic_init(node, parent, 0); -} - -IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init); -IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */ - -static int __init plic_edge_init(struct device_node *node, - struct device_node *parent) -{ - return __plic_init(node, parent, BIT(PLIC_QUIRK_EDGE_INTERRUPT)); -} - -IRQCHIP_DECLARE(andestech_nceplic100, "andestech,nceplic100", plic_edge_init); -IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_edge_init); +static struct platform_driver plic_driver = { + .driver = { + .name = "riscv-plic", + .of_match_table = plic_match, + }, + .probe = plic_probe, +}; +builtin_platform_driver(plic_driver); diff --git a/drivers/irqchip/irq-starfive-jh8100-intc.c b/drivers/irqchip/irq-starfive-jh8100-intc.c new file mode 100644 index 000000000000..0f5837176e53 --- /dev/null +++ b/drivers/irqchip/irq-starfive-jh8100-intc.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * StarFive JH8100 External Interrupt Controller driver + * + * Copyright (C) 2023 StarFive Technology Co., Ltd. 
+ * + * Author: Changhuang Liang <changhuang.liang@starfivetech.com> + */ + +#define pr_fmt(fmt) "irq-starfive-jh8100: " fmt + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/reset.h> +#include <linux/spinlock.h> + +#define STARFIVE_INTC_SRC0_CLEAR 0x10 +#define STARFIVE_INTC_SRC0_MASK 0x14 +#define STARFIVE_INTC_SRC0_INT 0x1c + +#define STARFIVE_INTC_SRC_IRQ_NUM 32 + +struct starfive_irq_chip { + void __iomem *base; + struct irq_domain *domain; + raw_spinlock_t lock; +}; + +static void starfive_intc_bit_set(struct starfive_irq_chip *irqc, + u32 reg, u32 bit_mask) +{ + u32 value; + + value = ioread32(irqc->base + reg); + value |= bit_mask; + iowrite32(value, irqc->base + reg); +} + +static void starfive_intc_bit_clear(struct starfive_irq_chip *irqc, + u32 reg, u32 bit_mask) +{ + u32 value; + + value = ioread32(irqc->base + reg); + value &= ~bit_mask; + iowrite32(value, irqc->base + reg); +} + +static void starfive_intc_unmask(struct irq_data *d) +{ + struct starfive_irq_chip *irqc = irq_data_get_irq_chip_data(d); + + raw_spin_lock(&irqc->lock); + starfive_intc_bit_clear(irqc, STARFIVE_INTC_SRC0_MASK, BIT(d->hwirq)); + raw_spin_unlock(&irqc->lock); +} + +static void starfive_intc_mask(struct irq_data *d) +{ + struct starfive_irq_chip *irqc = irq_data_get_irq_chip_data(d); + + raw_spin_lock(&irqc->lock); + starfive_intc_bit_set(irqc, STARFIVE_INTC_SRC0_MASK, BIT(d->hwirq)); + raw_spin_unlock(&irqc->lock); +} + +static struct irq_chip intc_dev = { + .name = "StarFive JH8100 INTC", + .irq_unmask = starfive_intc_unmask, + .irq_mask = starfive_intc_mask, +}; + +static int starfive_intc_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_domain_set_info(d, irq, hwirq, &intc_dev, d->host_data, + handle_level_irq, NULL, NULL); + + return 0; +} + +static const struct irq_domain_ops starfive_intc_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .map = starfive_intc_map, +}; + +static void starfive_intc_irq_handler(struct irq_desc *desc) +{ + struct starfive_irq_chip *irqc = irq_data_get_irq_handler_data(&desc->irq_data); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned long value; + int hwirq; + + chained_irq_enter(chip, desc); + + value = ioread32(irqc->base + STARFIVE_INTC_SRC0_INT); + while (value) { + hwirq = ffs(value) - 1; + + generic_handle_domain_irq(irqc->domain, hwirq); + + starfive_intc_bit_set(irqc, STARFIVE_INTC_SRC0_CLEAR, BIT(hwirq)); + starfive_intc_bit_clear(irqc, STARFIVE_INTC_SRC0_CLEAR, BIT(hwirq)); + + __clear_bit(hwirq, &value); + } + + chained_irq_exit(chip, desc); +} + +static int __init starfive_intc_init(struct device_node *intc, + struct device_node *parent) +{ + struct starfive_irq_chip *irqc; + struct reset_control *rst; + struct clk *clk; + int parent_irq; + int ret; + + irqc = kzalloc(sizeof(*irqc), GFP_KERNEL); + if (!irqc) + return -ENOMEM; + + irqc->base = of_iomap(intc, 0); + if (!irqc->base) { + pr_err("Unable to map registers\n"); + ret = -ENXIO; + goto err_free; + } + + rst = of_reset_control_get_exclusive(intc, NULL); + if (IS_ERR(rst)) { + pr_err("Unable to get reset control %pe\n", rst); + ret = PTR_ERR(rst); + goto err_unmap; + } + + clk = of_clk_get(intc, 0); + if (IS_ERR(clk)) { + pr_err("Unable to get clock %pe\n", clk); + ret = PTR_ERR(clk); + goto err_reset_put; + } + + ret = reset_control_deassert(rst); + if (ret) 
+ goto err_clk_put; + + ret = clk_prepare_enable(clk); + if (ret) + goto err_reset_assert; + + raw_spin_lock_init(&irqc->lock); + + irqc->domain = irq_domain_add_linear(intc, STARFIVE_INTC_SRC_IRQ_NUM, + &starfive_intc_domain_ops, irqc); + if (!irqc->domain) { + pr_err("Unable to create IRQ domain\n"); + ret = -EINVAL; + goto err_clk_disable; + } + + parent_irq = of_irq_get(intc, 0); + if (parent_irq < 0) { + pr_err("Failed to get main IRQ: %d\n", parent_irq); + ret = parent_irq; + goto err_remove_domain; + } + + irq_set_chained_handler_and_data(parent_irq, starfive_intc_irq_handler, + irqc); + + pr_info("Interrupt controller register, nr_irqs %d\n", + STARFIVE_INTC_SRC_IRQ_NUM); + + return 0; + +err_remove_domain: + irq_domain_remove(irqc->domain); +err_clk_disable: + clk_disable_unprepare(clk); +err_reset_assert: + reset_control_assert(rst); +err_clk_put: + clk_put(clk); +err_reset_put: + reset_control_put(rst); +err_unmap: + iounmap(irqc->base); +err_free: + kfree(irqc); + return ret; +} + +IRQCHIP_PLATFORM_DRIVER_BEGIN(starfive_intc) +IRQCHIP_MATCH("starfive,jh8100-intc", starfive_intc_init) +IRQCHIP_PLATFORM_DRIVER_END(starfive_intc) + +MODULE_DESCRIPTION("StarFive JH8100 External Interrupt Controller"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Changhuang Liang <changhuang.liang@starfivetech.com>"); diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 971240e2e31b..26a5193d0ae4 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -898,10 +898,9 @@ static void stm32_exti_remove_irq(void *data) irq_domain_remove(domain); } -static int stm32_exti_remove(struct platform_device *pdev) +static void stm32_exti_remove(struct platform_device *pdev) { stm32_exti_h_syscore_deinit(); - return 0; } static int stm32_exti_probe(struct platform_device *pdev) @@ -991,10 +990,10 @@ MODULE_DEVICE_TABLE(of, stm32_exti_ids); static struct platform_driver stm32_exti_driver = { .probe = stm32_exti_probe, - .remove = stm32_exti_remove, + .remove_new = stm32_exti_remove, .driver = { - .name = "stm32_exti", - .of_match_table = stm32_exti_ids, + .name = "stm32_exti", + .of_match_table = stm32_exti_ids, }, }; diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c index b2d61d4f6fe6..57f610dab6b8 100644 --- a/drivers/irqchip/irq-ts4800.c +++ b/drivers/irqchip/irq-ts4800.c @@ -139,13 +139,11 @@ static int ts4800_ic_probe(struct platform_device *pdev) return 0; } -static int ts4800_ic_remove(struct platform_device *pdev) +static void ts4800_ic_remove(struct platform_device *pdev) { struct ts4800_irq_data *data = platform_get_drvdata(pdev); irq_domain_remove(data->domain); - - return 0; } static const struct of_device_id ts4800_ic_of_match[] = { @@ -155,11 +153,11 @@ static const struct of_device_id ts4800_ic_of_match[] = { MODULE_DEVICE_TABLE(of, ts4800_ic_of_match); static struct platform_driver ts4800_ic_driver = { - .probe = ts4800_ic_probe, - .remove = ts4800_ic_remove, + .probe = ts4800_ic_probe, + .remove_new = ts4800_ic_remove, .driver = { - .name = "ts4800-irqc", - .of_match_table = ts4800_ic_of_match, + .name = "ts4800-irqc", + .of_match_table = ts4800_ic_of_match, }, }; module_platform_driver(ts4800_ic_driver); diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index 9e3d5561e04e..ea93e7236c4a 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -47,9 +47,8 @@ /** * struct vic_device - VIC PM device - * @parent_irq: The parent IRQ number of the VIC if cascaded, or 0. 
- * @irq: The IRQ number for the base of the VIC. * @base: The register base for the VIC. + * @irq: The IRQ number for the base of the VIC. * @valid_sources: A bitmask of valid interrupts * @resume_sources: A bitmask of interrupts for resume. * @resume_irqs: The IRQs enabled for resume. diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index e3e28a4f7d01..b1abc2a0c971 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c @@ -1587,8 +1587,8 @@ static int flexrm_mbox_probe(struct platform_device *pdev) } /* Allocate platform MSIs for each ring */ - ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings, - flexrm_mbox_msi_write); + ret = platform_device_msi_init_and_alloc_irqs(dev, mbox->num_rings, + flexrm_mbox_msi_write); if (ret) goto fail_destroy_cmpl_pool; @@ -1641,7 +1641,7 @@ skip_debugfs: fail_free_debugfs_root: debugfs_remove_recursive(mbox->root); - platform_msi_domain_free_irqs(dev); + platform_device_msi_free_irqs_all(dev); fail_destroy_cmpl_pool: dma_pool_destroy(mbox->cmpl_pool); fail_destroy_bd_pool: @@ -1657,7 +1657,7 @@ static void flexrm_mbox_remove(struct platform_device *pdev) debugfs_remove_recursive(mbox->root); - platform_msi_domain_free_irqs(dev); + platform_device_msi_free_irqs_all(dev); dma_pool_destroy(mbox->cmpl_pool); dma_pool_destroy(mbox->bd_pool); diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 6ae2329052c9..4e6afa89921f 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -300,7 +300,7 @@ struct cached_dev { struct list_head list; struct bcache_device disk; struct block_device *bdev; - struct bdev_handle *bdev_handle; + struct file *bdev_file; struct cache_sb sb; struct cache_sb_disk *sb_disk; @@ -423,7 +423,7 @@ struct cache { struct kobject kobj; struct block_device *bdev; - struct bdev_handle *bdev_handle; + struct file *bdev_file; struct task_struct *alloc_thread; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index dc3f50f69714..330bcd9ea4a9 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -900,9 +900,23 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, struct request_queue *q; const size_t max_stripes = min_t(size_t, INT_MAX, SIZE_MAX / sizeof(atomic_t)); + struct queue_limits lim = { + .max_hw_sectors = UINT_MAX, + .max_sectors = UINT_MAX, + .max_segment_size = UINT_MAX, + .max_segments = BIO_MAX_VECS, + .max_hw_discard_sectors = UINT_MAX, + .io_min = block_size, + .logical_block_size = block_size, + .physical_block_size = block_size, + }; uint64_t n; int idx; + if (cached_bdev) { + d->stripe_size = bdev_io_opt(cached_bdev) >> SECTOR_SHIFT; + lim.io_opt = umax(block_size, bdev_io_opt(cached_bdev)); + } if (!d->stripe_size) d->stripe_size = 1 << 31; else if (d->stripe_size < BCH_MIN_STRIPE_SZ) @@ -935,8 +949,21 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER)) goto out_ida_remove; - d->disk = blk_alloc_disk(NUMA_NO_NODE); - if (!d->disk) + if (lim.logical_block_size > PAGE_SIZE && cached_bdev) { + /* + * This should only happen with BCACHE_SB_VERSION_BDEV. + * Block/page size is checked for BCACHE_SB_VERSION_CDEV. 
+ */ + pr_info("bcache%i: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n", + idx, lim.logical_block_size, + PAGE_SIZE, bdev_logical_block_size(cached_bdev)); + + /* This also adjusts physical block size/min io size if needed */ + lim.logical_block_size = bdev_logical_block_size(cached_bdev); + } + + d->disk = blk_alloc_disk(&lim, NUMA_NO_NODE); + if (IS_ERR(d->disk)) goto out_bioset_exit; set_capacity(d->disk, sectors); @@ -949,27 +976,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, d->disk->private_data = d; q = d->disk->queue; - q->limits.max_hw_sectors = UINT_MAX; - q->limits.max_sectors = UINT_MAX; - q->limits.max_segment_size = UINT_MAX; - q->limits.max_segments = BIO_MAX_VECS; - blk_queue_max_discard_sectors(q, UINT_MAX); - q->limits.io_min = block_size; - q->limits.logical_block_size = block_size; - q->limits.physical_block_size = block_size; - - if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) { - /* - * This should only happen with BCACHE_SB_VERSION_BDEV. - * Block/page size is checked for BCACHE_SB_VERSION_CDEV. - */ - pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n", - d->disk->disk_name, q->limits.logical_block_size, - PAGE_SIZE, bdev_logical_block_size(cached_bdev)); - - /* This also adjusts physical block size/min io size if needed */ - blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev)); - } blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue); @@ -1369,8 +1375,8 @@ static CLOSURE_CALLBACK(cached_dev_free) if (dc->sb_disk) put_page(virt_to_page(dc->sb_disk)); - if (dc->bdev_handle) - bdev_release(dc->bdev_handle); + if (dc->bdev_file) + fput(dc->bdev_file); wake_up(&unregister_wait); @@ -1416,9 +1422,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); } - dc->disk.stripe_size = q->limits.io_opt >> 9; - - if (dc->disk.stripe_size) + if (bdev_io_opt(dc->bdev)) dc->partial_stripes_expensive = q->limits.raid_partial_stripes_expensive; @@ -1428,9 +1432,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) if (ret) return ret; - blk_queue_io_opt(dc->disk.disk->queue, - max(queue_io_opt(dc->disk.disk->queue), queue_io_opt(q))); - atomic_set(&dc->io_errors, 0); dc->io_disable = false; dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT; @@ -1445,7 +1446,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) /* Cached device - bcache superblock */ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk, - struct bdev_handle *bdev_handle, + struct file *bdev_file, struct cached_dev *dc) { const char *err = "cannot allocate memory"; @@ -1453,8 +1454,8 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk, int ret = -ENOMEM; memcpy(&dc->sb, sb, sizeof(struct cache_sb)); - dc->bdev_handle = bdev_handle; - dc->bdev = bdev_handle->bdev; + dc->bdev_file = bdev_file; + dc->bdev = file_bdev(bdev_file); dc->sb_disk = sb_disk; if (cached_dev_init(dc, sb->block_size << 9)) @@ -2218,8 +2219,8 @@ void bch_cache_release(struct kobject *kobj) if (ca->sb_disk) put_page(virt_to_page(ca->sb_disk)); - if (ca->bdev_handle) - bdev_release(ca->bdev_handle); + if (ca->bdev_file) + fput(ca->bdev_file); kfree(ca); module_put(THIS_MODULE); @@ -2339,18 +2340,18 @@ err_free: } static int register_cache(struct cache_sb *sb, struct cache_sb_disk 
*sb_disk, - struct bdev_handle *bdev_handle, + struct file *bdev_file, struct cache *ca) { const char *err = NULL; /* must be set for any error case */ int ret = 0; memcpy(&ca->sb, sb, sizeof(struct cache_sb)); - ca->bdev_handle = bdev_handle; - ca->bdev = bdev_handle->bdev; + ca->bdev_file = bdev_file; + ca->bdev = file_bdev(bdev_file); ca->sb_disk = sb_disk; - if (bdev_max_discard_sectors((bdev_handle->bdev))) + if (bdev_max_discard_sectors(file_bdev(bdev_file))) ca->discard = CACHE_DISCARD(&ca->sb); ret = cache_alloc(ca); @@ -2361,20 +2362,20 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk, err = "cache_alloc(): cache device is too small"; else err = "cache_alloc(): unknown error"; - pr_notice("error %pg: %s\n", bdev_handle->bdev, err); + pr_notice("error %pg: %s\n", file_bdev(bdev_file), err); /* * If we failed here, it means ca->kobj is not initialized yet, * kobject_put() won't be called and there is no chance to - * call bdev_release() to bdev in bch_cache_release(). So - * we explicitly call bdev_release() here. + * call fput() to bdev in bch_cache_release(). So + * we explicitly call fput() on the block device here. */ - bdev_release(bdev_handle); + fput(bdev_file); return ret; } - if (kobject_add(&ca->kobj, bdev_kobj(bdev_handle->bdev), "bcache")) { + if (kobject_add(&ca->kobj, bdev_kobj(file_bdev(bdev_file)), "bcache")) { pr_notice("error %pg: error calling kobject_add\n", - bdev_handle->bdev); + file_bdev(bdev_file)); ret = -ENOMEM; goto out; } @@ -2388,7 +2389,7 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk, goto out; } - pr_info("registered cache device %pg\n", ca->bdev_handle->bdev); + pr_info("registered cache device %pg\n", file_bdev(ca->bdev_file)); out: kobject_put(&ca->kobj); @@ -2446,7 +2447,7 @@ struct async_reg_args { char *path; struct cache_sb *sb; struct cache_sb_disk *sb_disk; - struct bdev_handle *bdev_handle; + struct file *bdev_file; void *holder; }; @@ -2457,7 +2458,7 @@ static void register_bdev_worker(struct work_struct *work) container_of(work, struct async_reg_args, reg_work.work); mutex_lock(&bch_register_lock); - if (register_bdev(args->sb, args->sb_disk, args->bdev_handle, + if (register_bdev(args->sb, args->sb_disk, args->bdev_file, args->holder) < 0) fail = true; mutex_unlock(&bch_register_lock); @@ -2478,7 +2479,7 @@ static void register_cache_worker(struct work_struct *work) container_of(work, struct async_reg_args, reg_work.work); /* blkdev_put() will be called in bch_cache_release() */ - if (register_cache(args->sb, args->sb_disk, args->bdev_handle, + if (register_cache(args->sb, args->sb_disk, args->bdev_file, args->holder)) fail = true; @@ -2516,7 +2517,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, char *path = NULL; struct cache_sb *sb; struct cache_sb_disk *sb_disk; - struct bdev_handle *bdev_handle, *bdev_handle2; + struct file *bdev_file, *bdev_file2; void *holder = NULL; ssize_t ret; bool async_registration = false; @@ -2549,15 +2550,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, ret = -EINVAL; err = "failed to open device"; - bdev_handle = bdev_open_by_path(strim(path), BLK_OPEN_READ, NULL, NULL); - if (IS_ERR(bdev_handle)) + bdev_file = bdev_file_open_by_path(strim(path), BLK_OPEN_READ, NULL, NULL); + if (IS_ERR(bdev_file)) goto out_free_sb; err = "failed to set blocksize"; - if (set_blocksize(bdev_handle->bdev, 4096)) + if (set_blocksize(file_bdev(bdev_file), 4096)) goto out_blkdev_put; - err = 
read_super(sb, bdev_handle->bdev, &sb_disk); + err = read_super(sb, file_bdev(bdev_file), &sb_disk); if (err) goto out_blkdev_put; @@ -2569,13 +2570,13 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, } /* Now reopen in exclusive mode with proper holder */ - bdev_handle2 = bdev_open_by_dev(bdev_handle->bdev->bd_dev, + bdev_file2 = bdev_file_open_by_dev(file_bdev(bdev_file)->bd_dev, BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL); - bdev_release(bdev_handle); - bdev_handle = bdev_handle2; - if (IS_ERR(bdev_handle)) { - ret = PTR_ERR(bdev_handle); - bdev_handle = NULL; + fput(bdev_file); + bdev_file = bdev_file2; + if (IS_ERR(bdev_file)) { + ret = PTR_ERR(bdev_file); + bdev_file = NULL; if (ret == -EBUSY) { dev_t dev; @@ -2610,7 +2611,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, args->path = path; args->sb = sb; args->sb_disk = sb_disk; - args->bdev_handle = bdev_handle; + args->bdev_file = bdev_file; args->holder = holder; register_device_async(args); /* No wait and returns to user space */ @@ -2619,14 +2620,14 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (SB_IS_BDEV(sb)) { mutex_lock(&bch_register_lock); - ret = register_bdev(sb, sb_disk, bdev_handle, holder); + ret = register_bdev(sb, sb_disk, bdev_file, holder); mutex_unlock(&bch_register_lock); /* blkdev_put() will be called in cached_dev_free() */ if (ret < 0) goto out_free_sb; } else { /* blkdev_put() will be called in bch_cache_release() */ - ret = register_cache(sb, sb_disk, bdev_handle, holder); + ret = register_cache(sb, sb_disk, bdev_file, holder); if (ret) goto out_free_sb; } @@ -2642,8 +2643,8 @@ out_free_holder: out_put_sb_page: put_page(virt_to_page(sb_disk)); out_blkdev_put: - if (bdev_handle) - bdev_release(bdev_handle); + if (bdev_file) + fput(bdev_file); out_free_sb: kfree(sb); out_free_path: diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 095b9b49aa82..e6757a30dcca 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -22,6 +22,8 @@ #include "dm-ima.h" #define DM_RESERVED_MAX_IOS 1024 +#define DM_MAX_TARGETS 1048576 +#define DM_MAX_TARGET_PARAMS 1024 struct dm_io; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 855b482cbff1..59445763e55a 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -53,15 +53,17 @@ struct convert_context { struct completion restart; struct bio *bio_in; - struct bio *bio_out; struct bvec_iter iter_in; + struct bio *bio_out; struct bvec_iter iter_out; - u64 cc_sector; atomic_t cc_pending; + u64 cc_sector; union { struct skcipher_request *req; struct aead_request *req_aead; } r; + bool aead_recheck; + bool aead_failed; }; @@ -73,10 +75,8 @@ struct dm_crypt_io { struct bio *base_bio; u8 *integrity_metadata; bool integrity_metadata_from_pool:1; - bool in_tasklet:1; struct work_struct work; - struct tasklet_struct tasklet; struct convert_context ctx; @@ -84,6 +84,8 @@ struct dm_crypt_io { blk_status_t error; sector_t sector; + struct bvec_iter saved_bi_iter; + struct rb_node rb_node; } CRYPTO_MINALIGN_ATTR; @@ -1372,10 +1374,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc, if (r == -EBADMSG) { sector_t s = le64_to_cpu(*sector); - DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu", - ctx->bio_in->bi_bdev, s); - dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead", - ctx->bio_in, s, 0); + ctx->aead_failed = true; + if (ctx->aead_recheck) { + DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu", + ctx->bio_in->bi_bdev, s); + 
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead", + ctx->bio_in, s, 0); + } } if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) @@ -1759,10 +1764,11 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, io->base_bio = bio; io->sector = sector; io->error = 0; + io->ctx.aead_recheck = false; + io->ctx.aead_failed = false; io->ctx.r.req = NULL; io->integrity_metadata = NULL; io->integrity_metadata_from_pool = false; - io->in_tasklet = false; atomic_set(&io->io_pending, 0); } @@ -1771,12 +1777,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io) atomic_inc(&io->io_pending); } -static void kcryptd_io_bio_endio(struct work_struct *work) -{ - struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); - - bio_endio(io->base_bio); -} +static void kcryptd_queue_read(struct dm_crypt_io *io); /* * One of the bios was finished. Check for completion of @@ -1791,6 +1792,15 @@ static void crypt_dec_pending(struct dm_crypt_io *io) if (!atomic_dec_and_test(&io->io_pending)) return; + if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) && + cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) { + io->ctx.aead_recheck = true; + io->ctx.aead_failed = false; + io->error = 0; + kcryptd_queue_read(io); + return; + } + if (io->ctx.r.req) crypt_free_req(cc, io->ctx.r.req, base_bio); @@ -1801,20 +1811,6 @@ static void crypt_dec_pending(struct dm_crypt_io *io) base_bio->bi_status = error; - /* - * If we are running this function from our tasklet, - * we can't call bio_endio() here, because it will call - * clone_endio() from dm.c, which in turn will - * free the current struct dm_crypt_io structure with - * our tasklet. In this case we need to delay bio_endio() - * execution to after the tasklet is done and dequeued. 
- */ - if (io->in_tasklet) { - INIT_WORK(&io->work, kcryptd_io_bio_endio); - queue_work(cc->io_queue, &io->work); - return; - } - bio_endio(base_bio); } @@ -1840,15 +1836,19 @@ static void crypt_endio(struct bio *clone) struct dm_crypt_io *io = clone->bi_private; struct crypt_config *cc = io->cc; unsigned int rw = bio_data_dir(clone); - blk_status_t error; + blk_status_t error = clone->bi_status; + + if (io->ctx.aead_recheck && !error) { + kcryptd_queue_crypt(io); + return; + } /* * free the processed pages */ - if (rw == WRITE) + if (rw == WRITE || io->ctx.aead_recheck) crypt_free_buffer_pages(cc, clone); - error = clone->bi_status; bio_put(clone); if (rw == READ && !error) { @@ -1869,6 +1869,22 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) struct crypt_config *cc = io->cc; struct bio *clone; + if (io->ctx.aead_recheck) { + if (!(gfp & __GFP_DIRECT_RECLAIM)) + return 1; + crypt_inc_pending(io); + clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); + if (unlikely(!clone)) { + crypt_dec_pending(io); + return 1; + } + clone->bi_iter.bi_sector = cc->start + io->sector; + crypt_convert_init(cc, &io->ctx, clone, clone, io->sector); + io->saved_bi_iter = clone->bi_iter; + dm_submit_bio_remap(io->base_bio, clone); + return 0; + } + /* * We need the original biovec array in order to decrypt the whole bio * data *afterwards* -- thanks to immutable biovecs we don't need to @@ -2095,6 +2111,12 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) io->ctx.bio_out = clone; io->ctx.iter_out = clone->bi_iter; + if (crypt_integrity_aead(cc)) { + bio_copy_data(clone, io->base_bio); + io->ctx.bio_in = clone; + io->ctx.iter_in = clone->bi_iter; + } + sector += bio_sectors(clone); crypt_inc_pending(io); @@ -2131,6 +2153,14 @@ dec: static void kcryptd_crypt_read_done(struct dm_crypt_io *io) { + if (io->ctx.aead_recheck) { + if (!io->error) { + io->ctx.bio_in->bi_iter = io->saved_bi_iter; + bio_copy_data(io->base_bio, io->ctx.bio_in); + } + crypt_free_buffer_pages(io->cc, io->ctx.bio_in); + bio_put(io->ctx.bio_in); + } crypt_dec_pending(io); } @@ -2160,11 +2190,17 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) crypt_inc_pending(io); - crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, - io->sector); + if (io->ctx.aead_recheck) { + io->ctx.cc_sector = io->sector + cc->iv_offset; + r = crypt_convert(cc, &io->ctx, + test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true); + } else { + crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, + io->sector); - r = crypt_convert(cc, &io->ctx, - test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true); + r = crypt_convert(cc, &io->ctx, + test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true); + } /* * Crypto API backlogged the request, because its queue was full * and we're in softirq context, so continue from a workqueue @@ -2206,10 +2242,13 @@ static void kcryptd_async_done(void *data, int error) if (error == -EBADMSG) { sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)); - DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu", - ctx->bio_in->bi_bdev, s); - dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead", - ctx->bio_in, s, 0); + ctx->aead_failed = true; + if (ctx->aead_recheck) { + DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu", + ctx->bio_in->bi_bdev, s); + dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead", + ctx->bio_in, s, 0); + } io->error = BLK_STS_PROTECTION; } else if (error < 0) io->error = BLK_STS_IOERR; @@ -2246,11 +2285,6 @@ static void kcryptd_crypt(struct work_struct 
*work) kcryptd_crypt_write_convert(io); } -static void kcryptd_crypt_tasklet(unsigned long work) -{ - kcryptd_crypt((struct work_struct *)work); -} - static void kcryptd_queue_crypt(struct dm_crypt_io *io) { struct crypt_config *cc = io->cc; @@ -2262,15 +2296,10 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io) * irqs_disabled(): the kernel may run some IO completion from the idle thread, but * it is being executed with irqs disabled. */ - if (in_hardirq() || irqs_disabled()) { - io->in_tasklet = true; - tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work); - tasklet_schedule(&io->tasklet); + if (!(in_hardirq() || irqs_disabled())) { + kcryptd_crypt(&io->work); return; } - - kcryptd_crypt(&io->work); - return; } INIT_WORK(&io->work, kcryptd_crypt); @@ -3144,7 +3173,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar sval = strchr(opt_string + strlen("integrity:"), ':') + 1; if (!strcasecmp(sval, "aead")) { set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags); - } else if (strcasecmp(sval, "none")) { + } else if (strcasecmp(sval, "none")) { ti->error = "Unknown integrity profile"; return -EINVAL; } @@ -3673,7 +3702,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type crypt_target = { .name = "crypt", - .version = {1, 24, 0}, + .version = {1, 25, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index c5f03aab4552..1fc901df84eb 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -278,6 +278,8 @@ struct dm_integrity_c { atomic64_t number_of_mismatches; + mempool_t recheck_pool; + struct notifier_block reboot_notifier; }; @@ -1689,6 +1691,77 @@ failed: get_random_bytes(result, ic->tag_size); } +static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum) +{ + struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); + struct dm_integrity_c *ic = dio->ic; + struct bvec_iter iter; + struct bio_vec bv; + sector_t sector, logical_sector, area, offset; + struct page *page; + void *buffer; + + get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); + dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, + &dio->metadata_offset); + sector = get_data_sector(ic, area, offset); + logical_sector = dio->range.logical_sector; + + page = mempool_alloc(&ic->recheck_pool, GFP_NOIO); + buffer = page_to_virt(page); + + __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) { + unsigned pos = 0; + + do { + char *mem; + int r; + struct dm_io_request io_req; + struct dm_io_region io_loc; + io_req.bi_opf = REQ_OP_READ; + io_req.mem.type = DM_IO_KMEM; + io_req.mem.ptr.addr = buffer; + io_req.notify.fn = NULL; + io_req.client = ic->io; + io_loc.bdev = ic->dev->bdev; + io_loc.sector = sector; + io_loc.count = ic->sectors_per_block; + + r = dm_io(&io_req, 1, &io_loc, NULL); + if (unlikely(r)) { + dio->bi_status = errno_to_blk_status(r); + goto free_ret; + } + + integrity_sector_checksum(ic, logical_sector, buffer, checksum); + r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block, + &dio->metadata_offset, ic->tag_size, TAG_CMP); + if (r) { + if (r > 0) { + DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx", + bio->bi_bdev, logical_sector); + atomic64_inc(&ic->number_of_mismatches); + dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum", + bio, logical_sector, 0); + r = -EILSEQ; + } + dio->bi_status = 
errno_to_blk_status(r); + goto free_ret; + } + + mem = bvec_kmap_local(&bv); + memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT); + kunmap_local(mem); + + pos += ic->sectors_per_block << SECTOR_SHIFT; + sector += ic->sectors_per_block; + logical_sector += ic->sectors_per_block; + } while (pos < bv.bv_len); + } +free_ret: + mempool_free(page, &ic->recheck_pool); +} + static void integrity_metadata(struct work_struct *w) { struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); @@ -1776,15 +1849,8 @@ again: checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE); if (unlikely(r)) { if (r > 0) { - sector_t s; - - s = sector - ((r + ic->tag_size - 1) / ic->tag_size); - DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx", - bio->bi_bdev, s); - r = -EILSEQ; - atomic64_inc(&ic->number_of_mismatches); - dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum", - bio, s, 0); + integrity_recheck(dio, checksums); + goto skip_io; } if (likely(checksums != checksums_onstack)) kfree(checksums); @@ -4261,6 +4327,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv goto bad; } + r = mempool_init_page_pool(&ic->recheck_pool, 1, 0); + if (r) { + ti->error = "Cannot allocate mempool"; + goto bad; + } + ic->metadata_wq = alloc_workqueue("dm-integrity-metadata", WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE); if (!ic->metadata_wq) { @@ -4609,6 +4681,7 @@ static void dm_integrity_dtr(struct dm_target *ti) kvfree(ic->bbs); if (ic->bufio) dm_bufio_client_destroy(ic->bufio); + mempool_exit(&ic->recheck_pool); mempool_exit(&ic->journal_io_mempool); if (ic->io) dm_io_client_destroy(ic->io); @@ -4661,7 +4734,7 @@ static void dm_integrity_dtr(struct dm_target *ti) static struct target_type integrity_target = { .name = "integrity", - .version = {1, 10, 0}, + .version = {1, 11, 0}, .module = THIS_MODULE, .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, .ctr = dm_integrity_ctr, diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index e65058e0ed06..3b1ad7127cb8 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1941,7 +1941,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern minimum_data_size - sizeof(param_kernel->version))) return -EFAULT; - if (param_kernel->data_size < minimum_data_size) { + if (unlikely(param_kernel->data_size < minimum_data_size) || + unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS)) { DMERR("Invalid data size in the ioctl structure: %u", param_kernel->data_size); return -EINVAL; diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index eb009d6bb03a..17e9af60bbf7 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -213,6 +213,7 @@ struct raid_dev { #define RT_FLAG_RS_IN_SYNC 6 #define RT_FLAG_RS_RESYNCING 7 #define RT_FLAG_RS_GROW 8 +#define RT_FLAG_RS_FROZEN 9 /* Array elements of 64 bit needed for rebuild/failed disk bits */ #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) @@ -3240,11 +3241,12 @@ size_check: rs->md.ro = 1; rs->md.in_sync = 1; - /* Keep array frozen until resume. */ - set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); - /* Has to be held on running the array */ mddev_suspend_and_lock_nointr(&rs->md); + + /* Keep array frozen until resume. 
*/ + md_frozen_sync_thread(&rs->md); + r = md_run(&rs->md); rs->md.in_sync = 0; /* Assume already marked dirty */ if (r) { @@ -3339,7 +3341,8 @@ static int raid_map(struct dm_target *ti, struct bio *bio) if (unlikely(bio_end_sector(bio) > mddev->array_sectors)) return DM_MAPIO_REQUEUE; - md_handle_request(mddev, bio); + if (unlikely(!md_handle_request(mddev, bio))) + return DM_MAPIO_REQUEUE; return DM_MAPIO_SUBMITTED; } @@ -3718,21 +3721,33 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv, { struct raid_set *rs = ti->private; struct mddev *mddev = &rs->md; + int ret = 0; if (!mddev->pers || !mddev->pers->sync_request) return -EINVAL; - if (!strcasecmp(argv[0], "frozen")) - set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - else - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + if (test_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags) || + test_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags)) + return -EBUSY; - if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) { - if (mddev->sync_thread) { - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev); - } - } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle) + if (!strcasecmp(argv[0], "frozen")) { + ret = mddev_lock(mddev); + if (ret) + return ret; + + md_frozen_sync_thread(mddev); + mddev_unlock(mddev); + } else if (!strcasecmp(argv[0], "idle")) { + ret = mddev_lock(mddev); + if (ret) + return ret; + + md_idle_sync_thread(mddev); + mddev_unlock(mddev); + } + + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + if (decipher_sync_action(mddev, mddev->recovery) != st_idle) return -EBUSY; else if (!strcasecmp(argv[0], "resync")) ; /* MD_RECOVERY_NEEDED set below */ @@ -3791,15 +3806,46 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs)); } +static void raid_presuspend(struct dm_target *ti) +{ + struct raid_set *rs = ti->private; + struct mddev *mddev = &rs->md; + + /* + * From now on, disallow raid_message() to change sync_thread until + * resume, raid_postsuspend() is too late. + */ + set_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags); + + if (!reshape_interrupted(mddev)) + return; + + /* + * For raid456, if reshape is interrupted, IO across reshape position + * will never make progress, while caller will wait for IO to be done. + * Inform raid456 to handle those IO to prevent deadlock. + */ + if (mddev->pers && mddev->pers->prepare_suspend) + mddev->pers->prepare_suspend(mddev); +} + +static void raid_presuspend_undo(struct dm_target *ti) +{ + struct raid_set *rs = ti->private; + + clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags); +} + static void raid_postsuspend(struct dm_target *ti) { struct raid_set *rs = ti->private; if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) { - /* Writes have to be stopped before suspending to avoid deadlocks. */ - if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery)) - md_stop_writes(&rs->md); - + /* + * sync_thread must be stopped during suspend, and writes have + * to be stopped before suspending to avoid deadlocks. 
+ */ + md_stop_writes(&rs->md); mddev_suspend(&rs->md, false); } } @@ -4012,8 +4058,6 @@ static int raid_preresume(struct dm_target *ti) } /* Check for any resize/reshape on @rs and adjust/initiate */ - /* Be prepared for mddev_resume() in raid_resume() */ - set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) { set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); mddev->resync_min = mddev->recovery_cp; @@ -4047,7 +4091,9 @@ static void raid_resume(struct dm_target *ti) * Take this opportunity to check whether any failed * devices are reachable again. */ + mddev_lock_nointr(mddev); attempt_restore_of_faulty_devices(rs); + mddev_unlock(mddev); } if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) { @@ -4055,10 +4101,13 @@ static void raid_resume(struct dm_target *ti) if (mddev->delta_disks < 0) rs_set_capacity(rs); + WARN_ON_ONCE(!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)); + WARN_ON_ONCE(test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); + clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags); mddev_lock_nointr(mddev); - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); mddev->ro = 0; mddev->in_sync = 0; + md_unfrozen_sync_thread(mddev); mddev_unlock_and_resume(mddev); } } @@ -4074,6 +4123,8 @@ static struct target_type raid_target = { .message = raid_message, .iterate_devices = raid_iterate_devices, .io_hints = raid_io_hints, + .presuspend = raid_presuspend, + .presuspend_undo = raid_presuspend_undo, .postsuspend = raid_postsuspend, .preresume = raid_preresume, .resume = raid_resume, diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index bdc14ec99814..1e5d988f44da 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -66,6 +66,9 @@ struct dm_stats_last_position { unsigned int last_rw; }; +#define DM_STAT_MAX_ENTRIES 8388608 +#define DM_STAT_MAX_HISTOGRAM_ENTRIES 134217728 + /* * A typo on the command line could possibly make the kernel run out of memory * and crash. To prevent the crash we account all used memory. 
We fail if we @@ -285,6 +288,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1)) return -EOVERFLOW; + if (n_entries > DM_STAT_MAX_ENTRIES) + return -EOVERFLOW; + shared_alloc_size = struct_size(s, stat_shared, n_entries); if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries) return -EOVERFLOW; @@ -297,6 +303,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long)) return -EOVERFLOW; + if ((n_histogram_entries + 1) * (size_t)n_entries > DM_STAT_MAX_HISTOGRAM_ENTRIES) + return -EOVERFLOW; + if (!check_shared_memory(shared_alloc_size + histogram_alloc_size + num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size))) return -ENOMEM; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 260b5b8f2b0d..41f1d731ae5a 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -129,7 +129,12 @@ static int alloc_targets(struct dm_table *t, unsigned int num) int dm_table_create(struct dm_table **result, blk_mode_t mode, unsigned int num_targets, struct mapped_device *md) { - struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); + struct dm_table *t; + + if (num_targets > DM_MAX_TARGETS) + return -EOVERFLOW; + + t = kzalloc(sizeof(*t), GFP_KERNEL); if (!t) return -ENOMEM; @@ -144,7 +149,7 @@ int dm_table_create(struct dm_table **result, blk_mode_t mode, if (!num_targets) { kfree(t); - return -ENOMEM; + return -EOVERFLOW; } if (alloc_targets(t, num_targets)) { diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 14e58ae70521..1b591bfa90d5 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -482,6 +482,63 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io, return 0; } +static int verity_recheck_copy(struct dm_verity *v, struct dm_verity_io *io, + u8 *data, size_t len) +{ + memcpy(data, io->recheck_buffer, len); + io->recheck_buffer += len; + + return 0; +} + +static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io, + struct bvec_iter start, sector_t cur_block) +{ + struct page *page; + void *buffer; + int r; + struct dm_io_request io_req; + struct dm_io_region io_loc; + + page = mempool_alloc(&v->recheck_pool, GFP_NOIO); + buffer = page_to_virt(page); + + io_req.bi_opf = REQ_OP_READ; + io_req.mem.type = DM_IO_KMEM; + io_req.mem.ptr.addr = buffer; + io_req.notify.fn = NULL; + io_req.client = v->io; + io_loc.bdev = v->data_dev->bdev; + io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT); + io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT); + r = dm_io(&io_req, 1, &io_loc, NULL); + if (unlikely(r)) + goto free_ret; + + r = verity_hash(v, verity_io_hash_req(v, io), buffer, + 1 << v->data_dev_block_bits, + verity_io_real_digest(v, io), true); + if (unlikely(r)) + goto free_ret; + + if (memcmp(verity_io_real_digest(v, io), + verity_io_want_digest(v, io), v->digest_size)) { + r = -EIO; + goto free_ret; + } + + io->recheck_buffer = buffer; + r = verity_for_bv_block(v, io, &start, verity_recheck_copy); + if (unlikely(r)) + goto free_ret; + + r = 0; +free_ret: + mempool_free(page, &v->recheck_pool); + + return r; +} + static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io, u8 *data, size_t len) { @@ -508,9 +565,7 @@ static int verity_verify_io(struct dm_verity_io *io) { bool 
is_zero; struct dm_verity *v = io->v; -#if defined(CONFIG_DM_VERITY_FEC) struct bvec_iter start; -#endif struct bvec_iter iter_copy; struct bvec_iter *iter; struct crypto_wait wait; @@ -561,10 +616,7 @@ static int verity_verify_io(struct dm_verity_io *io) if (unlikely(r < 0)) return r; -#if defined(CONFIG_DM_VERITY_FEC) - if (verity_fec_is_enabled(v)) - start = *iter; -#endif + start = *iter; r = verity_for_io_block(v, io, iter, &wait); if (unlikely(r < 0)) return r; @@ -586,6 +638,10 @@ static int verity_verify_io(struct dm_verity_io *io) * tasklet since it may sleep, so fallback to work-queue. */ return -EAGAIN; + } else if (verity_recheck(v, io, start, cur_block) == 0) { + if (v->validated_blocks) + set_bit(cur_block, v->validated_blocks); + continue; #if defined(CONFIG_DM_VERITY_FEC) } else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, cur_block, NULL, &start) == 0) { @@ -645,23 +701,6 @@ static void verity_work(struct work_struct *w) verity_finish_io(io, errno_to_blk_status(verity_verify_io(io))); } -static void verity_tasklet(unsigned long data) -{ - struct dm_verity_io *io = (struct dm_verity_io *)data; - int err; - - io->in_tasklet = true; - err = verity_verify_io(io); - if (err == -EAGAIN || err == -ENOMEM) { - /* fallback to retrying with work-queue */ - INIT_WORK(&io->work, verity_work); - queue_work(io->v->verify_wq, &io->work); - return; - } - - verity_finish_io(io, errno_to_blk_status(err)); -} - static void verity_end_io(struct bio *bio) { struct dm_verity_io *io = bio->bi_private; @@ -674,13 +713,8 @@ static void verity_end_io(struct bio *bio) return; } - if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) { - tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io); - tasklet_schedule(&io->tasklet); - } else { - INIT_WORK(&io->work, verity_work); - queue_work(io->v->verify_wq, &io->work); - } + INIT_WORK(&io->work, verity_work); + queue_work(io->v->verify_wq, &io->work); } /* @@ -963,6 +997,10 @@ static void verity_dtr(struct dm_target *ti) if (v->verify_wq) destroy_workqueue(v->verify_wq); + mempool_exit(&v->recheck_pool); + if (v->io) + dm_io_client_destroy(v->io); + if (v->bufio) dm_bufio_client_destroy(v->bufio); @@ -1401,6 +1439,20 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv) } v->hash_blocks = hash_position; + r = mempool_init_page_pool(&v->recheck_pool, 1, 0); + if (unlikely(r)) { + ti->error = "Cannot allocate mempool"; + goto bad; + } + + v->io = dm_io_client_create(); + if (IS_ERR(v->io)) { + r = PTR_ERR(v->io); + v->io = NULL; + ti->error = "Cannot allocate dm io"; + goto bad; + } + v->bufio = dm_bufio_client_create(v->hash_dev->bdev, 1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux), dm_bufio_alloc_callback, NULL, @@ -1508,7 +1560,7 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i static struct target_type verity_target = { .name = "verity", .features = DM_TARGET_IMMUTABLE, - .version = {1, 9, 0}, + .version = {1, 10, 0}, .module = THIS_MODULE, .ctr = verity_ctr, .dtr = verity_dtr, diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index f9d522c870e6..db93a91169d5 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -11,6 +11,7 @@ #ifndef DM_VERITY_H #define DM_VERITY_H +#include <linux/dm-io.h> #include <linux/dm-bufio.h> #include <linux/device-mapper.h> #include <linux/interrupt.h> @@ -68,6 +69,9 @@ struct dm_verity { unsigned long *validated_blocks; /* bitset blocks validated */ char *signature_key_desc; /* signature 
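The verity changes give a failing block one more chance before FEC: verity_recheck() re-reads the whole data block synchronously through dm-io into a mempool page, hashes it again, and only if that still mismatches does verity_verify_io() fall through to verity_fec_decode(). A rough userspace model of that ordering; read_block(), hash_block() and fec_decode() are stand-in stubs, not dm-verity functions, and the FNV hash merely plays the role of the real digest:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 4096

/* Stand-in stubs: pretend the first read of block 7 is corrupted in flight. */
static int read_attempts;

static void read_block(uint64_t blk, uint8_t *buf)
{
        memset(buf, (int)blk, BLOCK_SIZE);
        if (blk == 7 && read_attempts++ == 0)
                buf[0] ^= 0xff;                 /* transient corruption */
}

static uint32_t hash_block(const uint8_t *buf)
{
        uint32_t h = 2166136261u;               /* FNV-1a, stand-in for the real digest */

        for (size_t i = 0; i < BLOCK_SIZE; i++)
                h = (h ^ buf[i]) * 16777619u;
        return h;
}

static bool fec_decode(uint64_t blk, uint8_t *buf)
{
        (void)blk; (void)buf;
        return false;                           /* model: no FEC available */
}

/* Mirrors the new ordering: verify, re-read and verify once, then FEC. */
static bool verify_block(uint64_t blk, uint8_t *buf, uint32_t want)
{
        if (hash_block(buf) == want)
                return true;
        read_block(blk, buf);                   /* synchronous re-read, like verity_recheck() */
        if (hash_block(buf) == want)
                return true;
        return fec_decode(blk, buf);
}

int main(void)
{
        uint8_t good[BLOCK_SIZE], io[BLOCK_SIZE];
        uint32_t want;

        memset(good, 7, BLOCK_SIZE);
        want = hash_block(good);

        read_block(7, io);                      /* first read comes back corrupted */
        printf("block 7 %s\n", verify_block(7, io, want) ? "recovered by recheck" : "bad");
        return 0;
}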
keyring reference */ + + struct dm_io_client *io; + mempool_t recheck_pool; }; struct dm_verity_io { @@ -76,14 +80,15 @@ struct dm_verity_io { /* original value of bio->bi_end_io */ bio_end_io_t *orig_bi_end_io; + struct bvec_iter iter; + sector_t block; unsigned int n_blocks; bool in_tasklet; - struct bvec_iter iter; - struct work_struct work; - struct tasklet_struct tasklet; + + char *recheck_buffer; /* * Three variably-size fields follow this struct: diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 074cb785eafc..b463c28c39ad 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -299,7 +299,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) long i; wc->memory_map = NULL; - pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL); + pages = vmalloc_array(p, sizeof(struct page *)); if (!pages) { r = -ENOMEM; goto err2; @@ -330,7 +330,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) r = -ENOMEM; goto err3; } - kvfree(pages); + vfree(pages); wc->memory_vmapped = true; } @@ -341,7 +341,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) return 0; err3: - kvfree(pages); + vfree(pages); err2: dax_read_unlock(id); err1: @@ -962,7 +962,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc) if (wc->entries) return 0; - wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks)); + wc->entries = vmalloc_array(wc->n_blocks, sizeof(struct wc_entry)); if (!wc->entries) return -ENOMEM; for (b = 0; b < wc->n_blocks; b++) { diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index fdfe30f7b697..8156881a31de 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -1655,10 +1655,13 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone) if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) { struct dmz_dev *dev = zone->dev; + unsigned int noio_flag; + noio_flag = memalloc_noio_save(); ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET, dmz_start_sect(zmd, zone), - zmd->zone_nr_sectors, GFP_NOIO); + zmd->zone_nr_sectors); + memalloc_noio_restore(noio_flag); if (ret) { dmz_dev_err(dev, "Reset zone %u failed %d", zone->id, ret); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 8dcabf84d866..447e132d09b5 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -726,7 +726,8 @@ static struct table_device *open_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode) { struct table_device *td; - struct bdev_handle *bdev_handle; + struct file *bdev_file; + struct block_device *bdev; u64 part_off; int r; @@ -735,34 +736,36 @@ static struct table_device *open_table_device(struct mapped_device *md, return ERR_PTR(-ENOMEM); refcount_set(&td->count, 1); - bdev_handle = bdev_open_by_dev(dev, mode, _dm_claim_ptr, NULL); - if (IS_ERR(bdev_handle)) { - r = PTR_ERR(bdev_handle); + bdev_file = bdev_file_open_by_dev(dev, mode, _dm_claim_ptr, NULL); + if (IS_ERR(bdev_file)) { + r = PTR_ERR(bdev_file); goto out_free_td; } + bdev = file_bdev(bdev_file); + /* * We can be called before the dm disk is added. In that case we can't * register the holder relation here. It will be done once add_disk was * called. 
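In the dm-zoned hunk, blkdev_zone_mgmt() loses its gfp_t argument and the caller instead wraps the call in the memory-allocation scope API, so any allocation made on the way down behaves as GFP_NOIO. A kernel-style sketch of the pattern, not buildable on its own; reset_one_zone() is an invented wrapper, not a dm-zoned function:

#include <linux/blkdev.h>
#include <linux/sched/mm.h>

/* Hypothetical wrapper showing the scope API used by dmz_reset_zone() above. */
static int reset_one_zone(struct block_device *bdev, sector_t start,
                          sector_t nr_sectors)
{
        unsigned int noio_flag;
        int ret;

        /*
         * Everything between save and restore allocates as if GFP_NOIO had
         * been passed explicitly, which is what the removed gfp_t argument
         * used to guarantee.
         */
        noio_flag = memalloc_noio_save();
        ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, start, nr_sectors);
        memalloc_noio_restore(noio_flag);

        return ret;
}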
*/ if (md->disk->slave_dir) { - r = bd_link_disk_holder(bdev_handle->bdev, md->disk); + r = bd_link_disk_holder(bdev, md->disk); if (r) goto out_blkdev_put; } td->dm_dev.mode = mode; - td->dm_dev.bdev = bdev_handle->bdev; - td->dm_dev.bdev_handle = bdev_handle; - td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev, &part_off, + td->dm_dev.bdev = bdev; + td->dm_dev.bdev_file = bdev_file; + td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL); format_dev_t(td->dm_dev.name, dev); list_add(&td->list, &md->table_devices); return td; out_blkdev_put: - bdev_release(bdev_handle); + fput(bdev_file); out_free_td: kfree(td); return ERR_PTR(r); @@ -775,7 +778,7 @@ static void close_table_device(struct table_device *td, struct mapped_device *md { if (md->disk->slave_dir) bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); - bdev_release(td->dm_dev.bdev_handle); + fput(td->dm_dev.bdev_file); put_dax(td->dm_dev.dax_dev); list_del(&td->list); kfree(td); @@ -2098,8 +2101,8 @@ static struct mapped_device *alloc_dev(int minor) * established. If request-based table is loaded: blk-mq will * override accordingly. */ - md->disk = blk_alloc_disk(md->numa_node_id); - if (!md->disk) + md->disk = blk_alloc_disk(NULL, md->numa_node_id); + if (IS_ERR(md->disk)) goto bad; md->queue = md->disk->queue; diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 9672f75c3050..059afc24c08b 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -234,7 +234,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap, sector_t doff; bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev; - if (pg_index == store->file_pages - 1) { + /* we compare length (page numbers), not page offset. */ + if ((pg_index - store->sb_index) == store->file_pages - 1) { unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1); if (last_page_size == 0) @@ -438,8 +439,8 @@ static void filemap_write_page(struct bitmap *bitmap, unsigned long pg_index, struct page *page = store->filemap[pg_index]; if (mddev_is_clustered(bitmap->mddev)) { - pg_index += bitmap->cluster_slot * - DIV_ROUND_UP(store->bytes, PAGE_SIZE); + /* go to node bitmap area starting point */ + pg_index += store->sb_index; } if (store->file) @@ -952,6 +953,7 @@ static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) unsigned long index = file_page_index(store, chunk); unsigned long node_offset = 0; + index += store->sb_index; if (mddev_is_clustered(bitmap->mddev)) node_offset = bitmap->cluster_slot * store->file_pages; @@ -982,6 +984,7 @@ static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block) unsigned long index = file_page_index(store, chunk); unsigned long node_offset = 0; + index += store->sb_index; if (mddev_is_clustered(bitmap->mddev)) node_offset = bitmap->cluster_slot * store->file_pages; @@ -1043,9 +1046,8 @@ void md_bitmap_unplug(struct bitmap *bitmap) if (dirty || need_write) { if (!writing) { md_bitmap_wait_writes(bitmap); - if (bitmap->mddev->queue) - blk_add_trace_msg(bitmap->mddev->queue, - "md bitmap_unplug"); + mddev_add_trace_msg(bitmap->mddev, + "md bitmap_unplug"); } clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING); filemap_write_page(bitmap, i, false); @@ -1316,9 +1318,7 @@ void md_bitmap_daemon_work(struct mddev *mddev) } bitmap->allclean = 1; - if (bitmap->mddev->queue) - blk_add_trace_msg(bitmap->mddev->queue, - "md bitmap_daemon_work"); + mddev_add_trace_msg(bitmap->mddev, "md bitmap_daemon_work"); /* Any file-page which is PENDING now needs to be written. 
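The bitmap fixes consistently add store->sb_index when converting a chunk to a page in the backing file, so a bitmap whose area starts some pages into the shared file (as with clustered arrays) lands on the right absolute page, and __write_sb_page() now detects the last page by page count rather than absolute index. A simplified model of the index arithmetic; the structure and helper names below are illustrative, not the kernel's:

#include <stdio.h>

#define PAGE_SIZE      4096UL
#define BITS_PER_PAGE  (PAGE_SIZE * 8)

/*
 * Simplified model: this node's bitmap starts sb_index pages into the
 * shared file, and file_pages counts only the pages belonging to it.
 */
struct bitmap_store {
        unsigned long sb_index;     /* first page of this node's bitmap area */
        unsigned long file_pages;   /* pages used by this node's bitmap */
};

/* page (relative to the node's area) that holds the bit for @chunk */
static unsigned long file_page_index(unsigned long chunk)
{
        return chunk / BITS_PER_PAGE;
}

/* absolute page in the file, as md_bitmap_file_set_bit() now computes it */
static unsigned long abs_page(const struct bitmap_store *s, unsigned long chunk)
{
        return file_page_index(chunk) + s->sb_index;
}

/* the "last page" test from __write_sb_page(), applied to absolute pages */
static int is_last_page(const struct bitmap_store *s, unsigned long pg_index)
{
        return (pg_index - s->sb_index) == s->file_pages - 1;
}

int main(void)
{
        struct bitmap_store s = { .sb_index = 2, .file_pages = 3 };
        unsigned long pg = abs_page(&s, 40000);   /* bit 40000 -> relative page 1 */

        printf("chunk 40000 lives in file page %lu, last=%d\n", pg, is_last_page(&s, pg));
        printf("file page %lu last=%d\n", s.sb_index + 2, is_last_page(&s, s.sb_index + 2));
        return 0;
}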
* So set NEEDWRITE now, then after we make any last-minute changes diff --git a/drivers/md/md-linear.h b/drivers/md/md-linear.h deleted file mode 100644 index 5587eeedb882..000000000000 --- a/drivers/md/md-linear.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINEAR_H -#define _LINEAR_H - -struct dev_info { - struct md_rdev *rdev; - sector_t end_sector; -}; - -struct linear_conf -{ - struct rcu_head rcu; - sector_t array_sectors; - int raid_disks; /* a copy of mddev->raid_disks */ - struct dev_info disks[] __counted_by(raid_disks); -}; -#endif diff --git a/drivers/md/md-multipath.h b/drivers/md/md-multipath.h deleted file mode 100644 index b3099e5fc4d7..000000000000 --- a/drivers/md/md-multipath.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _MULTIPATH_H -#define _MULTIPATH_H - -struct multipath_info { - struct md_rdev *rdev; -}; - -struct mpconf { - struct mddev *mddev; - struct multipath_info *multipaths; - int raid_disks; - spinlock_t device_lock; - struct list_head retry_list; - - mempool_t pool; -}; - -/* - * this is our 'private' 'collective' MULTIPATH buffer head. - * it contains information about what kind of IO operations were started - * for this MULTIPATH operation, and about their status: - */ - -struct multipath_bh { - struct mddev *mddev; - struct bio *master_bio; - struct bio bio; - int path; - struct list_head retry_list; -}; -#endif diff --git a/drivers/md/md.c b/drivers/md/md.c index 2266358d8074..e575e74aabf5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -65,7 +65,6 @@ #include <linux/percpu-refcount.h> #include <linux/part_stat.h> -#include <trace/events/block.h> #include "md.h" #include "md-bitmap.h" #include "md-cluster.h" @@ -99,18 +98,6 @@ static void mddev_detach(struct mddev *mddev); static void export_rdev(struct md_rdev *rdev, struct mddev *mddev); static void md_wakeup_thread_directly(struct md_thread __rcu *thread); -enum md_ro_state { - MD_RDWR, - MD_RDONLY, - MD_AUTO_READ, - MD_MAX_STATE -}; - -static bool md_is_rdwr(struct mddev *mddev) -{ - return (mddev->ro == MD_RDWR); -} - /* * Default number of read corrections we'll attempt on an rdev * before ejecting it from the array. 
We divide the read error @@ -378,7 +365,7 @@ static bool is_suspended(struct mddev *mddev, struct bio *bio) return true; } -void md_handle_request(struct mddev *mddev, struct bio *bio) +bool md_handle_request(struct mddev *mddev, struct bio *bio) { check_suspended: if (is_suspended(mddev, bio)) { @@ -386,7 +373,7 @@ check_suspended: /* Bail out if REQ_NOWAIT is set for the bio */ if (bio->bi_opf & REQ_NOWAIT) { bio_wouldblock_error(bio); - return; + return true; } for (;;) { prepare_to_wait(&mddev->sb_wait, &__wait, @@ -402,10 +389,13 @@ check_suspended: if (!mddev->pers->make_request(mddev, bio)) { percpu_ref_put(&mddev->active_io); + if (!mddev->gendisk && mddev->pers->prepare_suspend) + return false; goto check_suspended; } percpu_ref_put(&mddev->active_io); + return true; } EXPORT_SYMBOL(md_handle_request); @@ -529,6 +519,24 @@ void mddev_resume(struct mddev *mddev) } EXPORT_SYMBOL_GPL(mddev_resume); +/* sync bdev before setting device to readonly or stopping raid*/ +static int mddev_set_closing_and_sync_blockdev(struct mddev *mddev, int opener_num) +{ + mutex_lock(&mddev->open_mutex); + if (mddev->pers && atomic_read(&mddev->openers) > opener_num) { + mutex_unlock(&mddev->open_mutex); + return -EBUSY; + } + if (test_and_set_bit(MD_CLOSING, &mddev->flags)) { + mutex_unlock(&mddev->open_mutex); + return -EBUSY; + } + mutex_unlock(&mddev->open_mutex); + + sync_blockdev(mddev->gendisk->part0); + return 0; +} + /* * Generic flush handling for md */ @@ -579,8 +587,12 @@ static void submit_flushes(struct work_struct *ws) rcu_read_lock(); } rcu_read_unlock(); - if (atomic_dec_and_test(&mddev->flush_pending)) + if (atomic_dec_and_test(&mddev->flush_pending)) { + /* The pair is percpu_ref_get() from md_flush_request() */ + percpu_ref_put(&mddev->active_io); + queue_work(md_wq, &mddev->flush_work); + } } static void md_submit_flush_data(struct work_struct *ws) @@ -2402,7 +2414,7 @@ int md_integrity_register(struct mddev *mddev) if (list_empty(&mddev->disks)) return 0; /* nothing to do */ - if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) + if (mddev_is_dm(mddev) || blk_get_integrity(mddev->gendisk)) return 0; /* shouldn't register, or already is */ rdev_for_each(rdev, mddev) { /* skip spares and non-functional disks */ @@ -2455,7 +2467,7 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) { struct blk_integrity *bi_mddev; - if (!mddev->gendisk) + if (mddev_is_dm(mddev)) return 0; bi_mddev = blk_get_integrity(mddev->gendisk); @@ -2562,6 +2574,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) fail: pr_warn("md: failed to register dev-%s for %s\n", b, mdname(mddev)); + mddev_destroy_serial_pool(mddev, rdev); return err; } @@ -2578,7 +2591,7 @@ static void export_rdev(struct md_rdev *rdev, struct mddev *mddev) if (test_bit(AutoDetected, &rdev->flags)) md_autodetect_dev(rdev->bdev->bd_dev); #endif - bdev_release(rdev->bdev_handle); + fput(rdev->bdev_file); rdev->bdev = NULL; kobject_put(&rdev->kobj); } @@ -2591,7 +2604,7 @@ static void md_kick_rdev_from_array(struct md_rdev *rdev) list_del_rcu(&rdev->same_set); pr_debug("md: unbind<%pg>\n", rdev->bdev); mddev_destroy_serial_pool(rdev->mddev, rdev); - rdev->mddev = NULL; + WRITE_ONCE(rdev->mddev, NULL); sysfs_remove_link(&rdev->kobj, "block"); sysfs_put(rdev->sysfs_state); sysfs_put(rdev->sysfs_unack_badblocks); @@ -2847,8 +2860,7 @@ repeat: pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", mdname(mddev), mddev->in_sync); - if (mddev->queue) - blk_add_trace_msg(mddev->queue, 
"md md_update_sb"); + mddev_add_trace_msg(mddev, "md md_update_sb"); rewrite: md_bitmap_update_sb(mddev->bitmap); rdev_for_each(rdev, mddev) { @@ -2929,7 +2941,6 @@ static int add_bound_rdev(struct md_rdev *rdev) set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_new_event(); - md_wakeup_thread(mddev->thread); return 0; } @@ -3044,10 +3055,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) if (err == 0) { md_kick_rdev_from_array(rdev); - if (mddev->pers) { + if (mddev->pers) set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); - md_wakeup_thread(mddev->thread); - } md_new_event(); } } @@ -3077,7 +3086,6 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) clear_bit(BlockedBadBlocks, &rdev->flags); wake_up(&rdev->blocked_wait); set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); - md_wakeup_thread(rdev->mddev->thread); err = 0; } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { @@ -3115,7 +3123,6 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) !test_bit(Replacement, &rdev->flags)) set_bit(WantReplacement, &rdev->flags); set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); - md_wakeup_thread(rdev->mddev->thread); err = 0; } else if (cmd_match(buf, "-want_replacement")) { /* Clearing 'want_replacement' is always allowed. @@ -3245,7 +3252,6 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len) if (rdev->raid_disk >= 0) return -EBUSY; set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); - md_wakeup_thread(rdev->mddev->thread); } else if (rdev->mddev->pers) { /* Activating a spare .. or possibly reactivating * if we ever get bitmaps working here. @@ -3339,8 +3345,7 @@ static ssize_t new_offset_store(struct md_rdev *rdev, if (kstrtoull(buf, 10, &new_offset) < 0) return -EINVAL; - if (mddev->sync_thread || - test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return -EBUSY; if (new_offset == rdev->data_offset) /* reset is always permitted */ @@ -3671,7 +3676,7 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, struct kernfs_node *kn = NULL; bool suspend = false; ssize_t rv; - struct mddev *mddev = rdev->mddev; + struct mddev *mddev = READ_ONCE(rdev->mddev); if (!entry->store) return -EIO; @@ -3773,16 +3778,16 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe if (err) goto out_clear_rdev; - rdev->bdev_handle = bdev_open_by_dev(newdev, + rdev->bdev_file = bdev_file_open_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE, super_format == -2 ? 
&claim_rdev : rdev, NULL); - if (IS_ERR(rdev->bdev_handle)) { + if (IS_ERR(rdev->bdev_file)) { pr_warn("md: could not open device unknown-block(%u,%u).\n", MAJOR(newdev), MINOR(newdev)); - err = PTR_ERR(rdev->bdev_handle); + err = PTR_ERR(rdev->bdev_file); goto out_clear_rdev; } - rdev->bdev = rdev->bdev_handle->bdev; + rdev->bdev = file_bdev(rdev->bdev_file); kobject_init(&rdev->kobj, &rdev_ktype); @@ -3813,7 +3818,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe return rdev; out_blkdev_put: - bdev_release(rdev->bdev_handle); + fput(rdev->bdev_file); out_clear_rdev: md_rdev_clear(rdev); out_free_rdev: @@ -4013,8 +4018,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) */ rv = -EBUSY; - if (mddev->sync_thread || - test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || mddev->reshape_position != MaxSector || mddev->sysfs_active) goto out_unlock; @@ -4164,7 +4168,6 @@ level_store(struct mddev *mddev, const char *buf, size_t len) mddev->in_sync = 1; del_timer_sync(&mddev->safemode_timer); } - blk_set_stacking_limits(&mddev->queue->limits); pers->run(mddev); set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (!mddev->thread) @@ -4471,8 +4474,8 @@ array_state_show(struct mddev *mddev, char *page) return sprintf(page, "%s\n", array_states[st]); } -static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev); -static int md_set_readonly(struct mddev *mddev, struct block_device *bdev); +static int do_md_stop(struct mddev *mddev, int ro); +static int md_set_readonly(struct mddev *mddev); static int restart_array(struct mddev *mddev); static ssize_t @@ -4489,6 +4492,17 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) case broken: /* cannot be set */ case bad_word: return -EINVAL; + case clear: + case readonly: + case inactive: + case read_auto: + if (!mddev->pers || !md_is_rdwr(mddev)) + break; + /* write sysfs will not open mddev and opener should be 0 */ + err = mddev_set_closing_and_sync_blockdev(mddev, 0); + if (err) + return err; + break; default: break; } @@ -4522,14 +4536,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) case inactive: /* stop an active array, return 0 otherwise */ if (mddev->pers) - err = do_md_stop(mddev, 2, NULL); + err = do_md_stop(mddev, 2); break; case clear: - err = do_md_stop(mddev, 0, NULL); + err = do_md_stop(mddev, 0); break; case readonly: if (mddev->pers) - err = md_set_readonly(mddev, NULL); + err = md_set_readonly(mddev); else { mddev->ro = MD_RDONLY; set_disk_ro(mddev->gendisk, 1); @@ -4539,7 +4553,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) case read_auto: if (mddev->pers) { if (md_is_rdwr(mddev)) - err = md_set_readonly(mddev, NULL); + err = md_set_readonly(mddev); else if (mddev->ro == MD_RDONLY) err = restart_array(mddev); if (err == 0) { @@ -4588,6 +4602,11 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) sysfs_notify_dirent_safe(mddev->sysfs_state); } mddev_unlock(mddev); + + if (st == readonly || st == read_auto || st == inactive || + (err && st == clear)) + clear_bit(MD_CLOSING, &mddev->flags); + return err ?: len; } static struct md_sysfs_entry md_array_state = @@ -4915,6 +4934,35 @@ static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq) mddev_lock_nointr(mddev); } +void md_idle_sync_thread(struct mddev *mddev) +{ + lockdep_assert_held(&mddev->reconfig_mutex); + + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + 
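Both the dm and md hunks replace struct bdev_handle with the file-based block device API: the device is opened with bdev_file_open_by_dev(), the struct block_device is obtained via file_bdev(), and the reference is dropped with fput(). A condensed kernel-style sketch of that life cycle; open_data_dev(), close_data_dev() and struct data_dev are invented for illustration:

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/file.h>

struct data_dev {
        struct file *bdev_file;         /* holds the open reference */
        struct block_device *bdev;      /* convenience pointer for I/O */
};

static int open_data_dev(struct data_dev *dd, dev_t devt, void *holder)
{
        dd->bdev_file = bdev_file_open_by_dev(devt,
                                              BLK_OPEN_READ | BLK_OPEN_WRITE,
                                              holder, NULL);
        if (IS_ERR(dd->bdev_file))
                return PTR_ERR(dd->bdev_file);

        dd->bdev = file_bdev(dd->bdev_file);
        return 0;
}

static void close_data_dev(struct data_dev *dd)
{
        fput(dd->bdev_file);            /* replaces bdev_release(handle) */
        dd->bdev_file = NULL;
        dd->bdev = NULL;
}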
stop_sync_thread(mddev, true, true); +} +EXPORT_SYMBOL_GPL(md_idle_sync_thread); + +void md_frozen_sync_thread(struct mddev *mddev) +{ + lockdep_assert_held(&mddev->reconfig_mutex); + + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + stop_sync_thread(mddev, true, false); +} +EXPORT_SYMBOL_GPL(md_frozen_sync_thread); + +void md_unfrozen_sync_thread(struct mddev *mddev) +{ + lockdep_assert_held(&mddev->reconfig_mutex); + + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); + sysfs_notify_dirent_safe(mddev->sysfs_action); +} +EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread); + static void idle_sync_thread(struct mddev *mddev) { mutex_lock(&mddev->sync_mutex); @@ -5706,6 +5754,51 @@ static const struct kobj_type md_ktype = { int mdp_major = 0; +/* stack the limit for all rdevs into lim */ +void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim) +{ + struct md_rdev *rdev; + + rdev_for_each(rdev, mddev) { + queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset, + mddev->gendisk->disk_name); + } +} +EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits); + +/* apply the extra stacking limits from a new rdev into mddev */ +int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev) +{ + struct queue_limits lim; + + if (mddev_is_dm(mddev)) + return 0; + + lim = queue_limits_start_update(mddev->gendisk->queue); + queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset, + mddev->gendisk->disk_name); + return queue_limits_commit_update(mddev->gendisk->queue, &lim); +} +EXPORT_SYMBOL_GPL(mddev_stack_new_rdev); + +/* update the optimal I/O size after a reshape */ +void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes) +{ + struct queue_limits lim; + + if (mddev_is_dm(mddev)) + return; + + /* don't bother updating io_opt if we can't suspend the array */ + if (mddev_suspend(mddev, false) < 0) + return; + lim = queue_limits_start_update(mddev->gendisk->queue); + lim.io_opt = lim.io_min * nr_stripes; + queue_limits_commit_update(mddev->gendisk->queue, &lim); + mddev_resume(mddev); +} +EXPORT_SYMBOL_GPL(mddev_update_io_opt); + static void mddev_delayed_delete(struct work_struct *ws) { struct mddev *mddev = container_of(ws, struct mddev, del_work); @@ -5770,10 +5863,11 @@ struct mddev *md_alloc(dev_t dev, char *name) */ mddev->hold_active = UNTIL_STOP; - error = -ENOMEM; - disk = blk_alloc_disk(NUMA_NO_NODE); - if (!disk) + disk = blk_alloc_disk(NULL, NUMA_NO_NODE); + if (IS_ERR(disk)) { + error = PTR_ERR(disk); goto out_free_mddev; + } disk->major = MAJOR(mddev->unit); disk->first_minor = unit << shift; @@ -5787,9 +5881,7 @@ struct mddev *md_alloc(dev_t dev, char *name) disk->fops = &md_fops; disk->private_data = mddev; - mddev->queue = disk->queue; - blk_set_stacking_limits(&mddev->queue->limits); - blk_queue_write_cache(mddev->queue, true, true); + blk_queue_write_cache(disk->queue, true, true); disk->events |= DISK_EVENT_MEDIA_CHANGE; mddev->gendisk = disk; error = add_disk(disk); @@ -5931,7 +6023,7 @@ int md_run(struct mddev *mddev) invalidate_bdev(rdev->bdev); if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) { mddev->ro = MD_RDONLY; - if (mddev->gendisk) + if (!mddev_is_dm(mddev)) set_disk_ro(mddev->gendisk, 1); } @@ -6034,7 +6126,10 @@ int md_run(struct mddev *mddev) pr_warn("True protection against single-disk failure might be compromised.\n"); } - mddev->recovery = 0; + /* dm-raid expect sync_thread to be frozen until resume */ + if (mddev->gendisk) + mddev->recovery = 0; + /* may 
be over-ridden by personality */ mddev->resync_max_sectors = mddev->dev_sectors; @@ -6090,7 +6185,8 @@ int md_run(struct mddev *mddev) } } - if (mddev->queue) { + if (!mddev_is_dm(mddev)) { + struct request_queue *q = mddev->gendisk->queue; bool nonrot = true; rdev_for_each(rdev, mddev) { @@ -6102,14 +6198,14 @@ int md_run(struct mddev *mddev) if (mddev->degraded) nonrot = false; if (nonrot) - blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); else - blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); - blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue); + blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q); /* Set the NOWAIT flags if all underlying devices support it */ if (nowait) - blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue); + blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q); } if (pers->sync_request) { if (mddev->kobj.sd && @@ -6188,7 +6284,6 @@ int do_md_run(struct mddev *mddev) /* run start up tasks that require md_thread */ md_start(mddev); - md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); @@ -6209,7 +6304,6 @@ int md_start(struct mddev *mddev) if (mddev->pers->start) { set_bit(MD_RECOVERY_WAIT, &mddev->recovery); - md_wakeup_thread(mddev->thread); ret = mddev->pers->start(mddev); clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); md_wakeup_thread(mddev->sync_thread); @@ -6254,7 +6348,6 @@ static int restart_array(struct mddev *mddev) pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); /* Kick recovery or resync if necessary */ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->sync_thread); sysfs_notify_dirent_safe(mddev->sysfs_state); return 0; @@ -6274,7 +6367,15 @@ static void md_clean(struct mddev *mddev) mddev->persistent = 0; mddev->level = LEVEL_NONE; mddev->clevel[0] = 0; - mddev->flags = 0; + /* + * Don't clear MD_CLOSING, or mddev can be opened again. + * 'hold_active != 0' means mddev is still in the creation + * process and will be used later. 
+ */ + if (mddev->hold_active) + mddev->flags = 0; + else + mddev->flags &= BIT_ULL_MASK(MD_CLOSING); mddev->sb_flags = 0; mddev->ro = MD_RDWR; mddev->metadata_type[0] = 0; @@ -6311,7 +6412,6 @@ static void md_clean(struct mddev *mddev) static void __md_stop_writes(struct mddev *mddev) { - stop_sync_thread(mddev, true, false); del_timer_sync(&mddev->safemode_timer); if (mddev->pers && mddev->pers->quiesce) { @@ -6336,6 +6436,8 @@ static void __md_stop_writes(struct mddev *mddev) void md_stop_writes(struct mddev *mddev) { mddev_lock_nointr(mddev); + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + stop_sync_thread(mddev, true, false); __md_stop_writes(mddev); mddev_unlock(mddev); } @@ -6349,8 +6451,10 @@ static void mddev_detach(struct mddev *mddev) mddev->pers->quiesce(mddev, 0); } md_unregister_thread(mddev, &mddev->thread); - if (mddev->queue) - blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ + + /* the unplug fn references 'conf' */ + if (!mddev_is_dm(mddev)) + blk_sync_queue(mddev->gendisk->queue); } static void __md_stop(struct mddev *mddev) @@ -6387,7 +6491,8 @@ void md_stop(struct mddev *mddev) EXPORT_SYMBOL_GPL(md_stop); -static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) +/* ensure 'mddev->pers' exist before calling md_set_readonly() */ +static int md_set_readonly(struct mddev *mddev) { int err = 0; int did_freeze = 0; @@ -6398,7 +6503,6 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { did_freeze = 1; set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - md_wakeup_thread(mddev->thread); } stop_sync_thread(mddev, false, false); @@ -6406,36 +6510,29 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); mddev_lock_nointr(mddev); - mutex_lock(&mddev->open_mutex); - if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || - mddev->sync_thread || - test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { pr_warn("md: %s still in use.\n",mdname(mddev)); err = -EBUSY; goto out; } - if (mddev->pers) { - __md_stop_writes(mddev); - - if (mddev->ro == MD_RDONLY) { - err = -ENXIO; - goto out; - } + __md_stop_writes(mddev); - mddev->ro = MD_RDONLY; - set_disk_ro(mddev->gendisk, 1); + if (mddev->ro == MD_RDONLY) { + err = -ENXIO; + goto out; } + mddev->ro = MD_RDONLY; + set_disk_ro(mddev->gendisk, 1); + out: - if ((mddev->pers && !err) || did_freeze) { + if (!err || did_freeze) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - md_wakeup_thread(mddev->thread); sysfs_notify_dirent_safe(mddev->sysfs_state); } - mutex_unlock(&mddev->open_mutex); return err; } @@ -6443,8 +6540,7 @@ out: * 0 - completely stop and dis-assemble array * 2 - stop but do not disassemble array */ -static int do_md_stop(struct mddev *mddev, int mode, - struct block_device *bdev) +static int do_md_stop(struct mddev *mddev, int mode) { struct gendisk *disk = mddev->gendisk; struct md_rdev *rdev; @@ -6453,22 +6549,16 @@ static int do_md_stop(struct mddev *mddev, int mode, if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { did_freeze = 1; set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - md_wakeup_thread(mddev->thread); } stop_sync_thread(mddev, true, false); - mutex_lock(&mddev->open_mutex); - if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || - mddev->sysfs_active || - mddev->sync_thread || + if 
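When the device is not being held for reuse, md_clean() now masks mddev->flags down to the single MD_CLOSING bit instead of zeroing the whole word, so a concurrent open attempt still sees the array as closing. The bit trick in isolation, in plain C; the bit position used for MD_CLOSING here is illustrative, and BIT_ULL_MASK(n) in the kernel reduces to 1ULL << n for n below 64:

#include <stdio.h>

#define MD_CLOSING 7                            /* bit position, illustrative value */
#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
        unsigned long long flags = 0x2bULL | BIT_ULL(MD_CLOSING);

        /* keep only MD_CLOSING, drop every other flag */
        flags &= BIT_ULL(MD_CLOSING);

        printf("flags after md_clean(): %#llx (closing=%d)\n",
               flags, !!(flags & BIT_ULL(MD_CLOSING)));
        return 0;
}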
(mddev->sysfs_active || test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { pr_warn("md: %s still in use.\n",mdname(mddev)); - mutex_unlock(&mddev->open_mutex); if (did_freeze) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - md_wakeup_thread(mddev->thread); } return -EBUSY; } @@ -6487,13 +6577,11 @@ static int do_md_stop(struct mddev *mddev, int mode, sysfs_unlink_rdev(mddev, rdev); set_capacity_and_notify(disk, 0); - mutex_unlock(&mddev->open_mutex); mddev->changed = 1; if (!md_is_rdwr(mddev)) mddev->ro = MD_RDWR; - } else - mutex_unlock(&mddev->open_mutex); + } /* * Free resources if final stop */ @@ -6539,7 +6627,7 @@ static void autorun_array(struct mddev *mddev) err = do_md_run(mddev); if (err) { pr_warn("md: do_md_run() returned %d\n", err); - do_md_stop(mddev, 0, NULL); + do_md_stop(mddev, 0); } } @@ -7009,9 +7097,7 @@ kick_rdev: md_kick_rdev_from_array(rdev); set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); - if (mddev->thread) - md_wakeup_thread(mddev->thread); - else + if (!mddev->thread) md_update_sb(mddev, 1); md_new_event(); @@ -7086,14 +7172,13 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) if (!bdev_nowait(rdev->bdev)) { pr_info("%s: Disabling nowait because %pg does not support nowait\n", mdname(mddev), rdev->bdev); - blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue); + blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->gendisk->queue); } /* * Kick recovery, maybe this spare has to be added to the * array immediately. */ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - md_wakeup_thread(mddev->thread); md_new_event(); return 0; @@ -7307,8 +7392,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors) * of each device. If num_sectors is zero, we find the largest size * that fits. 
*/ - if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || - mddev->sync_thread) + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return -EBUSY; if (!md_is_rdwr(mddev)) return -EROFS; @@ -7325,10 +7409,9 @@ static int update_size(struct mddev *mddev, sector_t num_sectors) if (!rv) { if (mddev_is_clustered(mddev)) md_cluster_ops->update_size(mddev, old_dev_sectors); - else if (mddev->queue) { + else if (!mddev_is_dm(mddev)) set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); - } } return rv; } @@ -7345,8 +7428,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks) if (raid_disks <= 0 || (mddev->max_disks && raid_disks >= mddev->max_disks)) return -EINVAL; - if (mddev->sync_thread || - test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) || mddev->reshape_position != MaxSector) return -EBUSY; @@ -7542,16 +7624,17 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -static inline bool md_ioctl_valid(unsigned int cmd) +static inline int md_ioctl_valid(unsigned int cmd) { switch (cmd) { - case ADD_NEW_DISK: case GET_ARRAY_INFO: - case GET_BITMAP_FILE: case GET_DISK_INFO: + case RAID_VERSION: + return 0; + case ADD_NEW_DISK: + case GET_BITMAP_FILE: case HOT_ADD_DISK: case HOT_REMOVE_DISK: - case RAID_VERSION: case RESTART_ARRAY_RW: case RUN_ARRAY: case SET_ARRAY_INFO: @@ -7560,9 +7643,11 @@ static inline bool md_ioctl_valid(unsigned int cmd) case STOP_ARRAY: case STOP_ARRAY_RO: case CLUSTERED_DISK_NACK: - return true; + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + return 0; default: - return false; + return -ENOTTY; } } @@ -7620,31 +7705,17 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode, int err = 0; void __user *argp = (void __user *)arg; struct mddev *mddev = NULL; - bool did_set_md_closing = false; - - if (!md_ioctl_valid(cmd)) - return -ENOTTY; - switch (cmd) { - case RAID_VERSION: - case GET_ARRAY_INFO: - case GET_DISK_INFO: - break; - default: - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - } + err = md_ioctl_valid(cmd); + if (err) + return err; /* * Commands dealing with the RAID driver but not any * particular array: */ - switch (cmd) { - case RAID_VERSION: - err = get_version(argp); - goto out; - default:; - } + if (cmd == RAID_VERSION) + return get_version(argp); /* * Commands creating/starting a new array: @@ -7652,35 +7723,23 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode, mddev = bdev->bd_disk->private_data; - if (!mddev) { - BUG(); - goto out; - } - /* Some actions do not requires the mutex */ switch (cmd) { case GET_ARRAY_INFO: if (!mddev->raid_disks && !mddev->external) - err = -ENODEV; - else - err = get_array_info(mddev, argp); - goto out; + return -ENODEV; + return get_array_info(mddev, argp); case GET_DISK_INFO: if (!mddev->raid_disks && !mddev->external) - err = -ENODEV; - else - err = get_disk_info(mddev, argp); - goto out; + return -ENODEV; + return get_disk_info(mddev, argp); case SET_DISK_FAULTY: - err = set_disk_faulty(mddev, new_decode_dev(arg)); - goto out; + return set_disk_faulty(mddev, new_decode_dev(arg)); case GET_BITMAP_FILE: - err = get_bitmap_file(mddev, argp); - goto out; - + return get_bitmap_file(mddev, argp); } if (cmd == HOT_REMOVE_DISK) @@ -7693,20 +7752,9 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode, /* Need to flush page cache, and ensure no-one else opens * and writes */ - mutex_lock(&mddev->open_mutex); - if 
(mddev->pers && atomic_read(&mddev->openers) > 1) { - mutex_unlock(&mddev->open_mutex); - err = -EBUSY; - goto out; - } - if (test_and_set_bit(MD_CLOSING, &mddev->flags)) { - mutex_unlock(&mddev->open_mutex); - err = -EBUSY; - goto out; - } - did_set_md_closing = true; - mutex_unlock(&mddev->open_mutex); - sync_blockdev(bdev); + err = mddev_set_closing_and_sync_blockdev(mddev, 1); + if (err) + return err; } if (!md_is_rdwr(mddev)) @@ -7747,11 +7795,12 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode, goto unlock; case STOP_ARRAY: - err = do_md_stop(mddev, 0, bdev); + err = do_md_stop(mddev, 0); goto unlock; case STOP_ARRAY_RO: - err = md_set_readonly(mddev, bdev); + if (mddev->pers) + err = md_set_readonly(mddev); goto unlock; case HOT_REMOVE_DISK: @@ -7846,7 +7895,7 @@ unlock: mddev_unlock(mddev); out: - if(did_set_md_closing) + if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY)) clear_bit(MD_CLOSING, &mddev->flags); return err; } @@ -8683,10 +8732,7 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, bio_chain(discard_bio, bio); bio_clone_blkg_association(discard_bio, bio); - if (mddev->gendisk) - trace_block_bio_remap(discard_bio, - disk_devt(mddev->gendisk), - bio->bi_iter.bi_sector); + mddev_trace_remap(mddev, discard_bio, bio->bi_iter.bi_sector); submit_bio_noacct(discard_bio); } EXPORT_SYMBOL_GPL(md_submit_discard_bio); @@ -8733,6 +8779,23 @@ void md_account_bio(struct mddev *mddev, struct bio **bio) } EXPORT_SYMBOL_GPL(md_account_bio); +void md_free_cloned_bio(struct bio *bio) +{ + struct md_io_clone *md_io_clone = bio->bi_private; + struct bio *orig_bio = md_io_clone->orig_bio; + struct mddev *mddev = md_io_clone->mddev; + + if (bio->bi_status && !orig_bio->bi_status) + orig_bio->bi_status = bio->bi_status; + + if (md_io_clone->start_time) + bio_end_io_acct(orig_bio, md_io_clone->start_time); + + bio_put(bio); + percpu_ref_put(&mddev->active_io); +} +EXPORT_SYMBOL_GPL(md_free_cloned_bio); + /* md_allow_write(mddev) * Calling this ensures that the array is marked 'active' so that writes * may proceed without blocking. It is important to call this before @@ -8788,12 +8851,16 @@ void md_do_sync(struct md_thread *thread) int ret; /* just incase thread restarts... 
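md_ioctl_valid() now folds the permission check into command validation: information-only commands return 0 for any caller, management commands require CAP_SYS_ADMIN (-EACCES otherwise), and anything unknown is -ENOTTY, so md_ioctl() can simply propagate the result. A userspace model of that three-way classification; the command values and the capable_sys_admin flag are stand-ins, not the real md_u.h numbers or the kernel capable() helper:

#include <errno.h>
#include <stdio.h>

/* illustrative command numbers only; the real ones come from md_u.h */
enum { RAID_VERSION, GET_ARRAY_INFO, GET_DISK_INFO, ADD_NEW_DISK, STOP_ARRAY };

static int capable_sys_admin;                   /* stand-in for capable(CAP_SYS_ADMIN) */

static int md_ioctl_valid(unsigned int cmd)
{
        switch (cmd) {
        case RAID_VERSION:
        case GET_ARRAY_INFO:
        case GET_DISK_INFO:
                return 0;                       /* read-only, no privilege needed */
        case ADD_NEW_DISK:
        case STOP_ARRAY:
                if (!capable_sys_admin)
                        return -EACCES;         /* management needs CAP_SYS_ADMIN */
                return 0;
        default:
                return -ENOTTY;                 /* not an md ioctl */
        }
}

int main(void)
{
        printf("GET_ARRAY_INFO as user: %d\n", md_ioctl_valid(GET_ARRAY_INFO));
        printf("STOP_ARRAY as user:     %d\n", md_ioctl_valid(STOP_ARRAY));
        capable_sys_admin = 1;
        printf("STOP_ARRAY as admin:    %d\n", md_ioctl_valid(STOP_ARRAY));
        return 0;
}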
*/ - if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) || - test_bit(MD_RECOVERY_WAIT, &mddev->recovery)) + if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) return; - if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */ + + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + goto skip; + + if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) || + !md_is_rdwr(mddev)) {/* never try to sync a read-only array */ set_bit(MD_RECOVERY_INTR, &mddev->recovery); - return; + goto skip; } if (mddev_is_clustered(mddev)) { @@ -9162,7 +9229,7 @@ void md_do_sync(struct md_thread *thread) mddev->delta_disks > 0 && mddev->pers->finish_reshape && mddev->pers->size && - mddev->queue) { + !mddev_is_dm(mddev)) { mddev_lock_nointr(mddev); md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); mddev_unlock(mddev); @@ -9262,9 +9329,14 @@ static bool md_spares_need_change(struct mddev *mddev) { struct md_rdev *rdev; - rdev_for_each(rdev, mddev) - if (rdev_removeable(rdev) || rdev_addable(rdev)) + rcu_read_lock(); + rdev_for_each_rcu(rdev, mddev) { + if (rdev_removeable(rdev) || rdev_addable(rdev)) { + rcu_read_unlock(); return true; + } + } + rcu_read_unlock(); return false; } @@ -9368,13 +9440,19 @@ static void md_start_sync(struct work_struct *ws) struct mddev *mddev = container_of(ws, struct mddev, sync_work); int spares = 0; bool suspend = false; + char *name; - if (md_spares_need_change(mddev)) + /* + * If reshape is still in progress, spares won't be added or removed + * from conf until reshape is done. + */ + if (mddev->reshape_position == MaxSector && + md_spares_need_change(mddev)) { suspend = true; + mddev_suspend(mddev, false); + } - suspend ? mddev_suspend_and_lock_nointr(mddev) : - mddev_lock_nointr(mddev); - + mddev_lock_nointr(mddev); if (!md_is_rdwr(mddev)) { /* * On a read-only array we can: @@ -9400,8 +9478,10 @@ static void md_start_sync(struct work_struct *ws) if (spares) md_bitmap_write_all(mddev->bitmap); + name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ? + "reshape" : "resync"; rcu_assign_pointer(mddev->sync_thread, - md_register_thread(md_do_sync, mddev, "resync")); + md_register_thread(md_do_sync, mddev, name)); if (!mddev->sync_thread) { pr_warn("%s: could not start resync thread...\n", mdname(mddev)); @@ -9445,6 +9525,20 @@ not_running: sysfs_notify_dirent_safe(mddev->sysfs_action); } +static void unregister_sync_thread(struct mddev *mddev) +{ + if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { + /* resync/recovery still happening */ + clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + return; + } + + if (WARN_ON_ONCE(!mddev->sync_thread)) + return; + + md_reap_sync_thread(mddev); +} + /* * This routine is regularly called by all per-raid-array threads to * deal with generic issues like resync and super-block update. @@ -9469,9 +9563,6 @@ not_running: */ void md_check_recovery(struct mddev *mddev) { - if (READ_ONCE(mddev->suspended)) - return; - if (mddev->bitmap) md_bitmap_daemon_work(mddev); @@ -9485,7 +9576,8 @@ void md_check_recovery(struct mddev *mddev) } if (!md_is_rdwr(mddev) && - !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) + !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) && + !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) return; if ( ! ( (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) || @@ -9507,8 +9599,7 @@ void md_check_recovery(struct mddev *mddev) struct md_rdev *rdev; if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { - /* sync_work already queued. 
*/ - clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + unregister_sync_thread(mddev); goto unlock; } @@ -9571,16 +9662,7 @@ void md_check_recovery(struct mddev *mddev) * still set. */ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { - if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { - /* resync/recovery still happening */ - clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - goto unlock; - } - - if (WARN_ON_ONCE(!mddev->sync_thread)) - goto unlock; - - md_reap_sync_thread(mddev); + unregister_sync_thread(mddev); goto unlock; } diff --git a/drivers/md/md.h b/drivers/md/md.h index 8d881cc59799..097d9dbd69b8 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -18,6 +18,7 @@ #include <linux/timer.h> #include <linux/wait.h> #include <linux/workqueue.h> +#include <trace/events/block.h> #include "md-cluster.h" #define MaxSector (~(sector_t)0) @@ -59,7 +60,7 @@ struct md_rdev { */ struct block_device *meta_bdev; struct block_device *bdev; /* block device handle */ - struct bdev_handle *bdev_handle; /* Handle from open for bdev */ + struct file *bdev_file; /* Handle from open for bdev */ struct page *sb_page, *bb_page; int sb_loaded; @@ -207,6 +208,7 @@ enum flag_bits { * check if there is collision between raid1 * serial bios. */ + Nonrot, /* non-rotational device (SSD) */ }; static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, @@ -222,6 +224,16 @@ static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, } return 0; } + +static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s, + int sectors) +{ + sector_t first_bad; + int bad_sectors; + + return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors); +} + extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, int is_new); extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, @@ -468,7 +480,6 @@ struct mddev { struct timer_list safemode_timer; struct percpu_ref writes_pending; int sync_checkers; /* # of threads checking writes_pending */ - struct request_queue *queue; /* for plugging ... */ struct bitmap *bitmap; /* the bitmap for the device */ struct { @@ -558,6 +569,37 @@ enum recovery_flags { MD_RESYNCING_REMOTE, /* remote node is running resync thread */ }; +enum md_ro_state { + MD_RDWR, + MD_RDONLY, + MD_AUTO_READ, + MD_MAX_STATE +}; + +static inline bool md_is_rdwr(struct mddev *mddev) +{ + return (mddev->ro == MD_RDWR); +} + +static inline bool reshape_interrupted(struct mddev *mddev) +{ + /* reshape never start */ + if (mddev->reshape_position == MaxSector) + return false; + + /* interrupted */ + if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + return true; + + /* running reshape will be interrupted soon. */ + if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) || + test_bit(MD_RECOVERY_INTR, &mddev->recovery) || + test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) + return true; + + return false; +} + static inline int __must_check mddev_lock(struct mddev *mddev) { return mutex_lock_interruptible(&mddev->reconfig_mutex); @@ -617,6 +659,7 @@ struct md_personality int (*start_reshape) (struct mddev *mddev); void (*finish_reshape) (struct mddev *mddev); void (*update_reshape_pos) (struct mddev *mddev); + void (*prepare_suspend) (struct mddev *mddev); /* quiesce suspends or resumes internal processing. 
* 1 - stop new actions and wait for action io to complete * 0 - return to normal behaviour @@ -750,6 +793,7 @@ extern void md_finish_reshape(struct mddev *mddev); void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio, sector_t start, sector_t size); void md_account_bio(struct mddev *mddev, struct bio **bio); +void md_free_cloned_bio(struct bio *bio); extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio); extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, @@ -778,9 +822,12 @@ extern void md_stop_writes(struct mddev *mddev); extern int md_rdev_init(struct md_rdev *rdev); extern void md_rdev_clear(struct md_rdev *rdev); -extern void md_handle_request(struct mddev *mddev, struct bio *bio); +extern bool md_handle_request(struct mddev *mddev, struct bio *bio); extern int mddev_suspend(struct mddev *mddev, bool interruptible); extern void mddev_resume(struct mddev *mddev); +extern void md_idle_sync_thread(struct mddev *mddev); +extern void md_frozen_sync_thread(struct mddev *mddev); +extern void md_unfrozen_sync_thread(struct mddev *mddev); extern void md_reload_sb(struct mddev *mddev, int raid_disk); extern void md_update_sb(struct mddev *mddev, int force); @@ -821,7 +868,7 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio { if (bio_op(bio) == REQ_OP_WRITE_ZEROES && !bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors) - mddev->queue->limits.max_write_zeroes_sectors = 0; + mddev->gendisk->queue->limits.max_write_zeroes_sectors = 0; } static inline int mddev_suspend_and_lock(struct mddev *mddev) @@ -860,7 +907,31 @@ void md_autostart_arrays(int part); int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info); int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info); int do_md_run(struct mddev *mddev); +void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim); +int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev); +void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes); extern const struct block_device_operations md_fops; +/* + * MD devices can be used undeneath by DM, in which case ->gendisk is NULL. + */ +static inline bool mddev_is_dm(struct mddev *mddev) +{ + return !mddev->gendisk; +} + +static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio, + sector_t sector) +{ + if (!mddev_is_dm(mddev)) + trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector); +} + +#define mddev_add_trace_msg(mddev, fmt, args...) 
\ +do { \ + if (!mddev_is_dm(mddev)) \ + blk_add_trace_msg((mddev)->gendisk->queue, fmt, ##args); \ +} while (0) + #endif /* _MD_MD_H */ diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index c50a7abda744..c5d4aeb68404 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -379,6 +379,19 @@ static void raid0_free(struct mddev *mddev, void *priv) free_conf(mddev, conf); } +static int raid0_set_limits(struct mddev *mddev) +{ + struct queue_limits lim; + + blk_set_stacking_limits(&lim); + lim.max_hw_sectors = mddev->chunk_sectors; + lim.max_write_zeroes_sectors = mddev->chunk_sectors; + lim.io_min = mddev->chunk_sectors << 9; + lim.io_opt = lim.io_min * mddev->raid_disks; + mddev_stack_rdev_limits(mddev, &lim); + return queue_limits_set(mddev->gendisk->queue, &lim); +} + static int raid0_run(struct mddev *mddev) { struct r0conf *conf; @@ -399,20 +412,10 @@ static int raid0_run(struct mddev *mddev) mddev->private = conf; } conf = mddev->private; - if (mddev->queue) { - struct md_rdev *rdev; - - blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); - blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors); - - blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); - blk_queue_io_opt(mddev->queue, - (mddev->chunk_sectors << 9) * mddev->raid_disks); - - rdev_for_each(rdev, mddev) { - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); - } + if (!mddev_is_dm(mddev)) { + ret = raid0_set_limits(mddev); + if (ret) + goto out_free_conf; } /* calculate array device size */ @@ -426,8 +429,10 @@ static int raid0_run(struct mddev *mddev) ret = md_integrity_register(mddev); if (ret) - free_conf(mddev, conf); - + goto out_free_conf; + return 0; +out_free_conf: + free_conf(mddev, conf); return ret; } @@ -578,10 +583,7 @@ static void raid0_map_submit_bio(struct mddev *mddev, struct bio *bio) bio_set_dev(bio, tmp_dev->bdev); bio->bi_iter.bi_sector = sector + zone->dev_start + tmp_dev->data_offset; - - if (mddev->gendisk) - trace_block_bio_remap(bio, disk_devt(mddev->gendisk), - bio_sector); + mddev_trace_remap(mddev, bio, bio_sector); mddev_check_write_zeroes(mddev, bio); submit_bio_noacct(bio); } diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index 512746551f36..2ea1710a3b70 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -227,3 +227,72 @@ static inline bool exceed_read_errors(struct mddev *mddev, struct md_rdev *rdev) return false; } + +/** + * raid1_check_read_range() - check a given read range for bad blocks, + * available read length is returned; + * @rdev: the rdev to read; + * @this_sector: read position; + * @len: read length; + * + * helper function for read_balance() + * + * 1) If there are no bad blocks in the range, @len is returned; + * 2) If the range are all bad blocks, 0 is returned; + * 3) If there are partial bad blocks: + * - If the bad block range starts after @this_sector, the length of first + * good region is returned; + * - If the bad block range starts before @this_sector, 0 is returned and + * the @len is updated to the offset into the region before we get to the + * good blocks; + */ +static inline int raid1_check_read_range(struct md_rdev *rdev, + sector_t this_sector, int *len) +{ + sector_t first_bad; + int bad_sectors; + + /* no bad block overlap */ + if (!is_badblock(rdev, this_sector, *len, &first_bad, &bad_sectors)) + return *len; + + /* + * bad block range starts offset into our range so we can return the + * number of sectors before the bad blocks start. 
+ */ + if (first_bad > this_sector) + return first_bad - this_sector; + + /* read range is fully consumed by bad blocks. */ + if (this_sector + *len <= first_bad + bad_sectors) + return 0; + + /* + * final case, bad block range starts before or at the start of our + * range but does not cover our entire range so we still return 0 but + * update the length with the number of sectors before we get to the + * good ones. + */ + *len = first_bad + bad_sectors - this_sector; + return 0; +} + +/* + * Check if read should choose the first rdev. + * + * Balance on the whole device if no resync is going on (recovery is ok) or + * below the resync window. Otherwise, take the first readable disk. + */ +static inline bool raid1_should_read_first(struct mddev *mddev, + sector_t this_sector, int len) +{ + if ((mddev->recovery_cp < this_sector + len)) + return true; + + if (mddev_is_clustered(mddev) && + md_cluster_ops->area_resyncing(mddev, READ, this_sector, + this_sector + len)) + return true; + + return false; +} diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 286f8b16c7bd..be8ac24f50b6 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -46,9 +46,6 @@ static void allow_barrier(struct r1conf *conf, sector_t sector_nr); static void lower_barrier(struct r1conf *conf, sector_t sector_nr); -#define raid1_log(md, fmt, args...) \ - do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0) - #define RAID_1_10_NAME "raid1" #include "raid1-10.c" @@ -498,9 +495,6 @@ static void raid1_end_write_request(struct bio *bio) * to user-side. So if something waits for IO, then it * will wait for the 'master' bio. */ - sector_t first_bad; - int bad_sectors; - r1_bio->bios[mirror] = NULL; to_put = bio; /* @@ -516,8 +510,8 @@ static void raid1_end_write_request(struct bio *bio) set_bit(R1BIO_Uptodate, &r1_bio->state); /* Maybe we can clear some bad blocks. */ - if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, - &first_bad, &bad_sectors) && !discard_error) { + if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) && + !discard_error) { r1_bio->bios[mirror] = IO_MADE_GOOD; set_bit(R1BIO_MadeGood, &r1_bio->state); } @@ -582,211 +576,312 @@ static sector_t align_to_barrier_unit_end(sector_t start_sector, return len; } -/* - * This routine returns the disk from which the requested read should - * be done. There is a per-array 'next expected sequential IO' sector - * number - if this matches on the next IO then we use the last disk. - * There is also a per-disk 'last know head position' sector that is - * maintained from IRQ contexts, both the normal and the resync IO - * completion handlers update this position correctly. If there is no - * perfect sequential match then we pick the disk whose head is closest. - * - * If there are 2 mirrors in the same 2 devices, performance degrades - * because position is mirror, not device based. - * - * The rdev for the device selected will have nr_pending incremented. 
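raid1_check_read_range() collects the bad-block cases that read_balance() used to open-code: the full length when the range is clean, the length of the leading good region when the bad blocks start later, and 0 (with *len trimmed down) when bad blocks cover the start of the range. A self-contained model of those outcomes, using one bad extent per device instead of the kernel's badblocks table; the toy types are illustrative only:

#include <stdio.h>

typedef unsigned long long sector_t;

/* one bad extent per device is enough to exercise all the outcomes */
struct toy_rdev {
        sector_t bad_start;
        int bad_len;                    /* 0 means no bad blocks at all */
};

static int is_badblock(const struct toy_rdev *rdev, sector_t s, int len,
                       sector_t *first_bad, int *bad_sectors)
{
        if (!rdev->bad_len || rdev->bad_start >= s + len ||
            rdev->bad_start + rdev->bad_len <= s)
                return 0;
        *first_bad = rdev->bad_start;
        *bad_sectors = rdev->bad_len;
        return 1;
}

/* same contract as the new kernel helper */
static int check_read_range(const struct toy_rdev *rdev, sector_t this_sector, int *len)
{
        sector_t first_bad;
        int bad_sectors;

        if (!is_badblock(rdev, this_sector, *len, &first_bad, &bad_sectors))
                return *len;                            /* whole range is readable */
        if (first_bad > this_sector)
                return first_bad - this_sector;         /* leading good region */
        if (this_sector + *len <= first_bad + bad_sectors)
                return 0;                               /* fully covered by bad blocks */
        *len = first_bad + bad_sectors - this_sector;   /* trim off the bad prefix */
        return 0;
}

int main(void)
{
        struct toy_rdev clean = { 0, 0 }, mid = { 80, 16 }, covered = { 56, 48 }, head = { 56, 16 };
        int len;

        len = 32; printf("clean:    ret %2d, len %d\n", check_read_range(&clean,   64, &len), len);
        len = 32; printf("mid bad:  ret %2d, len %d\n", check_read_range(&mid,     64, &len), len);
        len = 32; printf("covered:  ret %2d, len %d\n", check_read_range(&covered, 64, &len), len);
        len = 32; printf("head bad: ret %2d, len %d\n", check_read_range(&head,    64, &len), len);
        return 0;
}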
- */ -static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors) +static void update_read_sectors(struct r1conf *conf, int disk, + sector_t this_sector, int len) { - const sector_t this_sector = r1_bio->sector; - int sectors; - int best_good_sectors; - int best_disk, best_dist_disk, best_pending_disk; - int has_nonrot_disk; + struct raid1_info *info = &conf->mirrors[disk]; + + atomic_inc(&info->rdev->nr_pending); + if (info->next_seq_sect != this_sector) + info->seq_start = this_sector; + info->next_seq_sect = this_sector + len; +} + +static int choose_first_rdev(struct r1conf *conf, struct r1bio *r1_bio, + int *max_sectors) +{ + sector_t this_sector = r1_bio->sector; + int len = r1_bio->sectors; int disk; - sector_t best_dist; - unsigned int min_pending; - struct md_rdev *rdev; - int choose_first; - int choose_next_idle; - /* - * Check if we can balance. We can balance on the whole - * device if no resync is going on, or below the resync window. - * We take the first readable disk when above the resync window. - */ - retry: - sectors = r1_bio->sectors; - best_disk = -1; - best_dist_disk = -1; - best_dist = MaxSector; - best_pending_disk = -1; - min_pending = UINT_MAX; - best_good_sectors = 0; - has_nonrot_disk = 0; - choose_next_idle = 0; - clear_bit(R1BIO_FailFast, &r1_bio->state); + for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { + struct md_rdev *rdev; + int read_len; - if ((conf->mddev->recovery_cp < this_sector + sectors) || - (mddev_is_clustered(conf->mddev) && - md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, - this_sector + sectors))) - choose_first = 1; - else - choose_first = 0; + if (r1_bio->bios[disk] == IO_BLOCKED) + continue; + + rdev = conf->mirrors[disk].rdev; + if (!rdev || test_bit(Faulty, &rdev->flags)) + continue; + + /* choose the first disk even if it has some bad blocks. */ + read_len = raid1_check_read_range(rdev, this_sector, &len); + if (read_len > 0) { + update_read_sectors(conf, disk, this_sector, read_len); + *max_sectors = read_len; + return disk; + } + } + + return -1; +} + +static int choose_bb_rdev(struct r1conf *conf, struct r1bio *r1_bio, + int *max_sectors) +{ + sector_t this_sector = r1_bio->sector; + int best_disk = -1; + int best_len = 0; + int disk; for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { - sector_t dist; - sector_t first_bad; - int bad_sectors; - unsigned int pending; - bool nonrot; + struct md_rdev *rdev; + int len; + int read_len; + + if (r1_bio->bios[disk] == IO_BLOCKED) + continue; rdev = conf->mirrors[disk].rdev; - if (r1_bio->bios[disk] == IO_BLOCKED - || rdev == NULL - || test_bit(Faulty, &rdev->flags)) + if (!rdev || test_bit(Faulty, &rdev->flags) || + test_bit(WriteMostly, &rdev->flags)) continue; - if (!test_bit(In_sync, &rdev->flags) && - rdev->recovery_offset < this_sector + sectors) + + /* keep track of the disk with the most readable sectors. 
*/ + len = r1_bio->sectors; + read_len = raid1_check_read_range(rdev, this_sector, &len); + if (read_len > best_len) { + best_disk = disk; + best_len = read_len; + } + } + + if (best_disk != -1) { + *max_sectors = best_len; + update_read_sectors(conf, best_disk, this_sector, best_len); + } + + return best_disk; +} + +static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio, + int *max_sectors) +{ + sector_t this_sector = r1_bio->sector; + int bb_disk = -1; + int bb_read_len = 0; + int disk; + + for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { + struct md_rdev *rdev; + int len; + int read_len; + + if (r1_bio->bios[disk] == IO_BLOCKED) continue; - if (test_bit(WriteMostly, &rdev->flags)) { - /* Don't balance among write-mostly, just - * use the first as a last resort */ - if (best_dist_disk < 0) { - if (is_badblock(rdev, this_sector, sectors, - &first_bad, &bad_sectors)) { - if (first_bad <= this_sector) - /* Cannot use this */ - continue; - best_good_sectors = first_bad - this_sector; - } else - best_good_sectors = sectors; - best_dist_disk = disk; - best_pending_disk = disk; - } + + rdev = conf->mirrors[disk].rdev; + if (!rdev || test_bit(Faulty, &rdev->flags) || + !test_bit(WriteMostly, &rdev->flags)) continue; + + /* there are no bad blocks, we can use this disk */ + len = r1_bio->sectors; + read_len = raid1_check_read_range(rdev, this_sector, &len); + if (read_len == r1_bio->sectors) { + update_read_sectors(conf, disk, this_sector, read_len); + return disk; } - /* This is a reasonable device to use. It might - * even be best. + + /* + * there are partial bad blocks, choose the rdev with largest + * read length. */ - if (is_badblock(rdev, this_sector, sectors, - &first_bad, &bad_sectors)) { - if (best_dist < MaxSector) - /* already have a better device */ - continue; - if (first_bad <= this_sector) { - /* cannot read here. If this is the 'primary' - * device, then we must not read beyond - * bad_sectors from another device.. - */ - bad_sectors -= (this_sector - first_bad); - if (choose_first && sectors > bad_sectors) - sectors = bad_sectors; - if (best_good_sectors > sectors) - best_good_sectors = sectors; - - } else { - sector_t good_sectors = first_bad - this_sector; - if (good_sectors > best_good_sectors) { - best_good_sectors = good_sectors; - best_disk = disk; - } - if (choose_first) - break; - } - continue; - } else { - if ((sectors > best_good_sectors) && (best_disk >= 0)) - best_disk = -1; - best_good_sectors = sectors; + if (read_len > bb_read_len) { + bb_disk = disk; + bb_read_len = read_len; } + } + + if (bb_disk != -1) { + *max_sectors = bb_read_len; + update_read_sectors(conf, bb_disk, this_sector, bb_read_len); + } + + return bb_disk; +} + +static bool is_sequential(struct r1conf *conf, int disk, struct r1bio *r1_bio) +{ + /* TODO: address issues with this check and concurrency. */ + return conf->mirrors[disk].next_seq_sect == r1_bio->sector || + conf->mirrors[disk].head_position == r1_bio->sector; +} + +/* + * If buffered sequential IO size exceeds optimal iosize, check if there is idle + * disk. If yes, choose the idle disk. 
+ */ +static bool should_choose_next(struct r1conf *conf, int disk) +{ + struct raid1_info *mirror = &conf->mirrors[disk]; + int opt_iosize; + + if (!test_bit(Nonrot, &mirror->rdev->flags)) + return false; + + opt_iosize = bdev_io_opt(mirror->rdev->bdev) >> 9; + return opt_iosize > 0 && mirror->seq_start != MaxSector && + mirror->next_seq_sect > opt_iosize && + mirror->next_seq_sect - opt_iosize >= mirror->seq_start; +} + +static bool rdev_readable(struct md_rdev *rdev, struct r1bio *r1_bio) +{ + if (!rdev || test_bit(Faulty, &rdev->flags)) + return false; + + /* still in recovery */ + if (!test_bit(In_sync, &rdev->flags) && + rdev->recovery_offset < r1_bio->sector + r1_bio->sectors) + return false; + + /* don't read from slow disk unless have to */ + if (test_bit(WriteMostly, &rdev->flags)) + return false; + + /* don't split IO for bad blocks unless have to */ + if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors)) + return false; + + return true; +} + +struct read_balance_ctl { + sector_t closest_dist; + int closest_dist_disk; + int min_pending; + int min_pending_disk; + int sequential_disk; + int readable_disks; +}; + +static int choose_best_rdev(struct r1conf *conf, struct r1bio *r1_bio) +{ + int disk; + struct read_balance_ctl ctl = { + .closest_dist_disk = -1, + .closest_dist = MaxSector, + .min_pending_disk = -1, + .min_pending = UINT_MAX, + .sequential_disk = -1, + }; + + for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { + struct md_rdev *rdev; + sector_t dist; + unsigned int pending; - if (best_disk >= 0) - /* At least two disks to choose from so failfast is OK */ + if (r1_bio->bios[disk] == IO_BLOCKED) + continue; + + rdev = conf->mirrors[disk].rdev; + if (!rdev_readable(rdev, r1_bio)) + continue; + + /* At least two disks to choose from so failfast is OK */ + if (ctl.readable_disks++ == 1) set_bit(R1BIO_FailFast, &r1_bio->state); - nonrot = bdev_nonrot(rdev->bdev); - has_nonrot_disk |= nonrot; pending = atomic_read(&rdev->nr_pending); - dist = abs(this_sector - conf->mirrors[disk].head_position); - if (choose_first) { - best_disk = disk; - break; - } + dist = abs(r1_bio->sector - conf->mirrors[disk].head_position); + /* Don't change to another disk for sequential reads */ - if (conf->mirrors[disk].next_seq_sect == this_sector - || dist == 0) { - int opt_iosize = bdev_io_opt(rdev->bdev) >> 9; - struct raid1_info *mirror = &conf->mirrors[disk]; + if (is_sequential(conf, disk, r1_bio)) { + if (!should_choose_next(conf, disk)) + return disk; - best_disk = disk; /* - * If buffered sequential IO size exceeds optimal - * iosize, check if there is idle disk. If yes, choose - * the idle disk. read_balance could already choose an - * idle disk before noticing it's a sequential IO in - * this disk. This doesn't matter because this disk - * will idle, next time it will be utilized after the - * first disk has IO size exceeds optimal iosize. In - * this way, iosize of the first disk will be optimal - * iosize at least. iosize of the second disk might be - * small, but not a big deal since when the second disk - * starts IO, the first disk is likely still busy. + * Add 'pending' to avoid choosing this disk if + * there is other idle disk. */ - if (nonrot && opt_iosize > 0 && - mirror->seq_start != MaxSector && - mirror->next_seq_sect > opt_iosize && - mirror->next_seq_sect - opt_iosize >= - mirror->seq_start) { - choose_next_idle = 1; - continue; - } - break; + pending++; + /* + * If there is no other idle disk, this disk + * will be chosen. 
+ */ + ctl.sequential_disk = disk; } - if (choose_next_idle) - continue; - - if (min_pending > pending) { - min_pending = pending; - best_pending_disk = disk; + if (ctl.min_pending > pending) { + ctl.min_pending = pending; + ctl.min_pending_disk = disk; } - if (dist < best_dist) { - best_dist = dist; - best_dist_disk = disk; + if (ctl.closest_dist > dist) { + ctl.closest_dist = dist; + ctl.closest_dist_disk = disk; } } /* + * sequential IO size exceeds optimal iosize, however, there is no other + * idle disk, so choose the sequential disk. + */ + if (ctl.sequential_disk != -1 && ctl.min_pending != 0) + return ctl.sequential_disk; + + /* * If all disks are rotational, choose the closest disk. If any disk is * non-rotational, choose the disk with less pending request even the * disk is rotational, which might/might not be optimal for raids with * mixed ratation/non-rotational disks depending on workload. */ - if (best_disk == -1) { - if (has_nonrot_disk || min_pending == 0) - best_disk = best_pending_disk; - else - best_disk = best_dist_disk; - } + if (ctl.min_pending_disk != -1 && + (READ_ONCE(conf->nonrot_disks) || ctl.min_pending == 0)) + return ctl.min_pending_disk; + else + return ctl.closest_dist_disk; +} - if (best_disk >= 0) { - rdev = conf->mirrors[best_disk].rdev; - if (!rdev) - goto retry; - atomic_inc(&rdev->nr_pending); - sectors = best_good_sectors; +/* + * This routine returns the disk from which the requested read should be done. + * + * 1) If resync is in progress, find the first usable disk and use it even if it + * has some bad blocks. + * + * 2) Now that there is no resync, loop through all disks and skipping slow + * disks and disks with bad blocks for now. Only pay attention to key disk + * choice. + * + * 3) If we've made it this far, now look for disks with bad blocks and choose + * the one with most number of sectors. + * + * 4) If we are all the way at the end, we have no choice but to use a disk even + * if it is write mostly. + * + * The rdev for the device selected will have nr_pending incremented. + */ +static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, + int *max_sectors) +{ + int disk; - if (conf->mirrors[best_disk].next_seq_sect != this_sector) - conf->mirrors[best_disk].seq_start = this_sector; + clear_bit(R1BIO_FailFast, &r1_bio->state); + + if (raid1_should_read_first(conf->mddev, r1_bio->sector, + r1_bio->sectors)) + return choose_first_rdev(conf, r1_bio, max_sectors); - conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; + disk = choose_best_rdev(conf, r1_bio); + if (disk >= 0) { + *max_sectors = r1_bio->sectors; + update_read_sectors(conf, disk, r1_bio->sector, + r1_bio->sectors); + return disk; } - *max_sectors = sectors; - return best_disk; + /* + * If we are here it means we didn't find a perfectly good disk so + * now spend a bit more time trying to find one with the most good + * sectors. 
+ */ + disk = choose_bb_rdev(conf, r1_bio, max_sectors); + if (disk >= 0) + return disk; + + return choose_slow_rdev(conf, r1_bio, max_sectors); } static void wake_up_barrier(struct r1conf *conf) @@ -1098,7 +1193,7 @@ static void freeze_array(struct r1conf *conf, int extra) */ spin_lock_irq(&conf->resync_lock); conf->array_frozen = 1; - raid1_log(conf->mddev, "wait freeze"); + mddev_add_trace_msg(conf->mddev, "raid1 wait freeze"); wait_event_lock_irq_cmd( conf->wait_barrier, get_unqueued_pending(conf) == extra, @@ -1287,7 +1382,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, * Reading from a write-mostly device must take care not to * over-take any writes that are 'behind' */ - raid1_log(mddev, "wait behind writes"); + mddev_add_trace_msg(mddev, "raid1 wait behind writes"); wait_event(bitmap->behind_wait, atomic_read(&bitmap->behind_writes) == 0); } @@ -1320,11 +1415,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, test_bit(R1BIO_FailFast, &r1_bio->state)) read_bio->bi_opf |= MD_FAILFAST; read_bio->bi_private = r1_bio; - - if (mddev->gendisk) - trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk), - r1_bio->sector); - + mddev_trace_remap(mddev, read_bio, r1_bio->sector); submit_bio_noacct(read_bio); } @@ -1474,7 +1565,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, bio_wouldblock_error(bio); return; } - raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); + mddev_add_trace_msg(mddev, "raid1 wait rdev %d blocked", + blocked_rdev->raid_disk); md_wait_for_blocked_rdev(blocked_rdev, mddev); wait_barrier(conf, bio->bi_iter.bi_sector, false); goto retry_write; @@ -1557,10 +1649,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, mbio->bi_private = r1_bio; atomic_inc(&r1_bio->remaining); - - if (mddev->gendisk) - trace_block_bio_remap(mbio, disk_devt(mddev->gendisk), - r1_bio->sector); + mddev_trace_remap(mddev, mbio, r1_bio->sector); /* flush_pending_writes() needs access to the rdev so...*/ mbio->bi_bdev = (void *)rdev; if (!raid1_add_bio_to_plug(mddev, mbio, raid1_unplug, disks)) { @@ -1760,6 +1849,52 @@ static int raid1_spare_active(struct mddev *mddev) return count; } +static bool raid1_add_conf(struct r1conf *conf, struct md_rdev *rdev, int disk, + bool replacement) +{ + struct raid1_info *info = conf->mirrors + disk; + + if (replacement) + info += conf->raid_disks; + + if (info->rdev) + return false; + + if (bdev_nonrot(rdev->bdev)) { + set_bit(Nonrot, &rdev->flags); + WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks + 1); + } + + rdev->raid_disk = disk; + info->head_position = 0; + info->seq_start = MaxSector; + WRITE_ONCE(info->rdev, rdev); + + return true; +} + +static bool raid1_remove_conf(struct r1conf *conf, int disk) +{ + struct raid1_info *info = conf->mirrors + disk; + struct md_rdev *rdev = info->rdev; + + if (!rdev || test_bit(In_sync, &rdev->flags) || + atomic_read(&rdev->nr_pending)) + return false; + + /* Only remove non-faulty devices if recovery is not possible. 
*/ + if (!test_bit(Faulty, &rdev->flags) && + rdev->mddev->recovery_disabled != conf->recovery_disabled && + rdev->mddev->degraded < conf->raid_disks) + return false; + + if (test_and_clear_bit(Nonrot, &rdev->flags)) + WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks - 1); + + WRITE_ONCE(info->rdev, NULL); + return true; +} + static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) { struct r1conf *conf = mddev->private; @@ -1791,19 +1926,16 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) for (mirror = first; mirror <= last; mirror++) { p = conf->mirrors + mirror; if (!p->rdev) { - if (mddev->gendisk) - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); + err = mddev_stack_new_rdev(mddev, rdev); + if (err) + return err; - p->head_position = 0; - rdev->raid_disk = mirror; - err = 0; + raid1_add_conf(conf, rdev, mirror, false); /* As all devices are equivalent, we don't need a full recovery * if this was recently any drive of the array */ if (rdev->saved_raid_disk < 0) conf->fullsync = 1; - WRITE_ONCE(p->rdev, rdev); break; } if (test_bit(WantReplacement, &p->rdev->flags) && @@ -1813,13 +1945,11 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) if (err && repl_slot >= 0) { /* Add this device as a replacement */ - p = conf->mirrors + repl_slot; clear_bit(In_sync, &rdev->flags); set_bit(Replacement, &rdev->flags); - rdev->raid_disk = repl_slot; + raid1_add_conf(conf, rdev, repl_slot, true); err = 0; conf->fullsync = 1; - WRITE_ONCE(p[conf->raid_disks].rdev, rdev); } print_conf(conf); @@ -1836,27 +1966,20 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) if (unlikely(number >= conf->raid_disks)) goto abort; - if (rdev != p->rdev) - p = conf->mirrors + conf->raid_disks + number; + if (rdev != p->rdev) { + number += conf->raid_disks; + p = conf->mirrors + number; + } print_conf(conf); if (rdev == p->rdev) { - if (test_bit(In_sync, &rdev->flags) || - atomic_read(&rdev->nr_pending)) { + if (!raid1_remove_conf(conf, number)) { err = -EBUSY; goto abort; } - /* Only remove non-faulty devices if recovery - * is not possible. - */ - if (!test_bit(Faulty, &rdev->flags) && - mddev->recovery_disabled != conf->recovery_disabled && - mddev->degraded < conf->raid_disks) { - err = -EBUSY; - goto abort; - } - WRITE_ONCE(p->rdev, NULL); - if (conf->mirrors[conf->raid_disks + number].rdev) { + + if (number < conf->raid_disks && + conf->mirrors[conf->raid_disks + number].rdev) { /* We just removed a device that is being replaced. * Move down the replacement. We drain all IO before * doing this to avoid confusion. 
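The hunks above and below drop the open-coded is_badblock() calls that only checked whether a range overlaps a bad block, along with their first_bad/bad_sectors locals, in favour of rdev_has_badblock(). The helper itself is not shown in this part of the diff; assuming it is a thin wrapper added next to is_badblock() (likely in md.h), a minimal sketch would be:

	static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s,
					    int sectors)
	{
		sector_t first_bad;
		int bad_sectors;

		/* Callers only need the yes/no (or negative) result, not the range. */
		return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors);
	}

Such a wrapper keeps is_badblock()'s return convention (positive for acknowledged bad blocks, negative for unacknowledged ones), which is why call sites like the raid10 write path below can still test the result against < 0.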
@@ -1944,8 +2067,6 @@ static void end_sync_write(struct bio *bio) struct r1bio *r1_bio = get_resync_r1bio(bio); struct mddev *mddev = r1_bio->mddev; struct r1conf *conf = mddev->private; - sector_t first_bad; - int bad_sectors; struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; if (!uptodate) { @@ -1955,14 +2076,11 @@ static void end_sync_write(struct bio *bio) set_bit(MD_RECOVERY_NEEDED, & mddev->recovery); set_bit(R1BIO_WriteError, &r1_bio->state); - } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, - &first_bad, &bad_sectors) && - !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, - r1_bio->sector, - r1_bio->sectors, - &first_bad, &bad_sectors) - ) + } else if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) && + !rdev_has_badblock(conf->mirrors[r1_bio->read_disk].rdev, + r1_bio->sector, r1_bio->sectors)) { set_bit(R1BIO_MadeGood, &r1_bio->state); + } put_sync_write_buf(r1_bio, uptodate); } @@ -2279,16 +2397,12 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio) s = PAGE_SIZE >> 9; do { - sector_t first_bad; - int bad_sectors; - rdev = conf->mirrors[d].rdev; if (rdev && (test_bit(In_sync, &rdev->flags) || (!test_bit(Faulty, &rdev->flags) && rdev->recovery_offset >= sect + s)) && - is_badblock(rdev, sect, s, - &first_bad, &bad_sectors) == 0) { + rdev_has_badblock(rdev, sect, s) == 0) { atomic_inc(&rdev->nr_pending); if (sync_page_io(rdev, sect, s<<9, conf->tmppage, REQ_OP_READ, false)) @@ -3006,23 +3120,17 @@ static struct r1conf *setup_conf(struct mddev *mddev) err = -EINVAL; spin_lock_init(&conf->device_lock); + conf->raid_disks = mddev->raid_disks; rdev_for_each(rdev, mddev) { int disk_idx = rdev->raid_disk; - if (disk_idx >= mddev->raid_disks - || disk_idx < 0) + + if (disk_idx >= conf->raid_disks || disk_idx < 0) continue; - if (test_bit(Replacement, &rdev->flags)) - disk = conf->mirrors + mddev->raid_disks + disk_idx; - else - disk = conf->mirrors + disk_idx; - if (disk->rdev) + if (!raid1_add_conf(conf, rdev, disk_idx, + test_bit(Replacement, &rdev->flags))) goto abort; - disk->rdev = rdev; - disk->head_position = 0; - disk->seq_start = MaxSector; } - conf->raid_disks = mddev->raid_disks; conf->mddev = mddev; INIT_LIST_HEAD(&conf->retry_list); INIT_LIST_HEAD(&conf->bio_end_io_list); @@ -3086,12 +3194,21 @@ static struct r1conf *setup_conf(struct mddev *mddev) return ERR_PTR(err); } +static int raid1_set_limits(struct mddev *mddev) +{ + struct queue_limits lim; + + blk_set_stacking_limits(&lim); + lim.max_write_zeroes_sectors = 0; + mddev_stack_rdev_limits(mddev, &lim); + return queue_limits_set(mddev->gendisk->queue, &lim); +} + static void raid1_free(struct mddev *mddev, void *priv); static int raid1_run(struct mddev *mddev) { struct r1conf *conf; int i; - struct md_rdev *rdev; int ret; if (mddev->level != 1) { @@ -3118,14 +3235,10 @@ static int raid1_run(struct mddev *mddev) if (IS_ERR(conf)) return PTR_ERR(conf); - if (mddev->queue) - blk_queue_max_write_zeroes_sectors(mddev->queue, 0); - - rdev_for_each(rdev, mddev) { - if (!mddev->gendisk) - continue; - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); + if (!mddev_is_dm(mddev)) { + ret = raid1_set_limits(mddev); + if (ret) + goto abort; } mddev->degraded = 0; diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 14d4211a123a..5300cbaa58a4 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -71,6 +71,7 @@ struct r1conf { * allow for replacements. 
*/ int raid_disks; + int nonrot_disks; spinlock_t device_lock; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 7412066ea22c..a4556d2e46bf 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -76,9 +76,6 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio); static void end_reshape_write(struct bio *bio); static void end_reshape(struct r10conf *conf); -#define raid10_log(md, fmt, args...) \ - do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0) - #include "raid1-10.c" #define NULL_CMD @@ -518,11 +515,7 @@ static void raid10_end_write_request(struct bio *bio) * The 'master' represents the composite IO operation to * user-side. So if something waits for IO, then it will * wait for the 'master' bio. - */ - sector_t first_bad; - int bad_sectors; - - /* + * * Do not set R10BIO_Uptodate if the current device is * rebuilding or Faulty. This is because we cannot use * such device for properly reading the data back (we could @@ -535,10 +528,9 @@ static void raid10_end_write_request(struct bio *bio) set_bit(R10BIO_Uptodate, &r10_bio->state); /* Maybe we can clear some bad blocks. */ - if (is_badblock(rdev, - r10_bio->devs[slot].addr, - r10_bio->sectors, - &first_bad, &bad_sectors) && !discard_error) { + if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr, + r10_bio->sectors) && + !discard_error) { bio_put(bio); if (repl) r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; @@ -753,17 +745,8 @@ static struct md_rdev *read_balance(struct r10conf *conf, best_good_sectors = 0; do_balance = 1; clear_bit(R10BIO_FailFast, &r10_bio->state); - /* - * Check if we can balance. We can balance on the whole - * device if no resync is going on (recovery is ok), or below - * the resync window. We take the first readable disk when - * above the resync window. 
- */ - if ((conf->mddev->recovery_cp < MaxSector - && (this_sector + sectors >= conf->next_resync)) || - (mddev_is_clustered(conf->mddev) && - md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, - this_sector + sectors))) + + if (raid1_should_read_first(conf->mddev, this_sector, sectors)) do_balance = 0; for (slot = 0; slot < conf->copies ; slot++) { @@ -1033,7 +1016,7 @@ static bool wait_barrier(struct r10conf *conf, bool nowait) ret = false; } else { conf->nr_waiting++; - raid10_log(conf->mddev, "wait barrier"); + mddev_add_trace_msg(conf->mddev, "raid10 wait barrier"); wait_event_barrier(conf, stop_waiting_barrier(conf)); conf->nr_waiting--; } @@ -1152,7 +1135,7 @@ static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf, bio_wouldblock_error(bio); return false; } - raid10_log(conf->mddev, "wait reshape"); + mddev_add_trace_msg(conf->mddev, "raid10 wait reshape"); wait_event(conf->wait_barrier, conf->reshape_progress <= bio->bi_iter.bi_sector || conf->reshape_progress >= bio->bi_iter.bi_sector + @@ -1249,10 +1232,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, test_bit(R10BIO_FailFast, &r10_bio->state)) read_bio->bi_opf |= MD_FAILFAST; read_bio->bi_private = r10_bio; - - if (mddev->gendisk) - trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk), - r10_bio->sector); + mddev_trace_remap(mddev, read_bio, r10_bio->sector); submit_bio_noacct(read_bio); return; } @@ -1288,10 +1268,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, && enough(conf, devnum)) mbio->bi_opf |= MD_FAILFAST; mbio->bi_private = r10_bio; - - if (conf->mddev->gendisk) - trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk), - r10_bio->sector); + mddev_trace_remap(mddev, mbio, r10_bio->sector); /* flush_pending_writes() needs access to the rdev so...*/ mbio->bi_bdev = (void *)rdev; @@ -1330,10 +1307,7 @@ retry_wait: } if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) { - sector_t first_bad; sector_t dev_sector = r10_bio->devs[i].addr; - int bad_sectors; - int is_bad; /* * Discard request doesn't care the write result @@ -1342,9 +1316,8 @@ retry_wait: if (!r10_bio->sectors) continue; - is_bad = is_badblock(rdev, dev_sector, r10_bio->sectors, - &first_bad, &bad_sectors); - if (is_bad < 0) { + if (rdev_has_badblock(rdev, dev_sector, + r10_bio->sectors) < 0) { /* * Mustn't write here until the bad block * is acknowledged @@ -1360,8 +1333,9 @@ retry_wait: if (unlikely(blocked_rdev)) { /* Have to wait for this device to get unblocked, then retry */ allow_barrier(conf); - raid10_log(conf->mddev, "%s wait rdev %d blocked", - __func__, blocked_rdev->raid_disk); + mddev_add_trace_msg(conf->mddev, + "raid10 %s wait rdev %d blocked", + __func__, blocked_rdev->raid_disk); md_wait_for_blocked_rdev(blocked_rdev, mddev); wait_barrier(conf, false); goto retry_wait; @@ -1416,7 +1390,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, bio_wouldblock_error(bio); return; } - raid10_log(conf->mddev, "wait reshape metadata"); + mddev_add_trace_msg(conf->mddev, + "raid10 wait reshape metadata"); wait_event(mddev->sb_wait, !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); @@ -2131,10 +2106,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) continue; } - if (mddev->gendisk) - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); - + err = mddev_stack_new_rdev(mddev, rdev); + if (err) + return err; p->head_position = 0; p->recovery_disabled = mddev->recovery_disabled - 1; 
rdev->raid_disk = mirror; @@ -2150,10 +2124,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) clear_bit(In_sync, &rdev->flags); set_bit(Replacement, &rdev->flags); rdev->raid_disk = repl_slot; - err = 0; - if (mddev->gendisk) - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); + err = mddev_stack_new_rdev(mddev, rdev); + if (err) + return err; conf->fullsync = 1; WRITE_ONCE(p->replacement, rdev); } @@ -2290,8 +2263,6 @@ static void end_sync_write(struct bio *bio) struct mddev *mddev = r10_bio->mddev; struct r10conf *conf = mddev->private; int d; - sector_t first_bad; - int bad_sectors; int slot; int repl; struct md_rdev *rdev = NULL; @@ -2312,11 +2283,10 @@ static void end_sync_write(struct bio *bio) &rdev->mddev->recovery); set_bit(R10BIO_WriteError, &r10_bio->state); } - } else if (is_badblock(rdev, - r10_bio->devs[slot].addr, - r10_bio->sectors, - &first_bad, &bad_sectors)) + } else if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr, + r10_bio->sectors)) { set_bit(R10BIO_MadeGood, &r10_bio->state); + } rdev_dec_pending(rdev, mddev); @@ -2597,11 +2567,8 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, int sectors, struct page *page, enum req_op op) { - sector_t first_bad; - int bad_sectors; - - if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) - && (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags))) + if (rdev_has_badblock(rdev, sector, sectors) && + (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags))) return -1; if (sync_page_io(rdev, sector, sectors << 9, page, op, false)) /* success */ @@ -2658,16 +2625,14 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 s = PAGE_SIZE >> 9; do { - sector_t first_bad; - int bad_sectors; - d = r10_bio->devs[sl].devnum; rdev = conf->mirrors[d].rdev; if (rdev && test_bit(In_sync, &rdev->flags) && !test_bit(Faulty, &rdev->flags) && - is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, - &first_bad, &bad_sectors) == 0) { + rdev_has_badblock(rdev, + r10_bio->devs[sl].addr + sect, + s) == 0) { atomic_inc(&rdev->nr_pending); success = sync_page_io(rdev, r10_bio->devs[sl].addr + @@ -4002,14 +3967,26 @@ static struct r10conf *setup_conf(struct mddev *mddev) return ERR_PTR(err); } -static void raid10_set_io_opt(struct r10conf *conf) +static unsigned int raid10_nr_stripes(struct r10conf *conf) { - int raid_disks = conf->geo.raid_disks; + unsigned int raid_disks = conf->geo.raid_disks; + + if (conf->geo.raid_disks % conf->geo.near_copies) + return raid_disks; + return raid_disks / conf->geo.near_copies; +} - if (!(conf->geo.raid_disks % conf->geo.near_copies)) - raid_disks /= conf->geo.near_copies; - blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) * - raid_disks); +static int raid10_set_queue_limits(struct mddev *mddev) +{ + struct r10conf *conf = mddev->private; + struct queue_limits lim; + + blk_set_stacking_limits(&lim); + lim.max_write_zeroes_sectors = 0; + lim.io_min = mddev->chunk_sectors << 9; + lim.io_opt = lim.io_min * raid10_nr_stripes(conf); + mddev_stack_rdev_limits(mddev, &lim); + return queue_limits_set(mddev->gendisk->queue, &lim); } static int raid10_run(struct mddev *mddev) @@ -4021,6 +3998,7 @@ static int raid10_run(struct mddev *mddev) sector_t size; sector_t min_offset_diff = 0; int first = 1; + int ret = -EIO; if (mddev->private == NULL) { conf = setup_conf(mddev); @@ -4047,12 +4025,6 @@ static int 
raid10_run(struct mddev *mddev) } } - if (mddev->queue) { - blk_queue_max_write_zeroes_sectors(mddev->queue, 0); - blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); - raid10_set_io_opt(conf); - } - rdev_for_each(rdev, mddev) { long long diff; @@ -4081,14 +4053,16 @@ static int raid10_run(struct mddev *mddev) if (first || diff < min_offset_diff) min_offset_diff = diff; - if (mddev->gendisk) - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); - disk->head_position = 0; first = 0; } + if (!mddev_is_dm(conf->mddev)) { + ret = raid10_set_queue_limits(mddev); + if (ret) + goto out_free_conf; + } + /* need to check that every block has at least one working mirror */ if (!enough(conf, -1)) { pr_err("md/raid10:%s: not enough operational mirrors.\n", @@ -4175,11 +4149,7 @@ static int raid10_run(struct mddev *mddev) clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); - set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); - rcu_assign_pointer(mddev->sync_thread, - md_register_thread(md_do_sync, mddev, "reshape")); - if (!mddev->sync_thread) - goto out_free_conf; + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } return 0; @@ -4189,7 +4159,7 @@ out_free_conf: raid10_free_conf(conf); mddev->private = NULL; out: - return -EIO; + return ret; } static void raid10_free(struct mddev *mddev, void *priv) @@ -4573,16 +4543,8 @@ out: clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); clear_bit(MD_RECOVERY_DONE, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); - set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); - - rcu_assign_pointer(mddev->sync_thread, - md_register_thread(md_do_sync, mddev, "reshape")); - if (!mddev->sync_thread) { - ret = -EAGAIN; - goto abort; - } + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); conf->reshape_checkpoint = jiffies; - md_wakeup_thread(mddev->sync_thread); md_new_event(); return 0; @@ -4966,8 +4928,7 @@ static void end_reshape(struct r10conf *conf) conf->reshape_safe = MaxSector; spin_unlock_irq(&conf->device_lock); - if (conf->mddev->queue) - raid10_set_io_opt(conf); + mddev_update_io_opt(conf->mddev, raid10_nr_stripes(conf)); conf->fullsync = 0; } diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index da4ba736c4f0..a70cbec12ed0 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -1393,7 +1393,8 @@ int ppl_init_log(struct r5conf *conf) ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid)); ppl_conf->block_size = 512; } else { - ppl_conf->block_size = queue_logical_block_size(mddev->queue); + ppl_conf->block_size = + queue_logical_block_size(mddev->gendisk->queue); } for (i = 0; i < ppl_conf->count; i++) { diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 8497880135ee..d874abfc1836 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -36,6 +36,7 @@ */ #include <linux/blkdev.h> +#include <linux/delay.h> #include <linux/kthread.h> #include <linux/raid/pq.h> #include <linux/async_tx.h> @@ -760,6 +761,7 @@ enum stripe_result { STRIPE_RETRY, STRIPE_SCHEDULE_AND_RETRY, STRIPE_FAIL, + STRIPE_WAIT_RESHAPE, }; struct stripe_request_ctx { @@ -1210,10 +1212,8 @@ again: */ while (op_is_write(op) && rdev && test_bit(WriteErrorSeen, &rdev->flags)) { - sector_t first_bad; - int bad_sectors; - int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), - &first_bad, &bad_sectors); + int bad = rdev_has_badblock(rdev, sh->sector, + RAID5_STRIPE_SECTORS(conf)); if (!bad) break; @@ -1295,10 +1295,7 @@ 
again: if (rrdev) set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); - if (conf->mddev->gendisk) - trace_block_bio_remap(bi, - disk_devt(conf->mddev->gendisk), - sh->dev[i].sector); + mddev_trace_remap(conf->mddev, bi, sh->dev[i].sector); if (should_defer && op_is_write(op)) bio_list_add(&pending_bios, bi); else @@ -1342,10 +1339,7 @@ again: */ if (op == REQ_OP_DISCARD) rbi->bi_vcnt = 0; - if (conf->mddev->gendisk) - trace_block_bio_remap(rbi, - disk_devt(conf->mddev->gendisk), - sh->dev[i].sector); + mddev_trace_remap(conf->mddev, rbi, sh->dev[i].sector); if (should_defer && op_is_write(op)) bio_list_add(&pending_bios, rbi); else @@ -2412,7 +2406,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) atomic_inc(&conf->active_stripes); raid5_release_stripe(sh); - conf->max_nr_stripes++; + WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes + 1); return 1; } @@ -2422,12 +2416,12 @@ static int grow_stripes(struct r5conf *conf, int num) size_t namelen = sizeof(conf->cache_name[0]); int devs = max(conf->raid_disks, conf->previous_raid_disks); - if (conf->mddev->gendisk) + if (mddev_is_dm(conf->mddev)) snprintf(conf->cache_name[0], namelen, - "raid%d-%s", conf->level, mdname(conf->mddev)); + "raid%d-%p", conf->level, conf->mddev); else snprintf(conf->cache_name[0], namelen, - "raid%d-%p", conf->level, conf->mddev); + "raid%d-%s", conf->level, mdname(conf->mddev)); snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]); conf->active_name = 0; @@ -2707,7 +2701,7 @@ static int drop_one_stripe(struct r5conf *conf) shrink_buffers(sh); free_stripe(conf->slab_cache, sh); atomic_dec(&conf->active_stripes); - conf->max_nr_stripes--; + WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes - 1); return 1; } @@ -2855,8 +2849,6 @@ static void raid5_end_write_request(struct bio *bi) struct r5conf *conf = sh->raid_conf; int disks = sh->disks, i; struct md_rdev *rdev; - sector_t first_bad; - int bad_sectors; int replacement = 0; for (i = 0 ; i < disks; i++) { @@ -2888,9 +2880,8 @@ static void raid5_end_write_request(struct bio *bi) if (replacement) { if (bi->bi_status) md_error(conf->mddev, rdev); - else if (is_badblock(rdev, sh->sector, - RAID5_STRIPE_SECTORS(conf), - &first_bad, &bad_sectors)) + else if (rdev_has_badblock(rdev, sh->sector, + RAID5_STRIPE_SECTORS(conf))) set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); } else { if (bi->bi_status) { @@ -2900,9 +2891,8 @@ static void raid5_end_write_request(struct bio *bi) if (!test_and_set_bit(WantReplacement, &rdev->flags)) set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); - } else if (is_badblock(rdev, sh->sector, - RAID5_STRIPE_SECTORS(conf), - &first_bad, &bad_sectors)) { + } else if (rdev_has_badblock(rdev, sh->sector, + RAID5_STRIPE_SECTORS(conf))) { set_bit(R5_MadeGood, &sh->dev[i].flags); if (test_bit(R5_ReadError, &sh->dev[i].flags)) /* That was a successful write so make @@ -4205,10 +4195,9 @@ static int handle_stripe_dirtying(struct r5conf *conf, set_bit(STRIPE_HANDLE, &sh->state); if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) { /* prefer read-modify-write, but need to get some data */ - if (conf->mddev->queue) - blk_add_trace_msg(conf->mddev->queue, - "raid5 rmw %llu %d", - (unsigned long long)sh->sector, rmw); + mddev_add_trace_msg(conf->mddev, "raid5 rmw %llu %d", + sh->sector, rmw); + for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; if (test_bit(R5_InJournal, &dev->flags) && @@ -4285,10 +4274,11 @@ static int handle_stripe_dirtying(struct r5conf *conf, set_bit(STRIPE_DELAYED, 
&sh->state); } } - if (rcw && conf->mddev->queue) - blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", - (unsigned long long)sh->sector, - rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); + if (rcw && !mddev_is_dm(conf->mddev)) + blk_add_trace_msg(conf->mddev->gendisk->queue, + "raid5 rcw %llu %d %d %d", + (unsigned long long)sh->sector, rcw, qread, + test_bit(STRIPE_DELAYED, &sh->state)); } if (rcw > disks && rmw > disks && @@ -4674,8 +4664,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) /* Now to look around and see what can be done */ for (i=disks; i--; ) { struct md_rdev *rdev; - sector_t first_bad; - int bad_sectors; int is_bad = 0; dev = &sh->dev[i]; @@ -4719,8 +4707,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) rdev = conf->disks[i].replacement; if (rdev && !test_bit(Faulty, &rdev->flags) && rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) && - !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), - &first_bad, &bad_sectors)) + !rdev_has_badblock(rdev, sh->sector, + RAID5_STRIPE_SECTORS(conf))) set_bit(R5_ReadRepl, &dev->flags); else { if (rdev && !test_bit(Faulty, &rdev->flags)) @@ -4733,8 +4721,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) if (rdev && test_bit(Faulty, &rdev->flags)) rdev = NULL; if (rdev) { - is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), - &first_bad, &bad_sectors); + is_bad = rdev_has_badblock(rdev, sh->sector, + RAID5_STRIPE_SECTORS(conf)); if (s->blocked_rdev == NULL && (test_bit(Blocked, &rdev->flags) || is_bad < 0)) { @@ -5463,8 +5451,8 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) struct r5conf *conf = mddev->private; struct bio *align_bio; struct md_rdev *rdev; - sector_t sector, end_sector, first_bad; - int bad_sectors, dd_idx; + sector_t sector, end_sector; + int dd_idx; bool did_inc; if (!in_chunk_boundary(mddev, raid_bio)) { @@ -5493,8 +5481,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) atomic_inc(&rdev->nr_pending); - if (is_badblock(rdev, sector, bio_sectors(raid_bio), &first_bad, - &bad_sectors)) { + if (rdev_has_badblock(rdev, sector, bio_sectors(raid_bio))) { rdev_dec_pending(rdev, mddev); return 0; } @@ -5530,9 +5517,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) spin_unlock_irq(&conf->device_lock); } - if (mddev->gendisk) - trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk), - raid_bio->bi_iter.bi_sector); + mddev_trace_remap(mddev, align_bio, raid_bio->bi_iter.bi_sector); submit_bio_noacct(align_bio); return 1; } @@ -5701,8 +5686,8 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) } release_inactive_stripe_list(conf, cb->temp_inactive_list, NR_STRIPE_HASH_LOCKS); - if (mddev->queue) - trace_block_unplug(mddev->queue, cnt, !from_schedule); + if (!mddev_is_dm(mddev)) + trace_block_unplug(mddev->gendisk->queue, cnt, !from_schedule); kfree(cb); } @@ -5946,7 +5931,8 @@ static enum stripe_result make_stripe_request(struct mddev *mddev, if (ahead_of_reshape(mddev, logical_sector, conf->reshape_safe)) { spin_unlock_irq(&conf->device_lock); - return STRIPE_SCHEDULE_AND_RETRY; + ret = STRIPE_SCHEDULE_AND_RETRY; + goto out; } } spin_unlock_irq(&conf->device_lock); @@ -6025,6 +6011,12 @@ static enum stripe_result make_stripe_request(struct mddev *mddev, out_release: raid5_release_stripe(sh); +out: + if (ret == STRIPE_SCHEDULE_AND_RETRY && 
reshape_interrupted(mddev)) { + bi->bi_status = BLK_STS_RESOURCE; + ret = STRIPE_WAIT_RESHAPE; + pr_err_ratelimited("dm-raid456: io across reshape position while reshape can't make progress"); + } return ret; } @@ -6146,7 +6138,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) while (1) { res = make_stripe_request(mddev, conf, &ctx, logical_sector, bi); - if (res == STRIPE_FAIL) + if (res == STRIPE_FAIL || res == STRIPE_WAIT_RESHAPE) break; if (res == STRIPE_RETRY) @@ -6184,6 +6176,11 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) if (rw == WRITE) md_write_end(mddev); + if (res == STRIPE_WAIT_RESHAPE) { + md_free_cloned_bio(bi); + return false; + } + bio_endio(bi); return true; } @@ -6773,7 +6770,18 @@ static void raid5d(struct md_thread *thread) spin_unlock_irq(&conf->device_lock); md_check_recovery(mddev); spin_lock_irq(&conf->device_lock); + + /* + * Waiting on MD_SB_CHANGE_PENDING below may deadlock + * seeing md_check_recovery() is needed to clear + * the flag when using mdmon. + */ + continue; } + + wait_event_lock_irq(mddev->sb_wait, + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), + conf->device_lock); } pr_debug("%d stripes handled\n", handled); @@ -6820,7 +6828,7 @@ raid5_set_cache_size(struct mddev *mddev, int size) if (size <= 16 || size > 32768) return -EINVAL; - conf->min_nr_stripes = size; + WRITE_ONCE(conf->min_nr_stripes, size); mutex_lock(&conf->cache_size_mutex); while (size < conf->max_nr_stripes && drop_one_stripe(conf)) @@ -6832,7 +6840,7 @@ raid5_set_cache_size(struct mddev *mddev, int size) mutex_lock(&conf->cache_size_mutex); while (size > conf->max_nr_stripes) if (!grow_one_stripe(conf, GFP_KERNEL)) { - conf->min_nr_stripes = conf->max_nr_stripes; + WRITE_ONCE(conf->min_nr_stripes, conf->max_nr_stripes); result = -ENOMEM; break; } @@ -6967,10 +6975,8 @@ raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len) pr_debug("md/raid: change stripe_size from %lu to %lu\n", conf->stripe_size, new); - if (mddev->sync_thread || - test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || - mddev->reshape_position != MaxSector || - mddev->sysfs_active) { + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || + mddev->reshape_position != MaxSector || mddev->sysfs_active) { err = -EBUSY; goto out_unlock; } @@ -7084,7 +7090,7 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) if (!conf) err = -ENODEV; else if (new != conf->skip_copy) { - struct request_queue *q = mddev->queue; + struct request_queue *q = mddev->gendisk->queue; conf->skip_copy = new; if (new) @@ -7390,11 +7396,13 @@ static unsigned long raid5_cache_count(struct shrinker *shrink, struct shrink_control *sc) { struct r5conf *conf = shrink->private_data; + int max_stripes = READ_ONCE(conf->max_nr_stripes); + int min_stripes = READ_ONCE(conf->min_nr_stripes); - if (conf->max_nr_stripes < conf->min_nr_stripes) + if (max_stripes < min_stripes) /* unlikely, but not impossible */ return 0; - return conf->max_nr_stripes - conf->min_nr_stripes; + return max_stripes - min_stripes; } static struct r5conf *setup_conf(struct mddev *mddev) @@ -7684,10 +7692,65 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded return 0; } -static void raid5_set_io_opt(struct r5conf *conf) +static int raid5_set_limits(struct mddev *mddev) { - blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) * - (conf->raid_disks - conf->max_degraded)); + struct r5conf *conf = mddev->private; + struct queue_limits lim; + int 
data_disks, stripe; + struct md_rdev *rdev; + + /* + * The read-ahead size must cover two whole stripes, which is + * 2 * (datadisks) * chunksize where 'n' is the number of raid devices. + */ + data_disks = conf->previous_raid_disks - conf->max_degraded; + + /* + * We can only discard a whole stripe. It doesn't make sense to + * discard data disk but write parity disk + */ + stripe = roundup_pow_of_two(data_disks * (mddev->chunk_sectors << 9)); + + blk_set_stacking_limits(&lim); + lim.io_min = mddev->chunk_sectors << 9; + lim.io_opt = lim.io_min * (conf->raid_disks - conf->max_degraded); + lim.raid_partial_stripes_expensive = 1; + lim.discard_granularity = stripe; + lim.max_write_zeroes_sectors = 0; + mddev_stack_rdev_limits(mddev, &lim); + rdev_for_each(rdev, mddev) + queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset, + mddev->gendisk->disk_name); + + /* + * Zeroing is required for discard, otherwise data could be lost. + * + * Consider a scenario: discard a stripe (the stripe could be + * inconsistent if discard_zeroes_data is 0); write one disk of the + * stripe (the stripe could be inconsistent again depending on which + * disks are used to calculate parity); the disk is broken; The stripe + * data of this disk is lost. + * + * We only allow DISCARD if the sysadmin has confirmed that only safe + * devices are in use by setting a module parameter. A better idea + * might be to turn DISCARD into WRITE_ZEROES requests, as that is + * required to be safe. + */ + if (!devices_handle_discard_safely || + lim.max_discard_sectors < (stripe >> 9) || + lim.discard_granularity < stripe) + lim.max_hw_discard_sectors = 0; + + /* + * Requests require having a bitmap for each stripe. + * Limit the max sectors based on this. + */ + lim.max_hw_sectors = RAID5_MAX_REQ_STRIPES << RAID5_STRIPE_SHIFT(conf); + + /* No restrictions on the number of segments in the request */ + lim.max_segments = USHRT_MAX; + + return queue_limits_set(mddev->gendisk->queue, &lim); } static int raid5_run(struct mddev *mddev) @@ -7700,6 +7763,7 @@ static int raid5_run(struct mddev *mddev) int i; long long min_offset_diff = 0; int first = 1; + int ret = -EIO; if (mddev->recovery_cp != MaxSector) pr_notice("md/raid:%s: not clean -- starting background reconstruction\n", @@ -7936,11 +8000,7 @@ static int raid5_run(struct mddev *mddev) clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); - set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); - rcu_assign_pointer(mddev->sync_thread, - md_register_thread(md_do_sync, mddev, "reshape")); - if (!mddev->sync_thread) - goto abort; + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } /* Ok, everything is just fine now */ @@ -7952,66 +8012,10 @@ static int raid5_run(struct mddev *mddev) mdname(mddev)); md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); - if (mddev->queue) { - int chunk_size; - /* read-ahead size must cover two whole stripes, which - * is 2 * (datadisks) * chunksize where 'n' is the - * number of raid devices - */ - int data_disks = conf->previous_raid_disks - conf->max_degraded; - int stripe = data_disks * - ((mddev->chunk_sectors << 9) / PAGE_SIZE); - - chunk_size = mddev->chunk_sectors << 9; - blk_queue_io_min(mddev->queue, chunk_size); - raid5_set_io_opt(conf); - mddev->queue->limits.raid_partial_stripes_expensive = 1; - /* - * We can only discard a whole stripe. 
It doesn't make sense to - * discard data disk but write parity disk - */ - stripe = stripe * PAGE_SIZE; - stripe = roundup_pow_of_two(stripe); - mddev->queue->limits.discard_granularity = stripe; - - blk_queue_max_write_zeroes_sectors(mddev->queue, 0); - - rdev_for_each(rdev, mddev) { - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->new_data_offset << 9); - } - - /* - * zeroing is required, otherwise data - * could be lost. Consider a scenario: discard a stripe - * (the stripe could be inconsistent if - * discard_zeroes_data is 0); write one disk of the - * stripe (the stripe could be inconsistent again - * depending on which disks are used to calculate - * parity); the disk is broken; The stripe data of this - * disk is lost. - * - * We only allow DISCARD if the sysadmin has confirmed that - * only safe devices are in use by setting a module parameter. - * A better idea might be to turn DISCARD into WRITE_ZEROES - * requests, as that is required to be safe. - */ - if (!devices_handle_discard_safely || - mddev->queue->limits.max_discard_sectors < (stripe >> 9) || - mddev->queue->limits.discard_granularity < stripe) - blk_queue_max_discard_sectors(mddev->queue, 0); - - /* - * Requests require having a bitmap for each stripe. - * Limit the max sectors based on this. - */ - blk_queue_max_hw_sectors(mddev->queue, - RAID5_MAX_REQ_STRIPES << RAID5_STRIPE_SHIFT(conf)); - - /* No restrictions on the number of segments in the request */ - blk_queue_max_segments(mddev->queue, USHRT_MAX); + if (!mddev_is_dm(mddev)) { + ret = raid5_set_limits(mddev); + if (ret) + goto abort; } if (log_init(conf, journal_dev, raid5_has_ppl(conf))) @@ -8024,7 +8028,7 @@ abort: free_conf(conf); mddev->private = NULL; pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev)); - return -EIO; + return ret; } static void raid5_free(struct mddev *mddev, void *priv) @@ -8506,29 +8510,8 @@ static int raid5_start_reshape(struct mddev *mddev) clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); clear_bit(MD_RECOVERY_DONE, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); - set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); - rcu_assign_pointer(mddev->sync_thread, - md_register_thread(md_do_sync, mddev, "reshape")); - if (!mddev->sync_thread) { - mddev->recovery = 0; - spin_lock_irq(&conf->device_lock); - write_seqcount_begin(&conf->gen_lock); - mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; - mddev->new_chunk_sectors = - conf->chunk_sectors = conf->prev_chunk_sectors; - mddev->new_layout = conf->algorithm = conf->prev_algo; - rdev_for_each(rdev, mddev) - rdev->new_data_offset = rdev->data_offset; - smp_wmb(); - conf->generation --; - conf->reshape_progress = MaxSector; - mddev->reshape_position = MaxSector; - write_seqcount_end(&conf->gen_lock); - spin_unlock_irq(&conf->device_lock); - return -EAGAIN; - } + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); conf->reshape_checkpoint = jiffies; - md_wakeup_thread(mddev->sync_thread); md_new_event(); return 0; } @@ -8556,8 +8539,8 @@ static void end_reshape(struct r5conf *conf) spin_unlock_irq(&conf->device_lock); wake_up(&conf->wait_for_overlap); - if (conf->mddev->queue) - raid5_set_io_opt(conf); + mddev_update_io_opt(conf->mddev, + conf->raid_disks - conf->max_degraded); } } @@ -8934,6 +8917,18 @@ static int raid5_start(struct mddev *mddev) return r5l_start(conf->log); } +/* + * This is only used for dm-raid456, caller already frozen sync_thread, hence + * if rehsape 
is still in progress, io that is waiting for reshape can never be + * done now, hence wake up and handle those IO. + */ +static void raid5_prepare_suspend(struct mddev *mddev) +{ + struct r5conf *conf = mddev->private; + + wake_up(&conf->wait_for_overlap); +} + static struct md_personality raid6_personality = { .name = "raid6", @@ -8957,6 +8952,7 @@ static struct md_personality raid6_personality = .quiesce = raid5_quiesce, .takeover = raid6_takeover, .change_consistency_policy = raid5_change_consistency_policy, + .prepare_suspend = raid5_prepare_suspend, }; static struct md_personality raid5_personality = { @@ -8981,6 +8977,7 @@ static struct md_personality raid5_personality = .quiesce = raid5_quiesce, .takeover = raid5_takeover, .change_consistency_policy = raid5_change_consistency_policy, + .prepare_suspend = raid5_prepare_suspend, }; static struct md_personality raid4_personality = @@ -9006,6 +9003,7 @@ static struct md_personality raid4_personality = .quiesce = raid5_quiesce, .takeover = raid4_takeover, .change_consistency_policy = raid5_change_consistency_policy, + .prepare_suspend = raid5_prepare_suspend, }; static int __init raid5_init(void) diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c index aebd3c12020b..c381c22135a2 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c @@ -725,6 +725,9 @@ irqreturn_t rkisp1_capture_isr(int irq, void *ctx) unsigned int i; u32 status; + if (!rkisp1->irqs_enabled) + return IRQ_NONE; + status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS); if (!status) return IRQ_NONE; diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h index 4b6b28c05b89..b757f75edecf 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h @@ -450,6 +450,7 @@ struct rkisp1_debug { * @debug: debug params to be exposed on debugfs * @info: version-specific ISP information * @irqs: IRQ line numbers + * @irqs_enabled: the hardware is enabled and can cause interrupts */ struct rkisp1_device { void __iomem *base_addr; @@ -471,6 +472,7 @@ struct rkisp1_device { struct rkisp1_debug debug; const struct rkisp1_info *info; int irqs[RKISP1_NUM_IRQS]; + bool irqs_enabled; }; /* diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c index b6e47e2f1b94..4202642e0523 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c @@ -196,6 +196,9 @@ irqreturn_t rkisp1_csi_isr(int irq, void *ctx) struct rkisp1_device *rkisp1 = dev_get_drvdata(dev); u32 val, status; + if (!rkisp1->irqs_enabled) + return IRQ_NONE; + status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS); if (!status) return IRQ_NONE; diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c index f96f821a7b50..73cf08a74011 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c @@ -305,6 +305,24 @@ static int __maybe_unused rkisp1_runtime_suspend(struct device *dev) { struct rkisp1_device *rkisp1 = dev_get_drvdata(dev); + rkisp1->irqs_enabled = false; + /* Make sure the IRQ handler will see the above */ + mb(); + + /* + * Wait until any running IRQ handler has returned. 
The IRQ handler + * may get called even after this (as it's a shared interrupt line) + * but the 'irqs_enabled' flag will make the handler return immediately. + */ + for (unsigned int il = 0; il < ARRAY_SIZE(rkisp1->irqs); ++il) { + if (rkisp1->irqs[il] == -1) + continue; + + /* Skip if the irq line is the same as previous */ + if (il == 0 || rkisp1->irqs[il - 1] != rkisp1->irqs[il]) + synchronize_irq(rkisp1->irqs[il]); + } + clk_bulk_disable_unprepare(rkisp1->clk_size, rkisp1->clks); return pinctrl_pm_select_sleep_state(dev); } @@ -321,6 +339,10 @@ static int __maybe_unused rkisp1_runtime_resume(struct device *dev) if (ret) return ret; + rkisp1->irqs_enabled = true; + /* Make sure the IRQ handler will see the above */ + mb(); + return 0; } @@ -559,7 +581,7 @@ static int rkisp1_probe(struct platform_device *pdev) rkisp1->irqs[il] = irq; } - ret = devm_request_irq(dev, irq, info->isrs[i].isr, 0, + ret = devm_request_irq(dev, irq, info->isrs[i].isr, IRQF_SHARED, dev_driver_string(dev), dev); if (ret) { dev_err(dev, "request irq failed: %d\n", ret); diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c index f00873d31c42..78a1f7a1499b 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c @@ -976,6 +976,9 @@ irqreturn_t rkisp1_isp_isr(int irq, void *ctx) struct rkisp1_device *rkisp1 = dev_get_drvdata(dev); u32 status, isp_err; + if (!rkisp1->irqs_enabled) + return IRQ_NONE; + status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS); if (!status) return IRQ_NONE; diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index 2afe67ffa285..74d69ce22a33 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig @@ -319,6 +319,7 @@ config IR_PWM_TX tristate "PWM IR transmitter" depends on LIRC depends on PWM + depends on HIGH_RES_TIMERS depends on OF help Say Y if you want to use a PWM based IR transmitter. 
This is diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index fe17c7f98e81..52d82cbe7685 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -253,7 +253,7 @@ int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) if (attr->attach_flags) return -EINVAL; - rcdev = rc_dev_get_from_fd(attr->target_fd); + rcdev = rc_dev_get_from_fd(attr->target_fd, true); if (IS_ERR(rcdev)) return PTR_ERR(rcdev); @@ -278,7 +278,7 @@ int lirc_prog_detach(const union bpf_attr *attr) if (IS_ERR(prog)) return PTR_ERR(prog); - rcdev = rc_dev_get_from_fd(attr->target_fd); + rcdev = rc_dev_get_from_fd(attr->target_fd, true); if (IS_ERR(rcdev)) { bpf_prog_put(prog); return PTR_ERR(rcdev); @@ -303,7 +303,7 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) if (attr->query.query_flags) return -EINVAL; - rcdev = rc_dev_get_from_fd(attr->query.target_fd); + rcdev = rc_dev_get_from_fd(attr->query.target_fd, false); if (IS_ERR(rcdev)) return PTR_ERR(rcdev); diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c index 196806709259..69e630d85262 100644 --- a/drivers/media/rc/ir_toy.c +++ b/drivers/media/rc/ir_toy.c @@ -332,6 +332,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count) sizeof(COMMAND_SMODE_EXIT), STATE_COMMAND_NO_RESP); if (err) { dev_err(irtoy->dev, "exit sample mode: %d\n", err); + kfree(buf); return err; } @@ -339,6 +340,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count) sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND); if (err) { dev_err(irtoy->dev, "enter sample mode: %d\n", err); + kfree(buf); return err; } diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index a537734832c5..caad59f76793 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -814,7 +814,7 @@ void __exit lirc_dev_exit(void) unregister_chrdev_region(lirc_base_dev, RC_DEV_MAX); } -struct rc_dev *rc_dev_get_from_fd(int fd) +struct rc_dev *rc_dev_get_from_fd(int fd, bool write) { struct fd f = fdget(fd); struct lirc_fh *fh; @@ -828,6 +828,9 @@ struct rc_dev *rc_dev_get_from_fd(int fd) return ERR_PTR(-EINVAL); } + if (write && !(f.file->f_mode & FMODE_WRITE)) + return ERR_PTR(-EPERM); + fh = f.file->private_data; dev = fh->rc; diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h index ef1e95e1af7f..7df949fc65e2 100644 --- a/drivers/media/rc/rc-core-priv.h +++ b/drivers/media/rc/rc-core-priv.h @@ -325,7 +325,7 @@ void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev); void lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc); int lirc_register(struct rc_dev *dev); void lirc_unregister(struct rc_dev *dev); -struct rc_dev *rc_dev_get_from_fd(int fd); +struct rc_dev *rc_dev_get_from_fd(int fd, bool write); #else static inline int lirc_dev_init(void) { return 0; } static inline void lirc_dev_exit(void) {} diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c index 434982545be6..8c5ad5c025fa 100644 --- a/drivers/memory/emif.c +++ b/drivers/memory/emif.c @@ -72,7 +72,6 @@ static DEFINE_SPINLOCK(emif_lock); static unsigned long irq_state; static LIST_HEAD(device_list); -#ifdef CONFIG_DEBUG_FS static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif, struct emif_regs *regs) { @@ -140,31 +139,24 @@ static int emif_mr4_show(struct seq_file *s, void *unused) DEFINE_SHOW_ATTRIBUTE(emif_mr4); -static int __init_or_module emif_debugfs_init(struct emif_data *emif) +static void emif_debugfs_init(struct emif_data 
*emif) { - emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL); - debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif, - &emif_regdump_fops); - debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif, - &emif_mr4_fops); - return 0; -} - -static void __exit emif_debugfs_exit(struct emif_data *emif) -{ - debugfs_remove_recursive(emif->debugfs_root); - emif->debugfs_root = NULL; -} -#else -static inline int __init_or_module emif_debugfs_init(struct emif_data *emif) -{ - return 0; + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL); + debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif, + &emif_regdump_fops); + debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif, + &emif_mr4_fops); + } } -static inline void __exit emif_debugfs_exit(struct emif_data *emif) +static void emif_debugfs_exit(struct emif_data *emif) { + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + debugfs_remove_recursive(emif->debugfs_root); + emif->debugfs_root = NULL; + } } -#endif /* * Get bus width used by EMIF. Note that this may be different from the @@ -679,7 +671,7 @@ static void disable_and_clear_all_interrupts(struct emif_data *emif) clear_all_interrupts(emif); } -static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq) +static int setup_interrupts(struct emif_data *emif, u32 irq) { u32 interrupts, type; void __iomem *base = emif->base; @@ -710,7 +702,7 @@ static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq) } -static void __init_or_module emif_onetime_settings(struct emif_data *emif) +static void emif_onetime_settings(struct emif_data *emif) { u32 pwr_mgmt_ctrl, zq, temp_alert_cfg; void __iomem *base = emif->base; @@ -834,8 +826,7 @@ static int is_custom_config_valid(struct emif_custom_configs *cust_cfgs, return valid; } -#if defined(CONFIG_OF) -static void __init_or_module of_get_custom_configs(struct device_node *np_emif, +static void of_get_custom_configs(struct device_node *np_emif, struct emif_data *emif) { struct emif_custom_configs *cust_cfgs = NULL; @@ -884,7 +875,7 @@ static void __init_or_module of_get_custom_configs(struct device_node *np_emif, emif->plat_data->custom_configs = cust_cfgs; } -static void __init_or_module of_get_ddr_info(struct device_node *np_emif, +static void of_get_ddr_info(struct device_node *np_emif, struct device_node *np_ddr, struct ddr_device_info *dev_info) { @@ -918,7 +909,7 @@ static void __init_or_module of_get_ddr_info(struct device_node *np_emif, dev_info->io_width = __fls(io_width) - 1; } -static struct emif_data * __init_or_module of_get_memory_device_details( +static struct emif_data *of_get_memory_device_details( struct device_node *np_emif, struct device *dev) { struct emif_data *emif = NULL; @@ -991,16 +982,7 @@ out: return emif; } -#else - -static struct emif_data * __init_or_module of_get_memory_device_details( - struct device_node *np_emif, struct device *dev) -{ - return NULL; -} -#endif - -static struct emif_data *__init_or_module get_device_details( +static struct emif_data *get_device_details( struct platform_device *pdev) { u32 size; @@ -1104,7 +1086,7 @@ error: return NULL; } -static int __init_or_module emif_probe(struct platform_device *pdev) +static int emif_probe(struct platform_device *pdev) { struct emif_data *emif; int irq, ret; @@ -1159,7 +1141,7 @@ error: return -ENODEV; } -static void __exit emif_remove(struct platform_device *pdev) +static void emif_remove(struct platform_device *pdev) { struct 
emif_data *emif = platform_get_drvdata(pdev); @@ -1183,7 +1165,8 @@ MODULE_DEVICE_TABLE(of, emif_of_match); #endif static struct platform_driver emif_driver = { - .remove_new = __exit_p(emif_remove), + .probe = emif_probe, + .remove_new = emif_remove, .shutdown = emif_shutdown, .driver = { .name = "emif", @@ -1191,7 +1174,7 @@ static struct platform_driver emif_driver = { }, }; -module_platform_driver_probe(emif_driver, emif_probe); +module_platform_driver(emif_driver); MODULE_DESCRIPTION("TI EMIF SDRAM Controller Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c index 47d0ea5f1616..1c63eeacd071 100644 --- a/drivers/memory/stm32-fmc2-ebi.c +++ b/drivers/memory/stm32-fmc2-ebi.c @@ -11,6 +11,7 @@ #include <linux/of_platform.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/reset.h> @@ -20,8 +21,15 @@ #define FMC2_BCR(x) ((x) * 0x8 + FMC2_BCR1) #define FMC2_BTR(x) ((x) * 0x8 + FMC2_BTR1) #define FMC2_PCSCNTR 0x20 +#define FMC2_CFGR 0x20 +#define FMC2_SR 0x84 #define FMC2_BWTR1 0x104 #define FMC2_BWTR(x) ((x) * 0x8 + FMC2_BWTR1) +#define FMC2_SECCFGR 0x300 +#define FMC2_CIDCFGR0 0x30c +#define FMC2_CIDCFGR(x) ((x) * 0x8 + FMC2_CIDCFGR0) +#define FMC2_SEMCR0 0x310 +#define FMC2_SEMCR(x) ((x) * 0x8 + FMC2_SEMCR0) /* Register: FMC2_BCR1 */ #define FMC2_BCR1_CCLKEN BIT(20) @@ -42,6 +50,7 @@ #define FMC2_BCR_ASYNCWAIT BIT(15) #define FMC2_BCR_CPSIZE GENMASK(18, 16) #define FMC2_BCR_CBURSTRW BIT(19) +#define FMC2_BCR_CSCOUNT GENMASK(21, 20) #define FMC2_BCR_NBLSET GENMASK(23, 22) /* Register: FMC2_BTRx/FMC2_BWTRx */ @@ -58,8 +67,28 @@ #define FMC2_PCSCNTR_CSCOUNT GENMASK(15, 0) #define FMC2_PCSCNTR_CNTBEN(x) BIT((x) + 16) +/* Register: FMC2_CFGR */ +#define FMC2_CFGR_CLKDIV GENMASK(19, 16) +#define FMC2_CFGR_CCLKEN BIT(20) +#define FMC2_CFGR_FMC2EN BIT(31) + +/* Register: FMC2_SR */ +#define FMC2_SR_ISOST GENMASK(1, 0) + +/* Register: FMC2_CIDCFGR */ +#define FMC2_CIDCFGR_CFEN BIT(0) +#define FMC2_CIDCFGR_SEMEN BIT(1) +#define FMC2_CIDCFGR_SCID GENMASK(6, 4) +#define FMC2_CIDCFGR_SEMWLC1 BIT(17) + +/* Register: FMC2_SEMCR */ +#define FMC2_SEMCR_SEM_MUTEX BIT(0) +#define FMC2_SEMCR_SEMCID GENMASK(6, 4) + #define FMC2_MAX_EBI_CE 4 #define FMC2_MAX_BANKS 5 +#define FMC2_MAX_RESOURCES 6 +#define FMC2_CID1 1 #define FMC2_BCR_CPSIZE_0 0x0 #define FMC2_BCR_CPSIZE_128 0x1 @@ -74,6 +103,11 @@ #define FMC2_BCR_MTYP_PSRAM 0x1 #define FMC2_BCR_MTYP_NOR 0x2 +#define FMC2_BCR_CSCOUNT_0 0x0 +#define FMC2_BCR_CSCOUNT_1 0x1 +#define FMC2_BCR_CSCOUNT_64 0x2 +#define FMC2_BCR_CSCOUNT_256 0x3 + #define FMC2_BXTR_EXTMOD_A 0x0 #define FMC2_BXTR_EXTMOD_B 0x1 #define FMC2_BXTR_EXTMOD_C 0x2 @@ -88,6 +122,7 @@ #define FMC2_BTR_CLKDIV_MAX 0xf #define FMC2_BTR_DATLAT_MAX 0xf #define FMC2_PCSCNTR_CSCOUNT_MAX 0xff +#define FMC2_CFGR_CLKDIV_MAX 0xf enum stm32_fmc2_ebi_bank { FMC2_EBI1 = 0, @@ -101,7 +136,8 @@ enum stm32_fmc2_ebi_register_type { FMC2_REG_BCR = 1, FMC2_REG_BTR, FMC2_REG_BWTR, - FMC2_REG_PCSCNTR + FMC2_REG_PCSCNTR, + FMC2_REG_CFGR }; enum stm32_fmc2_ebi_transaction_type { @@ -132,16 +168,42 @@ enum stm32_fmc2_ebi_cpsize { FMC2_CPSIZE_1024 = 1024 }; +enum stm32_fmc2_ebi_cscount { + FMC2_CSCOUNT_0 = 0, + FMC2_CSCOUNT_1 = 1, + FMC2_CSCOUNT_64 = 64, + FMC2_CSCOUNT_256 = 256 +}; + +struct stm32_fmc2_ebi; + +struct stm32_fmc2_ebi_data { + const struct stm32_fmc2_prop *child_props; + unsigned int nb_child_props; + u32 fmc2_enable_reg; + u32 fmc2_enable_bit; + int 
(*nwait_used_by_ctrls)(struct stm32_fmc2_ebi *ebi); + void (*set_setup)(struct stm32_fmc2_ebi *ebi); + int (*save_setup)(struct stm32_fmc2_ebi *ebi); + int (*check_rif)(struct stm32_fmc2_ebi *ebi, u32 resource); + void (*put_sems)(struct stm32_fmc2_ebi *ebi); + void (*get_sems)(struct stm32_fmc2_ebi *ebi); +}; + struct stm32_fmc2_ebi { struct device *dev; struct clk *clk; struct regmap *regmap; + const struct stm32_fmc2_ebi_data *data; u8 bank_assigned; + u8 sem_taken; + bool access_granted; u32 bcr[FMC2_MAX_EBI_CE]; u32 btr[FMC2_MAX_EBI_CE]; u32 bwtr[FMC2_MAX_EBI_CE]; u32 pcscntr; + u32 cfgr; }; /* @@ -181,8 +243,11 @@ static int stm32_fmc2_ebi_check_mux(struct stm32_fmc2_ebi *ebi, int cs) { u32 bcr; + int ret; - regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + if (ret) + return ret; if (bcr & FMC2_BCR_MTYP) return 0; @@ -195,8 +260,11 @@ static int stm32_fmc2_ebi_check_waitcfg(struct stm32_fmc2_ebi *ebi, int cs) { u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR); + int ret; - regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + if (ret) + return ret; if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN) return 0; @@ -209,8 +277,11 @@ static int stm32_fmc2_ebi_check_sync_trans(struct stm32_fmc2_ebi *ebi, int cs) { u32 bcr; + int ret; - regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + if (ret) + return ret; if (bcr & FMC2_BCR_BURSTEN) return 0; @@ -218,13 +289,43 @@ static int stm32_fmc2_ebi_check_sync_trans(struct stm32_fmc2_ebi *ebi, return -EINVAL; } +static int stm32_fmc2_ebi_mp25_check_cclk(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs) +{ + if (!ebi->access_granted) + return -EACCES; + + return stm32_fmc2_ebi_check_sync_trans(ebi, prop, cs); +} + +static int stm32_fmc2_ebi_mp25_check_clk_period(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs) +{ + u32 cfgr; + int ret; + + ret = regmap_read(ebi->regmap, FMC2_CFGR, &cfgr); + if (ret) + return ret; + + if (cfgr & FMC2_CFGR_CCLKEN && !ebi->access_granted) + return -EACCES; + + return stm32_fmc2_ebi_check_sync_trans(ebi, prop, cs); +} + static int stm32_fmc2_ebi_check_async_trans(struct stm32_fmc2_ebi *ebi, const struct stm32_fmc2_prop *prop, int cs) { u32 bcr; + int ret; - regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + if (ret) + return ret; if (!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW)) return 0; @@ -237,8 +338,11 @@ static int stm32_fmc2_ebi_check_cpsize(struct stm32_fmc2_ebi *ebi, int cs) { u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM); + int ret; - regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + if (ret) + return ret; if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN) return 0; @@ -251,12 +355,18 @@ static int stm32_fmc2_ebi_check_address_hold(struct stm32_fmc2_ebi *ebi, int cs) { u32 bcr, bxtr, val = FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D); + int ret; + + ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + if (ret) + return ret; - regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); if (prop->reg_type == FMC2_REG_BWTR) - regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr); + ret = regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr); else - regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr); + ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr); + if (ret) + return ret; if 
((!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW)) && ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN)) @@ -270,12 +380,19 @@ static int stm32_fmc2_ebi_check_clk_period(struct stm32_fmc2_ebi *ebi, int cs) { u32 bcr, bcr1; + int ret; - regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); - if (cs) - regmap_read(ebi->regmap, FMC2_BCR1, &bcr1); - else + ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + if (ret) + return ret; + + if (cs) { + ret = regmap_read(ebi->regmap, FMC2_BCR1, &bcr1); + if (ret) + return ret; + } else { bcr1 = bcr; + } if (bcr & FMC2_BCR_BURSTEN && (!cs || !(bcr1 & FMC2_BCR1_CCLKEN))) return 0; @@ -307,18 +424,48 @@ static u32 stm32_fmc2_ebi_ns_to_clk_period(struct stm32_fmc2_ebi *ebi, { u32 nb_clk_cycles = stm32_fmc2_ebi_ns_to_clock_cycles(ebi, cs, setup); u32 bcr, btr, clk_period; + int ret; + + ret = regmap_read(ebi->regmap, FMC2_BCR1, &bcr); + if (ret) + return ret; - regmap_read(ebi->regmap, FMC2_BCR1, &bcr); if (bcr & FMC2_BCR1_CCLKEN || !cs) - regmap_read(ebi->regmap, FMC2_BTR1, &btr); + ret = regmap_read(ebi->regmap, FMC2_BTR1, &btr); else - regmap_read(ebi->regmap, FMC2_BTR(cs), &btr); + ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &btr); + if (ret) + return ret; clk_period = FIELD_GET(FMC2_BTR_CLKDIV, btr) + 1; return DIV_ROUND_UP(nb_clk_cycles, clk_period); } +static u32 stm32_fmc2_ebi_mp25_ns_to_clk_period(struct stm32_fmc2_ebi *ebi, + int cs, u32 setup) +{ + u32 nb_clk_cycles = stm32_fmc2_ebi_ns_to_clock_cycles(ebi, cs, setup); + u32 cfgr, btr, clk_period; + int ret; + + ret = regmap_read(ebi->regmap, FMC2_CFGR, &cfgr); + if (ret) + return ret; + + if (cfgr & FMC2_CFGR_CCLKEN) { + clk_period = FIELD_GET(FMC2_CFGR_CLKDIV, cfgr) + 1; + } else { + ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &btr); + if (ret) + return ret; + + clk_period = FIELD_GET(FMC2_BTR_CLKDIV, btr) + 1; + } + + return DIV_ROUND_UP(nb_clk_cycles, clk_period); +} + static int stm32_fmc2_ebi_get_reg(int reg_type, int cs, u32 *reg) { switch (reg_type) { @@ -334,6 +481,9 @@ static int stm32_fmc2_ebi_get_reg(int reg_type, int cs, u32 *reg) case FMC2_REG_PCSCNTR: *reg = FMC2_PCSCNTR; break; + case FMC2_REG_CFGR: + *reg = FMC2_CFGR; + break; default: return -EINVAL; } @@ -571,11 +721,16 @@ static int stm32_fmc2_ebi_set_address_setup(struct stm32_fmc2_ebi *ebi, if (ret) return ret; - regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + if (ret) + return ret; + if (prop->reg_type == FMC2_REG_BWTR) - regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr); + ret = regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr); else - regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr); + ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr); + if (ret) + return ret; if ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN) val = clamp_val(setup, 1, FMC2_BXTR_ADDSET_MAX); @@ -675,6 +830,30 @@ static int stm32_fmc2_ebi_set_clk_period(struct stm32_fmc2_ebi *ebi, return 0; } +static int stm32_fmc2_ebi_mp25_set_clk_period(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 val, cfgr; + int ret; + + ret = regmap_read(ebi->regmap, FMC2_CFGR, &cfgr); + if (ret) + return ret; + + if (cfgr & FMC2_CFGR_CCLKEN) { + val = setup ? clamp_val(setup - 1, 1, FMC2_CFGR_CLKDIV_MAX) : 1; + val = FIELD_PREP(FMC2_CFGR_CLKDIV, val); + regmap_update_bits(ebi->regmap, FMC2_CFGR, FMC2_CFGR_CLKDIV, val); + } else { + val = setup ? 
clamp_val(setup - 1, 1, FMC2_BTR_CLKDIV_MAX) : 1; + val = FIELD_PREP(FMC2_BTR_CLKDIV, val); + regmap_update_bits(ebi->regmap, FMC2_BTR(cs), FMC2_BTR_CLKDIV, val); + } + + return 0; +} + static int stm32_fmc2_ebi_set_data_latency(struct stm32_fmc2_ebi *ebi, const struct stm32_fmc2_prop *prop, int cs, u32 setup) @@ -693,11 +872,14 @@ static int stm32_fmc2_ebi_set_max_low_pulse(struct stm32_fmc2_ebi *ebi, int cs, u32 setup) { u32 old_val, new_val, pcscntr; + int ret; if (setup < 1) return 0; - regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr); + ret = regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr); + if (ret) + return ret; /* Enable counter for the bank */ regmap_update_bits(ebi->regmap, FMC2_PCSCNTR, @@ -717,6 +899,27 @@ static int stm32_fmc2_ebi_set_max_low_pulse(struct stm32_fmc2_ebi *ebi, return 0; } +static int stm32_fmc2_ebi_mp25_set_max_low_pulse(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 val; + + if (setup == FMC2_CSCOUNT_0) + val = FIELD_PREP(FMC2_BCR_CSCOUNT, FMC2_BCR_CSCOUNT_0); + else if (setup == FMC2_CSCOUNT_1) + val = FIELD_PREP(FMC2_BCR_CSCOUNT, FMC2_BCR_CSCOUNT_1); + else if (setup <= FMC2_CSCOUNT_64) + val = FIELD_PREP(FMC2_BCR_CSCOUNT, FMC2_BCR_CSCOUNT_64); + else + val = FIELD_PREP(FMC2_BCR_CSCOUNT, FMC2_BCR_CSCOUNT_256); + + regmap_update_bits(ebi->regmap, FMC2_BCR(cs), + FMC2_BCR_CSCOUNT, val); + + return 0; +} + static const struct stm32_fmc2_prop stm32_fmc2_child_props[] = { /* st,fmc2-ebi-cs-trans-type must be the first property */ { @@ -882,6 +1085,275 @@ static const struct stm32_fmc2_prop stm32_fmc2_child_props[] = { }, }; +static const struct stm32_fmc2_prop stm32_fmc2_mp25_child_props[] = { + /* st,fmc2-ebi-cs-trans-type must be the first property */ + { + .name = "st,fmc2-ebi-cs-transaction-type", + .mprop = true, + .set = stm32_fmc2_ebi_set_trans_type, + }, + { + .name = "st,fmc2-ebi-cs-cclk-enable", + .bprop = true, + .reg_type = FMC2_REG_CFGR, + .reg_mask = FMC2_CFGR_CCLKEN, + .check = stm32_fmc2_ebi_mp25_check_cclk, + .set = stm32_fmc2_ebi_set_bit_field, + }, + { + .name = "st,fmc2-ebi-cs-mux-enable", + .bprop = true, + .reg_type = FMC2_REG_BCR, + .reg_mask = FMC2_BCR_MUXEN, + .check = stm32_fmc2_ebi_check_mux, + .set = stm32_fmc2_ebi_set_bit_field, + }, + { + .name = "st,fmc2-ebi-cs-buswidth", + .reset_val = FMC2_BUSWIDTH_16, + .set = stm32_fmc2_ebi_set_buswidth, + }, + { + .name = "st,fmc2-ebi-cs-waitpol-high", + .bprop = true, + .reg_type = FMC2_REG_BCR, + .reg_mask = FMC2_BCR_WAITPOL, + .set = stm32_fmc2_ebi_set_bit_field, + }, + { + .name = "st,fmc2-ebi-cs-waitcfg-enable", + .bprop = true, + .reg_type = FMC2_REG_BCR, + .reg_mask = FMC2_BCR_WAITCFG, + .check = stm32_fmc2_ebi_check_waitcfg, + .set = stm32_fmc2_ebi_set_bit_field, + }, + { + .name = "st,fmc2-ebi-cs-wait-enable", + .bprop = true, + .reg_type = FMC2_REG_BCR, + .reg_mask = FMC2_BCR_WAITEN, + .check = stm32_fmc2_ebi_check_sync_trans, + .set = stm32_fmc2_ebi_set_bit_field, + }, + { + .name = "st,fmc2-ebi-cs-asyncwait-enable", + .bprop = true, + .reg_type = FMC2_REG_BCR, + .reg_mask = FMC2_BCR_ASYNCWAIT, + .check = stm32_fmc2_ebi_check_async_trans, + .set = stm32_fmc2_ebi_set_bit_field, + }, + { + .name = "st,fmc2-ebi-cs-cpsize", + .check = stm32_fmc2_ebi_check_cpsize, + .set = stm32_fmc2_ebi_set_cpsize, + }, + { + .name = "st,fmc2-ebi-cs-byte-lane-setup-ns", + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_bl_setup, + }, + { + .name = "st,fmc2-ebi-cs-address-setup-ns", + .reg_type = FMC2_REG_BTR, + .reset_val = 
FMC2_BXTR_ADDSET_MAX, + .check = stm32_fmc2_ebi_check_async_trans, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_address_setup, + }, + { + .name = "st,fmc2-ebi-cs-address-hold-ns", + .reg_type = FMC2_REG_BTR, + .reset_val = FMC2_BXTR_ADDHLD_MAX, + .check = stm32_fmc2_ebi_check_address_hold, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_address_hold, + }, + { + .name = "st,fmc2-ebi-cs-data-setup-ns", + .reg_type = FMC2_REG_BTR, + .reset_val = FMC2_BXTR_DATAST_MAX, + .check = stm32_fmc2_ebi_check_async_trans, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_data_setup, + }, + { + .name = "st,fmc2-ebi-cs-bus-turnaround-ns", + .reg_type = FMC2_REG_BTR, + .reset_val = FMC2_BXTR_BUSTURN_MAX + 1, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_bus_turnaround, + }, + { + .name = "st,fmc2-ebi-cs-data-hold-ns", + .reg_type = FMC2_REG_BTR, + .check = stm32_fmc2_ebi_check_async_trans, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_data_hold, + }, + { + .name = "st,fmc2-ebi-cs-clk-period-ns", + .reset_val = FMC2_CFGR_CLKDIV_MAX + 1, + .check = stm32_fmc2_ebi_mp25_check_clk_period, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_mp25_set_clk_period, + }, + { + .name = "st,fmc2-ebi-cs-data-latency-ns", + .check = stm32_fmc2_ebi_check_sync_trans, + .calculate = stm32_fmc2_ebi_mp25_ns_to_clk_period, + .set = stm32_fmc2_ebi_set_data_latency, + }, + { + .name = "st,fmc2-ebi-cs-write-address-setup-ns", + .reg_type = FMC2_REG_BWTR, + .reset_val = FMC2_BXTR_ADDSET_MAX, + .check = stm32_fmc2_ebi_check_async_trans, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_address_setup, + }, + { + .name = "st,fmc2-ebi-cs-write-address-hold-ns", + .reg_type = FMC2_REG_BWTR, + .reset_val = FMC2_BXTR_ADDHLD_MAX, + .check = stm32_fmc2_ebi_check_address_hold, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_address_hold, + }, + { + .name = "st,fmc2-ebi-cs-write-data-setup-ns", + .reg_type = FMC2_REG_BWTR, + .reset_val = FMC2_BXTR_DATAST_MAX, + .check = stm32_fmc2_ebi_check_async_trans, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_data_setup, + }, + { + .name = "st,fmc2-ebi-cs-write-bus-turnaround-ns", + .reg_type = FMC2_REG_BWTR, + .reset_val = FMC2_BXTR_BUSTURN_MAX + 1, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_bus_turnaround, + }, + { + .name = "st,fmc2-ebi-cs-write-data-hold-ns", + .reg_type = FMC2_REG_BWTR, + .check = stm32_fmc2_ebi_check_async_trans, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_data_hold, + }, + { + .name = "st,fmc2-ebi-cs-max-low-pulse-ns", + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_mp25_set_max_low_pulse, + }, +}; + +static int stm32_fmc2_ebi_mp25_check_rif(struct stm32_fmc2_ebi *ebi, u32 resource) +{ + u32 seccfgr, cidcfgr, semcr; + int cid, ret; + + if (resource >= FMC2_MAX_RESOURCES) + return -EINVAL; + + ret = regmap_read(ebi->regmap, FMC2_SECCFGR, &seccfgr); + if (ret) + return ret; + + if (seccfgr & BIT(resource)) { + if (resource) + dev_err(ebi->dev, "resource %d is configured as secure\n", + resource); + + return -EACCES; + } + + ret = regmap_read(ebi->regmap, FMC2_CIDCFGR(resource), &cidcfgr); + if (ret) + return ret; + + if (!(cidcfgr & FMC2_CIDCFGR_CFEN)) + /* CID filtering is turned off: access granted */ + return 0; + + if 
(!(cidcfgr & FMC2_CIDCFGR_SEMEN)) { + /* Static CID mode */ + cid = FIELD_GET(FMC2_CIDCFGR_SCID, cidcfgr); + if (cid != FMC2_CID1) { + if (resource) + dev_err(ebi->dev, "static CID%d set for resource %d\n", + cid, resource); + + return -EACCES; + } + + return 0; + } + + /* Pass-list with semaphore mode */ + if (!(cidcfgr & FMC2_CIDCFGR_SEMWLC1)) { + if (resource) + dev_err(ebi->dev, "CID1 is block-listed for resource %d\n", + resource); + + return -EACCES; + } + + ret = regmap_read(ebi->regmap, FMC2_SEMCR(resource), &semcr); + if (ret) + return ret; + + if (!(semcr & FMC2_SEMCR_SEM_MUTEX)) { + regmap_update_bits(ebi->regmap, FMC2_SEMCR(resource), + FMC2_SEMCR_SEM_MUTEX, FMC2_SEMCR_SEM_MUTEX); + + ret = regmap_read(ebi->regmap, FMC2_SEMCR(resource), &semcr); + if (ret) + return ret; + } + + cid = FIELD_GET(FMC2_SEMCR_SEMCID, semcr); + if (cid != FMC2_CID1) { + if (resource) + dev_err(ebi->dev, "resource %d is already used by CID%d\n", + resource, cid); + + return -EACCES; + } + + ebi->sem_taken |= BIT(resource); + + return 0; +} + +static void stm32_fmc2_ebi_mp25_put_sems(struct stm32_fmc2_ebi *ebi) +{ + unsigned int resource; + + for (resource = 0; resource < FMC2_MAX_RESOURCES; resource++) { + if (!(ebi->sem_taken & BIT(resource))) + continue; + + regmap_update_bits(ebi->regmap, FMC2_SEMCR(resource), + FMC2_SEMCR_SEM_MUTEX, 0); + } +} + +static void stm32_fmc2_ebi_mp25_get_sems(struct stm32_fmc2_ebi *ebi) +{ + unsigned int resource; + + for (resource = 0; resource < FMC2_MAX_RESOURCES; resource++) { + if (!(ebi->sem_taken & BIT(resource))) + continue; + + regmap_update_bits(ebi->regmap, FMC2_SEMCR(resource), + FMC2_SEMCR_SEM_MUTEX, FMC2_SEMCR_SEM_MUTEX); + } +} + static int stm32_fmc2_ebi_parse_prop(struct stm32_fmc2_ebi *ebi, struct device_node *dev_node, const struct stm32_fmc2_prop *prop, @@ -944,17 +1416,48 @@ static void stm32_fmc2_ebi_disable_bank(struct stm32_fmc2_ebi *ebi, int cs) regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_MBKEN, 0); } -static void stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi) +static int stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi) { unsigned int cs; + int ret; for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) { - regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]); - regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]); - regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]); + if (!(ebi->bank_assigned & BIT(cs))) + continue; + + ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]); + ret |= regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]); + ret |= regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]); + if (ret) + return ret; } - regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr); + return 0; +} + +static int stm32_fmc2_ebi_mp1_save_setup(struct stm32_fmc2_ebi *ebi) +{ + int ret; + + ret = stm32_fmc2_ebi_save_setup(ebi); + if (ret) + return ret; + + return regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr); +} + +static int stm32_fmc2_ebi_mp25_save_setup(struct stm32_fmc2_ebi *ebi) +{ + int ret; + + ret = stm32_fmc2_ebi_save_setup(ebi); + if (ret) + return ret; + + if (ebi->access_granted) + ret = regmap_read(ebi->regmap, FMC2_CFGR, &ebi->cfgr); + + return ret; } static void stm32_fmc2_ebi_set_setup(struct stm32_fmc2_ebi *ebi) @@ -962,14 +1465,29 @@ static void stm32_fmc2_ebi_set_setup(struct stm32_fmc2_ebi *ebi) unsigned int cs; for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) { + if (!(ebi->bank_assigned & BIT(cs))) + continue; + regmap_write(ebi->regmap, FMC2_BCR(cs), ebi->bcr[cs]); regmap_write(ebi->regmap, 
FMC2_BTR(cs), ebi->btr[cs]); regmap_write(ebi->regmap, FMC2_BWTR(cs), ebi->bwtr[cs]); } +} +static void stm32_fmc2_ebi_mp1_set_setup(struct stm32_fmc2_ebi *ebi) +{ + stm32_fmc2_ebi_set_setup(ebi); regmap_write(ebi->regmap, FMC2_PCSCNTR, ebi->pcscntr); } +static void stm32_fmc2_ebi_mp25_set_setup(struct stm32_fmc2_ebi *ebi) +{ + stm32_fmc2_ebi_set_setup(ebi); + + if (ebi->access_granted) + regmap_write(ebi->regmap, FMC2_CFGR, ebi->cfgr); +} + static void stm32_fmc2_ebi_disable_banks(struct stm32_fmc2_ebi *ebi) { unsigned int cs; @@ -983,33 +1501,48 @@ static void stm32_fmc2_ebi_disable_banks(struct stm32_fmc2_ebi *ebi) } /* NWAIT signal can not be connected to EBI controller and NAND controller */ -static bool stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi) +static int stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi) { + struct device *dev = ebi->dev; unsigned int cs; u32 bcr; + int ret; for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) { if (!(ebi->bank_assigned & BIT(cs))) continue; - regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + if (ret) + return ret; + if ((bcr & FMC2_BCR_WAITEN || bcr & FMC2_BCR_ASYNCWAIT) && - ebi->bank_assigned & BIT(FMC2_NAND)) - return true; + ebi->bank_assigned & BIT(FMC2_NAND)) { + dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n"); + return -EINVAL; + } } - return false; + return 0; } static void stm32_fmc2_ebi_enable(struct stm32_fmc2_ebi *ebi) { - regmap_update_bits(ebi->regmap, FMC2_BCR1, - FMC2_BCR1_FMC2EN, FMC2_BCR1_FMC2EN); + if (!ebi->access_granted) + return; + + regmap_update_bits(ebi->regmap, ebi->data->fmc2_enable_reg, + ebi->data->fmc2_enable_bit, + ebi->data->fmc2_enable_bit); } static void stm32_fmc2_ebi_disable(struct stm32_fmc2_ebi *ebi) { - regmap_update_bits(ebi->regmap, FMC2_BCR1, FMC2_BCR1_FMC2EN, 0); + if (!ebi->access_granted) + return; + + regmap_update_bits(ebi->regmap, ebi->data->fmc2_enable_reg, + ebi->data->fmc2_enable_bit, 0); } static int stm32_fmc2_ebi_setup_cs(struct stm32_fmc2_ebi *ebi, @@ -1021,8 +1554,8 @@ static int stm32_fmc2_ebi_setup_cs(struct stm32_fmc2_ebi *ebi, stm32_fmc2_ebi_disable_bank(ebi, cs); - for (i = 0; i < ARRAY_SIZE(stm32_fmc2_child_props); i++) { - const struct stm32_fmc2_prop *p = &stm32_fmc2_child_props[i]; + for (i = 0; i < ebi->data->nb_child_props; i++) { + const struct stm32_fmc2_prop *p = &ebi->data->child_props[i]; ret = stm32_fmc2_ebi_parse_prop(ebi, dev_node, p, cs); if (ret) { @@ -1066,6 +1599,15 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi) return -EINVAL; } + if (ebi->data->check_rif) { + ret = ebi->data->check_rif(ebi, bank + 1); + if (ret) { + dev_err(dev, "bank access failed: %d\n", bank); + of_node_put(child); + return ret; + } + } + if (bank < FMC2_MAX_EBI_CE) { ret = stm32_fmc2_ebi_setup_cs(ebi, child, bank); if (ret) { @@ -1085,9 +1627,10 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi) return -ENODEV; } - if (stm32_fmc2_ebi_nwait_used_by_ctrls(ebi)) { - dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n"); - return -EINVAL; + if (ebi->data->nwait_used_by_ctrls) { + ret = ebi->data->nwait_used_by_ctrls(ebi); + if (ret) + return ret; } stm32_fmc2_ebi_enable(ebi); @@ -1107,6 +1650,11 @@ static int stm32_fmc2_ebi_probe(struct platform_device *pdev) return -ENOMEM; ebi->dev = dev; + platform_set_drvdata(pdev, ebi); + + ebi->data = of_device_get_match_data(dev); + if (!ebi->data) + return -EINVAL; ebi->regmap = device_node_to_regmap(dev->of_node); if 
(IS_ERR(ebi->regmap)) @@ -1120,28 +1668,57 @@ static int stm32_fmc2_ebi_probe(struct platform_device *pdev) if (PTR_ERR(rstc) == -EPROBE_DEFER) return -EPROBE_DEFER; - ret = clk_prepare_enable(ebi->clk); + ret = devm_pm_runtime_enable(dev); if (ret) return ret; + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + if (!IS_ERR(rstc)) { reset_control_assert(rstc); reset_control_deassert(rstc); } + /* Check if CFGR register can be modified */ + ebi->access_granted = true; + if (ebi->data->check_rif) { + ret = ebi->data->check_rif(ebi, 0); + if (ret) { + u32 sr; + + ebi->access_granted = false; + + ret = regmap_read(ebi->regmap, FMC2_SR, &sr); + if (ret) + goto err_release; + + /* In case of CFGR is secure, just check that the FMC2 is enabled */ + if (sr & FMC2_SR_ISOST) { + dev_err(dev, "FMC2 is not ready to be used.\n"); + ret = -EACCES; + goto err_release; + } + } + } + ret = stm32_fmc2_ebi_parse_dt(ebi); if (ret) goto err_release; - stm32_fmc2_ebi_save_setup(ebi); - platform_set_drvdata(pdev, ebi); + ret = ebi->data->save_setup(ebi); + if (ret) + goto err_release; return 0; err_release: stm32_fmc2_ebi_disable_banks(ebi); stm32_fmc2_ebi_disable(ebi); - clk_disable_unprepare(ebi->clk); + if (ebi->data->put_sems) + ebi->data->put_sems(ebi); + pm_runtime_put_sync_suspend(dev); return ret; } @@ -1153,7 +1730,25 @@ static void stm32_fmc2_ebi_remove(struct platform_device *pdev) of_platform_depopulate(&pdev->dev); stm32_fmc2_ebi_disable_banks(ebi); stm32_fmc2_ebi_disable(ebi); + if (ebi->data->put_sems) + ebi->data->put_sems(ebi); + pm_runtime_put_sync_suspend(&pdev->dev); +} + +static int __maybe_unused stm32_fmc2_ebi_runtime_suspend(struct device *dev) +{ + struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev); + clk_disable_unprepare(ebi->clk); + + return 0; +} + +static int __maybe_unused stm32_fmc2_ebi_runtime_resume(struct device *dev) +{ + struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev); + + return clk_prepare_enable(ebi->clk); } static int __maybe_unused stm32_fmc2_ebi_suspend(struct device *dev) @@ -1161,7 +1756,9 @@ static int __maybe_unused stm32_fmc2_ebi_suspend(struct device *dev) struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev); stm32_fmc2_ebi_disable(ebi); - clk_disable_unprepare(ebi->clk); + if (ebi->data->put_sems) + ebi->data->put_sems(ebi); + pm_runtime_put_sync_suspend(dev); pinctrl_pm_select_sleep_state(dev); return 0; @@ -1174,21 +1771,55 @@ static int __maybe_unused stm32_fmc2_ebi_resume(struct device *dev) pinctrl_pm_select_default_state(dev); - ret = clk_prepare_enable(ebi->clk); - if (ret) + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) return ret; - stm32_fmc2_ebi_set_setup(ebi); + if (ebi->data->get_sems) + ebi->data->get_sems(ebi); + ebi->data->set_setup(ebi); stm32_fmc2_ebi_enable(ebi); return 0; } -static SIMPLE_DEV_PM_OPS(stm32_fmc2_ebi_pm_ops, stm32_fmc2_ebi_suspend, - stm32_fmc2_ebi_resume); +static const struct dev_pm_ops stm32_fmc2_ebi_pm_ops = { + SET_RUNTIME_PM_OPS(stm32_fmc2_ebi_runtime_suspend, + stm32_fmc2_ebi_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(stm32_fmc2_ebi_suspend, stm32_fmc2_ebi_resume) +}; + +static const struct stm32_fmc2_ebi_data stm32_fmc2_ebi_mp1_data = { + .child_props = stm32_fmc2_child_props, + .nb_child_props = ARRAY_SIZE(stm32_fmc2_child_props), + .fmc2_enable_reg = FMC2_BCR1, + .fmc2_enable_bit = FMC2_BCR1_FMC2EN, + .nwait_used_by_ctrls = stm32_fmc2_ebi_nwait_used_by_ctrls, + .set_setup = stm32_fmc2_ebi_mp1_set_setup, + .save_setup = stm32_fmc2_ebi_mp1_save_setup, +}; + +static const struct 
stm32_fmc2_ebi_data stm32_fmc2_ebi_mp25_data = { + .child_props = stm32_fmc2_mp25_child_props, + .nb_child_props = ARRAY_SIZE(stm32_fmc2_mp25_child_props), + .fmc2_enable_reg = FMC2_CFGR, + .fmc2_enable_bit = FMC2_CFGR_FMC2EN, + .set_setup = stm32_fmc2_ebi_mp25_set_setup, + .save_setup = stm32_fmc2_ebi_mp25_save_setup, + .check_rif = stm32_fmc2_ebi_mp25_check_rif, + .put_sems = stm32_fmc2_ebi_mp25_put_sems, + .get_sems = stm32_fmc2_ebi_mp25_get_sems, +}; static const struct of_device_id stm32_fmc2_ebi_match[] = { - {.compatible = "st,stm32mp1-fmc2-ebi"}, + { + .compatible = "st,stm32mp1-fmc2-ebi", + .data = &stm32_fmc2_ebi_mp1_data, + }, + { + .compatible = "st,stm32mp25-fmc2-ebi", + .data = &stm32_fmc2_ebi_mp25_data, + }, {} }; MODULE_DEVICE_TABLE(of, stm32_fmc2_ebi_match); diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c index abff87f917cb..5f57cea48b62 100644 --- a/drivers/memory/tegra/tegra234.c +++ b/drivers/memory/tegra/tegra234.c @@ -92,6 +92,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, { .id = TEGRA234_MEMORY_CLIENT_DLA0RDB, .name = "dla0rdb", + .bpmp_id = TEGRA_ICC_BPMP_DLA_0, + .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVDLA0, .regs = { .sid = { @@ -102,6 +104,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, { .id = TEGRA234_MEMORY_CLIENT_DLA0RDB1, .name = "dla0rdb1", + .bpmp_id = TEGRA_ICC_BPMP_DLA_0, + .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVDLA0, .regs = { .sid = { @@ -112,6 +116,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, { .id = TEGRA234_MEMORY_CLIENT_DLA0WRB, .name = "dla0wrb", + .bpmp_id = TEGRA_ICC_BPMP_DLA_0, + .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVDLA0, .regs = { .sid = { @@ -121,7 +127,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, }, { .id = TEGRA234_MEMORY_CLIENT_DLA1RDB, - .name = "dla0rdb", + .name = "dla1rdb", + .bpmp_id = TEGRA_ICC_BPMP_DLA_1, + .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVDLA1, .regs = { .sid = { @@ -407,7 +415,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, }, { .id = TEGRA234_MEMORY_CLIENT_DLA1RDB1, - .name = "dla0rdb1", + .name = "dla1rdb1", + .bpmp_id = TEGRA_ICC_BPMP_DLA_1, + .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVDLA1, .regs = { .sid = { @@ -417,7 +427,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, }, { .id = TEGRA234_MEMORY_CLIENT_DLA1WRB, - .name = "dla0wrb", + .name = "dla1wrb", + .bpmp_id = TEGRA_ICC_BPMP_DLA_1, + .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVDLA1, .regs = { .sid = { @@ -539,7 +551,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { .bpmp_id = TEGRA_ICC_BPMP_NVJPG_0, .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVJPG, - .regs = { + .regs = { .sid = { .override = 0x3f8, .security = 0x3fc, @@ -660,6 +672,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, { .id = TEGRA234_MEMORY_CLIENT_DLA0RDA, .name = "dla0rda", + .bpmp_id = TEGRA_ICC_BPMP_DLA_0, + .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVDLA0, .regs = { .sid = { @@ -670,6 +684,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, { .id = TEGRA234_MEMORY_CLIENT_DLA0FALRDB, .name = "dla0falrdb", + .bpmp_id = TEGRA_ICC_BPMP_DLA_0, + .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVDLA0, .regs = { .sid = { @@ -680,6 +696,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, { .id = TEGRA234_MEMORY_CLIENT_DLA0WRA, .name = "dla0wra", + .bpmp_id = TEGRA_ICC_BPMP_DLA_0, + .type = TEGRA_ICC_NISO, .sid = 
TEGRA234_SID_NVDLA0, .regs = { .sid = { @@ -690,6 +708,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, { .id = TEGRA234_MEMORY_CLIENT_DLA0FALWRB, .name = "dla0falwrb", + .bpmp_id = TEGRA_ICC_BPMP_DLA_0, + .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVDLA0, .regs = { .sid = { @@ -699,7 +719,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, }, { .id = TEGRA234_MEMORY_CLIENT_DLA1RDA, - .name = "dla0rda", + .name = "dla1rda", + .bpmp_id = TEGRA_ICC_BPMP_DLA_1, + .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVDLA1, .regs = { .sid = { @@ -709,7 +731,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, }, { .id = TEGRA234_MEMORY_CLIENT_DLA1FALRDB, - .name = "dla0falrdb", + .name = "dla1falrdb", + .bpmp_id = TEGRA_ICC_BPMP_DLA_1, + .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVDLA1, .regs = { .sid = { @@ -719,7 +743,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, }, { .id = TEGRA234_MEMORY_CLIENT_DLA1WRA, - .name = "dla0wra", + .name = "dla1wra", + .bpmp_id = TEGRA_ICC_BPMP_DLA_1, + .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVDLA1, .regs = { .sid = { @@ -729,7 +755,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, }, { .id = TEGRA234_MEMORY_CLIENT_DLA1FALWRB, - .name = "dla0falwrb", + .name = "dla1falwrb", + .bpmp_id = TEGRA_ICC_BPMP_DLA_1, + .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVDLA1, .regs = { .sid = { @@ -908,6 +936,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, { .id = TEGRA234_MEMORY_CLIENT_DLA0RDA1, .name = "dla0rda1", + .bpmp_id = TEGRA_ICC_BPMP_DLA_0, + .type = TEGRA_ICC_NISO, .sid = TEGRA234_SID_NVDLA0, .regs = { .sid = { @@ -917,7 +947,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = { }, }, { .id = TEGRA234_MEMORY_CLIENT_DLA1RDA1, - .name = "dla0rda1", + .name = "dla1rda1", .sid = TEGRA234_SID_NVDLA1, .regs = { .sid = { diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c index 04115cd92433..47a314a4eb6f 100644 --- a/drivers/memstick/core/ms_block.c +++ b/drivers/memstick/core/ms_block.c @@ -2078,6 +2078,12 @@ static const struct blk_mq_ops msb_mq_ops = { static int msb_init_disk(struct memstick_dev *card) { struct msb_data *msb = memstick_get_drvdata(card); + struct queue_limits lim = { + .logical_block_size = msb->page_size, + .max_hw_sectors = MS_BLOCK_MAX_PAGES, + .max_segments = MS_BLOCK_MAX_SEGS, + .max_segment_size = MS_BLOCK_MAX_PAGES * msb->page_size, + }; int rc; unsigned long capacity; @@ -2093,19 +2099,13 @@ static int msb_init_disk(struct memstick_dev *card) if (rc) goto out_release_id; - msb->disk = blk_mq_alloc_disk(&msb->tag_set, card); + msb->disk = blk_mq_alloc_disk(&msb->tag_set, &lim, card); if (IS_ERR(msb->disk)) { rc = PTR_ERR(msb->disk); goto out_free_tag_set; } msb->queue = msb->disk->queue; - blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES); - blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS); - blk_queue_max_segment_size(msb->queue, - MS_BLOCK_MAX_PAGES * msb->page_size); - blk_queue_logical_block_size(msb->queue, msb->page_size); - sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id); msb->disk->fops = &msb_bdops; msb->disk->private_data = msb; diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index 5a69ed33999b..49accfdc89d6 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c @@ -1103,6 +1103,12 @@ static const struct blk_mq_ops mspro_mq_ops = { static int mspro_block_init_disk(struct memstick_dev 
*card) { struct mspro_block_data *msb = memstick_get_drvdata(card); + struct queue_limits lim = { + .logical_block_size = msb->page_size, + .max_hw_sectors = MSPRO_BLOCK_MAX_PAGES, + .max_segments = MSPRO_BLOCK_MAX_SEGS, + .max_segment_size = MSPRO_BLOCK_MAX_PAGES * msb->page_size, + }; struct mspro_devinfo *dev_info = NULL; struct mspro_sys_info *sys_info = NULL; struct mspro_sys_attr *s_attr = NULL; @@ -1138,18 +1144,13 @@ static int mspro_block_init_disk(struct memstick_dev *card) if (rc) goto out_release_id; - msb->disk = blk_mq_alloc_disk(&msb->tag_set, card); + msb->disk = blk_mq_alloc_disk(&msb->tag_set, &lim, card); if (IS_ERR(msb->disk)) { rc = PTR_ERR(msb->disk); goto out_free_tag_set; } msb->queue = msb->disk->queue; - blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES); - blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS); - blk_queue_max_segment_size(msb->queue, - MSPRO_BLOCK_MAX_PAGES * msb->page_size); - msb->disk->major = major; msb->disk->first_minor = disk_id << MSPRO_BLOCK_PART_SHIFT; msb->disk->minors = 1 << MSPRO_BLOCK_PART_SHIFT; @@ -1158,8 +1159,6 @@ static int mspro_block_init_disk(struct memstick_dev *card) sprintf(msb->disk->disk_name, "mspblk%d", disk_id); - blk_queue_logical_block_size(msb->queue, msb->page_size); - capacity = be16_to_cpu(sys_info->user_block_count); capacity *= be16_to_cpu(sys_info->block_size); capacity *= msb->page_size >> 9; diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 1c6c62a7f7f5..dbd26c3b245b 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -263,7 +263,6 @@ struct fastrpc_channel_ctx { int domain_id; int sesscount; int vmcount; - u64 perms; struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS]; struct rpmsg_device *rpdev; struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS]; @@ -1279,9 +1278,11 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl, /* Map if we have any heap VMIDs associated with this ADSP Static Process. 
*/ if (fl->cctx->vmcount) { + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS); + err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, (u64)fl->cctx->remote_heap->size, - &fl->cctx->perms, + &src_perms, fl->cctx->vmperms, fl->cctx->vmcount); if (err) { dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d", @@ -1915,8 +1916,10 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) /* Add memory to static PD pool, protection thru hypervisor */ if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS); + err = qcom_scm_assign_mem(buf->phys, (u64)buf->size, - &fl->cctx->perms, fl->cctx->vmperms, fl->cctx->vmcount); + &src_perms, fl->cctx->vmperms, fl->cctx->vmcount); if (err) { dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", buf->phys, buf->size, err); @@ -2191,7 +2194,7 @@ static int fastrpc_cb_remove(struct platform_device *pdev) int i; spin_lock_irqsave(&cctx->lock, flags); - for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) { + for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) { if (cctx->session[i].sid == sess->sid) { cctx->session[i].valid = false; cctx->sesscount--; @@ -2290,7 +2293,6 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) if (vmcount) { data->vmcount = vmcount; - data->perms = BIT(QCOM_SCM_VMID_HLOS); for (i = 0; i < data->vmcount; i++) { data->vmperms[i].vmid = vmids[i]; data->vmperms[i].perm = QCOM_SCM_PERM_RWX; diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c index c6eb27d46cb0..15119584473c 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c +++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c @@ -198,8 +198,14 @@ static int lis3lv02d_i2c_suspend(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct lis3lv02d *lis3 = i2c_get_clientdata(client); - if (!lis3->pdata || !lis3->pdata->wakeup_flags) + /* Turn on for wakeup if turned off by runtime suspend */ + if (lis3->pdata && lis3->pdata->wakeup_flags) { + if (pm_runtime_suspended(dev)) + lis3lv02d_poweron(lis3); + /* For non wakeup turn off if not already turned off by runtime suspend */ + } else if (!pm_runtime_suspended(dev)) lis3lv02d_poweroff(lis3); + return 0; } @@ -208,13 +214,12 @@ static int lis3lv02d_i2c_resume(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct lis3lv02d *lis3 = i2c_get_clientdata(client); - /* - * pm_runtime documentation says that devices should always - * be powered on at resume. Pm_runtime turns them off after system - * wide resume is complete. - */ - if (!lis3->pdata || !lis3->pdata->wakeup_flags || - pm_runtime_suspended(dev)) + /* Turn back off if turned on for wakeup and runtime suspended*/ + if (lis3->pdata && lis3->pdata->wakeup_flags) { + if (pm_runtime_suspended(dev)) + lis3lv02d_poweroff(lis3); + /* For non wakeup turn back on if not runtime suspended */ + } else if (!pm_runtime_suspended(dev)) lis3lv02d_poweron(lis3); return 0; diff --git a/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c b/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c index be52b113aea9..89364bdbb129 100644 --- a/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c +++ b/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c @@ -96,7 +96,8 @@ static const struct component_master_ops mei_component_master_ops = { * * The function checks if the device is pci device and * Intel VGA adapter, the subcomponent is SW Proxy - * and the parent of MEI PCI and the parent of VGA are the same PCH device. 
+ * and the VGA is on the bus 0 reserved for built-in devices + * to reject discrete GFX. * * @dev: master device * @subcomponent: subcomponent to match (I915_COMPONENT_SWPROXY) @@ -123,7 +124,8 @@ static int mei_gsc_proxy_component_match(struct device *dev, int subcomponent, if (subcomponent != I915_COMPONENT_GSC_PROXY) return 0; - return component_compare_dev(dev->parent, ((struct device *)data)->parent); + /* Only built-in GFX */ + return (pdev->bus->number == 0); } static int mei_gsc_proxy_probe(struct mei_cl_device *cldev, @@ -146,7 +148,7 @@ static int mei_gsc_proxy_probe(struct mei_cl_device *cldev, } component_match_add_typed(&cldev->dev, &master_match, - mei_gsc_proxy_component_match, cldev->dev.parent); + mei_gsc_proxy_component_match, NULL); if (IS_ERR_OR_NULL(master_match)) { ret = -ENOMEM; goto err_exit; diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 961e5d53a27a..aac36750d2c5 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -112,6 +112,8 @@ #define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */ #define MEI_DEV_ID_MTL_M 0x7E70 /* Meteor Lake Point M */ +#define MEI_DEV_ID_ARL_S 0x7F68 /* Arrow Lake Point S */ +#define MEI_DEV_ID_ARL_H 0x7770 /* Arrow Lake Point H */ /* * MEI HW Section diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 676d566f38dd..8cf636c54032 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -119,6 +119,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_H, MEI_ME_PCH15_CFG)}, /* required last entry */ {0, } diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c index 6f4a4be6ccb5..55f7db490d3b 100644 --- a/drivers/misc/mei/vsc-tp.c +++ b/drivers/misc/mei/vsc-tp.c @@ -535,6 +535,7 @@ static const struct acpi_device_id vsc_tp_acpi_ids[] = { { "INTC1009" }, /* Raptor Lake */ { "INTC1058" }, /* Tiger Lake */ { "INTC1094" }, /* Alder Lake */ + { "INTC10D0" }, /* Meteor Lake */ {} }; MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids); diff --git a/drivers/misc/open-dice.c b/drivers/misc/open-dice.c index 8aea2d070a40..d279a4f195e2 100644 --- a/drivers/misc/open-dice.c +++ b/drivers/misc/open-dice.c @@ -140,7 +140,6 @@ static int __init open_dice_probe(struct platform_device *pdev) return -ENOMEM; *drvdata = (struct open_dice_drvdata){ - .lock = __MUTEX_INITIALIZER(drvdata->lock), .rmem = rmem, .misc = (struct miscdevice){ .parent = dev, @@ -150,6 +149,7 @@ static int __init open_dice_probe(struct platform_device *pdev) .mode = 0600, }, }; + mutex_init(&drvdata->lock); /* Index overflow check not needed, misc_register() will fail. 
*/ snprintf(drvdata->name, sizeof(drvdata->name), DRIVER_NAME"%u", dev_idx++); diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index f410bee50132..58ed7193a3ca 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1015,10 +1015,12 @@ static int mmc_select_bus_width(struct mmc_card *card) static unsigned ext_csd_bits[] = { EXT_CSD_BUS_WIDTH_8, EXT_CSD_BUS_WIDTH_4, + EXT_CSD_BUS_WIDTH_1, }; static unsigned bus_widths[] = { MMC_BUS_WIDTH_8, MMC_BUS_WIDTH_4, + MMC_BUS_WIDTH_1, }; struct mmc_host *host = card->host; unsigned idx, bus_width = 0; diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index a0a2412f62a7..2ae60d208cdf 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -174,8 +174,8 @@ static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp) return sg; } -static void mmc_queue_setup_discard(struct request_queue *q, - struct mmc_card *card) +static void mmc_queue_setup_discard(struct mmc_card *card, + struct queue_limits *lim) { unsigned max_discard; @@ -183,15 +183,17 @@ static void mmc_queue_setup_discard(struct request_queue *q, if (!max_discard) return; - blk_queue_max_discard_sectors(q, max_discard); - q->limits.discard_granularity = card->pref_erase << 9; - /* granularity must not be greater than max. discard */ - if (card->pref_erase > max_discard) - q->limits.discard_granularity = SECTOR_SIZE; + lim->max_hw_discard_sectors = max_discard; if (mmc_can_secure_erase_trim(card)) - blk_queue_max_secure_erase_sectors(q, max_discard); + lim->max_secure_erase_sectors = max_discard; if (mmc_can_trim(card) && card->erased_byte == 0) - blk_queue_max_write_zeroes_sectors(q, max_discard); + lim->max_write_zeroes_sectors = max_discard; + + /* granularity must not be greater than max. 
discard */ + if (card->pref_erase > max_discard) + lim->discard_granularity = SECTOR_SIZE; + else + lim->discard_granularity = card->pref_erase << 9; } static unsigned short mmc_get_max_segments(struct mmc_host *host) @@ -341,40 +343,53 @@ static const struct blk_mq_ops mmc_mq_ops = { .timeout = mmc_mq_timed_out, }; -static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) +static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq, + struct mmc_card *card) { struct mmc_host *host = card->host; - unsigned block_size = 512; + struct queue_limits lim = { }; + struct gendisk *disk; - blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue); if (mmc_can_erase(card)) - mmc_queue_setup_discard(mq->queue, card); + mmc_queue_setup_discard(card, &lim); if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask) - blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH); - blk_queue_max_hw_sectors(mq->queue, - min(host->max_blk_count, host->max_req_size / 512)); - if (host->can_dma_map_merge) - WARN(!blk_queue_can_use_dma_map_merging(mq->queue, - mmc_dev(host)), - "merging was advertised but not possible"); - blk_queue_max_segments(mq->queue, mmc_get_max_segments(host)); - - if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) { - block_size = card->ext_csd.data_sector_size; - WARN_ON(block_size != 512 && block_size != 4096); - } + lim.bounce = BLK_BOUNCE_HIGH; + + lim.max_hw_sectors = min(host->max_blk_count, host->max_req_size / 512); + + if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) + lim.logical_block_size = card->ext_csd.data_sector_size; + else + lim.logical_block_size = 512; + + WARN_ON_ONCE(lim.logical_block_size != 512 && + lim.logical_block_size != 4096); - blk_queue_logical_block_size(mq->queue, block_size); /* - * After blk_queue_can_use_dma_map_merging() was called with succeed, - * since it calls blk_queue_virt_boundary(), the mmc should not call - * both blk_queue_max_segment_size(). + * Setting a virt_boundary implicity sets a max_segment_size, so try + * to set the hardware one here. 
*/ - if (!host->can_dma_map_merge) - blk_queue_max_segment_size(mq->queue, - round_down(host->max_seg_size, block_size)); + if (host->can_dma_map_merge) { + lim.virt_boundary_mask = dma_get_merge_boundary(mmc_dev(host)); + lim.max_segments = MMC_DMA_MAP_MERGE_SEGMENTS; + } else { + lim.max_segment_size = + round_down(host->max_seg_size, lim.logical_block_size); + lim.max_segments = host->max_segs; + } + + disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq); + if (IS_ERR(disk)) + return disk; + mq->queue = disk->queue; + + if (mmc_host_is_spi(host) && host->use_spi_crc) + blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue); + blk_queue_rq_timeout(mq->queue, 60 * HZ); + + blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue); dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue)); @@ -386,6 +401,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) init_waitqueue_head(&mq->wait); mmc_crypto_setup_queue(mq->queue, host); + return disk; } static inline bool mmc_merge_capable(struct mmc_host *host) @@ -447,18 +463,9 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) return ERR_PTR(ret); - disk = blk_mq_alloc_disk(&mq->tag_set, mq); - if (IS_ERR(disk)) { + disk = mmc_alloc_disk(mq, card); + if (IS_ERR(disk)) blk_mq_free_tag_set(&mq->tag_set); - return disk; - } - mq->queue = disk->queue; - - if (mmc_host_is_spi(host) && host->use_spi_crc) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue); - blk_queue_rq_timeout(mq->queue, 60 * HZ); - - mmc_setup_queue(mq, card); return disk; } diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index 2a2d949a9344..39f45c2b6de8 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -75,11 +75,15 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_irq); int mmc_gpio_get_ro(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; + int cansleep; if (!ctx || !ctx->ro_gpio) return -ENOSYS; - return gpiod_get_value_cansleep(ctx->ro_gpio); + cansleep = gpiod_cansleep(ctx->ro_gpio); + return cansleep ? 
+ gpiod_get_value_cansleep(ctx->ro_gpio) : + gpiod_get_value(ctx->ro_gpio); } EXPORT_SYMBOL(mmc_gpio_get_ro); diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c index 35067e1e6cd8..f5da7f9baa52 100644 --- a/drivers/mmc/host/mmci_stm32_sdmmc.c +++ b/drivers/mmc/host/mmci_stm32_sdmmc.c @@ -225,6 +225,8 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl) struct scatterlist *sg; int i; + host->dma_in_progress = true; + if (!host->variant->dma_lli || data->sg_len == 1 || idma->use_bounce_buffer) { u32 dma_addr; @@ -263,9 +265,30 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl) return 0; } +static void sdmmc_idma_error(struct mmci_host *host) +{ + struct mmc_data *data = host->data; + struct sdmmc_idma *idma = host->dma_priv; + + if (!dma_inprogress(host)) + return; + + writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR); + host->dma_in_progress = false; + data->host_cookie = 0; + + if (!idma->use_bounce_buffer) + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); +} + static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data) { + if (!dma_inprogress(host)) + return; + writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR); + host->dma_in_progress = false; if (!data->host_cookie) sdmmc_idma_unprep_data(host, data, 0); @@ -676,6 +699,7 @@ static struct mmci_host_ops sdmmc_variant_ops = { .dma_setup = sdmmc_idma_setup, .dma_start = sdmmc_idma_start, .dma_finalize = sdmmc_idma_finalize, + .dma_error = sdmmc_idma_error, .set_clkreg = mmci_sdmmc_set_clkreg, .set_pwrreg = mmci_sdmmc_set_pwrreg, .busy_complete = sdmmc_busy_complete, diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index 7bfee28116af..d4a02184784a 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -693,6 +693,35 @@ static int sdhci_pci_o2_init_sd_express(struct mmc_host *mmc, struct mmc_ios *io return 0; } +static void sdhci_pci_o2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd) +{ + struct sdhci_pci_chip *chip; + struct sdhci_pci_slot *slot = sdhci_priv(host); + u32 scratch_32 = 0; + u8 scratch_8 = 0; + + chip = slot->chip; + + if (mode == MMC_POWER_OFF) { + /* UnLock WP */ + pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8); + scratch_8 &= 0x7f; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8); + + /* Set PCR 0x354[16] to switch Clock Source back to OPE Clock */ + pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32); + scratch_32 &= ~(O2_SD_SEL_DLL); + pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32); + + /* Lock WP */ + pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8); + scratch_8 |= 0x80; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8); + } + + sdhci_set_power(host, mode, vdd); +} + static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) { struct sdhci_pci_chip *chip; @@ -1051,6 +1080,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = { .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, + .set_power = sdhci_pci_o2_set_power, }; const struct sdhci_pci_fixes sdhci_o2 = { diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c index 8cf3a375de65..cc9d28b75eb9 100644 --- a/drivers/mmc/host/sdhci-xenon-phy.c +++ b/drivers/mmc/host/sdhci-xenon-phy.c @@ -11,6 +11,7 @@ #include <linux/slab.h> 
#include <linux/delay.h> #include <linux/ktime.h> +#include <linux/iopoll.h> #include <linux/of_address.h> #include "sdhci-pltfm.h" @@ -109,6 +110,8 @@ #define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST (XENON_EMMC_PHY_REG_BASE + 0x18) #define XENON_LOGIC_TIMING_VALUE 0x00AA8977 +#define XENON_MAX_PHY_TIMEOUT_LOOPS 100 + /* * List offset of PHY registers and some special register values * in eMMC PHY 5.0 or eMMC PHY 5.1 @@ -216,6 +219,19 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host) return 0; } +static int xenon_check_stability_internal_clk(struct sdhci_host *host) +{ + u32 reg; + int err; + + err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE, + 1100, 20000, false, host, SDHCI_CLOCK_CONTROL); + if (err) + dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n"); + + return err; +} + /* * eMMC 5.0/5.1 PHY init/re-init. * eMMC PHY init should be executed after: @@ -232,6 +248,11 @@ static int xenon_emmc_phy_init(struct sdhci_host *host) struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs; + int ret = xenon_check_stability_internal_clk(host); + + if (ret) + return ret; + reg = sdhci_readl(host, phy_regs->timing_adj); reg |= XENON_PHY_INITIALIZAION; sdhci_writel(host, reg, phy_regs->timing_adj); @@ -259,18 +280,27 @@ static int xenon_emmc_phy_init(struct sdhci_host *host) /* get the wait time */ wait /= clock; wait++; - /* wait for host eMMC PHY init completes */ - udelay(wait); - reg = sdhci_readl(host, phy_regs->timing_adj); - reg &= XENON_PHY_INITIALIZAION; - if (reg) { + /* + * AC5X spec says bit must be polled until zero. + * We see cases in which timeout can take longer + * than the standard calculation on AC5X, which is + * expected following the spec comment above. + * According to the spec, we must wait as long as + * it takes for that bit to toggle on AC5X. 
+ * Cap that with 100 delay loops so we won't get + * stuck here forever: + */ + + ret = read_poll_timeout(sdhci_readl, reg, + !(reg & XENON_PHY_INITIALIZAION), + wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait, + false, host, phy_regs->timing_adj); + if (ret) dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n", - wait); - return -ETIMEDOUT; - } + wait * XENON_MAX_PHY_TIMEOUT_LOOPS); - return 0; + return ret; } #define ARMADA_3700_SOC_PAD_1_8V 0x1 diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index aa44a23ec045..97a00ec9a4d4 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c @@ -37,7 +37,7 @@ /* Info for the block device */ struct block2mtd_dev { struct list_head list; - struct bdev_handle *bdev_handle; + struct file *bdev_file; struct mtd_info mtd; struct mutex write_mutex; }; @@ -55,8 +55,7 @@ static struct page *page_read(struct address_space *mapping, pgoff_t index) /* erase a specified part of the device */ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len) { - struct address_space *mapping = - dev->bdev_handle->bdev->bd_inode->i_mapping; + struct address_space *mapping = dev->bdev_file->f_mapping; struct page *page; pgoff_t index = to >> PAGE_SHIFT; // page index int pages = len >> PAGE_SHIFT; @@ -106,8 +105,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct block2mtd_dev *dev = mtd->priv; - struct address_space *mapping = - dev->bdev_handle->bdev->bd_inode->i_mapping; + struct address_space *mapping = dev->bdev_file->f_mapping; struct page *page; pgoff_t index = from >> PAGE_SHIFT; int offset = from & (PAGE_SIZE-1); @@ -142,8 +140,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf, loff_t to, size_t len, size_t *retlen) { struct page *page; - struct address_space *mapping = - dev->bdev_handle->bdev->bd_inode->i_mapping; + struct address_space *mapping = dev->bdev_file->f_mapping; pgoff_t index = to >> PAGE_SHIFT; // page index int offset = to & ~PAGE_MASK; // page offset int cpylen; @@ -198,7 +195,7 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len, static void block2mtd_sync(struct mtd_info *mtd) { struct block2mtd_dev *dev = mtd->priv; - sync_blockdev(dev->bdev_handle->bdev); + sync_blockdev(file_bdev(dev->bdev_file)); return; } @@ -210,10 +207,9 @@ static void block2mtd_free_device(struct block2mtd_dev *dev) kfree(dev->mtd.name); - if (dev->bdev_handle) { - invalidate_mapping_pages( - dev->bdev_handle->bdev->bd_inode->i_mapping, 0, -1); - bdev_release(dev->bdev_handle); + if (dev->bdev_file) { + invalidate_mapping_pages(dev->bdev_file->f_mapping, 0, -1); + fput(dev->bdev_file); } kfree(dev); @@ -223,10 +219,10 @@ static void block2mtd_free_device(struct block2mtd_dev *dev) * This function is marked __ref because it calls the __init marked * early_lookup_bdev when called from the early boot code. */ -static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname, +static struct file __ref *mdtblock_early_get_bdev(const char *devname, blk_mode_t mode, int timeout, struct block2mtd_dev *dev) { - struct bdev_handle *bdev_handle = ERR_PTR(-ENODEV); + struct file *bdev_file = ERR_PTR(-ENODEV); #ifndef MODULE int i; @@ -234,7 +230,7 @@ static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname, * We can't use early_lookup_bdev from a running system. 
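As an aside to the sdhci-xenon-phy hunk above, which replaces an open-coded udelay()-and-recheck loop with read_poll_timeout() from <linux/iopoll.h>, here is a minimal sketch of that helper's shape. The register offset MY_STATUS, the MY_READY bit and my_wait_ready() are hypothetical names, and the 10 us / 10 ms figures are illustrative only.

#include <linux/iopoll.h>
#include <linux/io.h>
#include <linux/bits.h>
#include <linux/types.h>

#define MY_STATUS	0x04		/* hypothetical status register offset */
#define MY_READY	BIT(0)		/* hypothetical "ready" bit */

static int my_wait_ready(void __iomem *base)
{
	u32 val;

	/* Re-read MY_STATUS every 10 us until MY_READY is set, giving up
	 * after 10 ms total; returns 0 on success or -ETIMEDOUT, so the
	 * caller no longer needs its own delay-and-recheck loop.
	 */
	return read_poll_timeout(readl, val, val & MY_READY,
				 10, 10000, false, base + MY_STATUS);
}

The xenon change uses the same helper with sdhci_readl() and caps the total wait at XENON_MAX_PHY_TIMEOUT_LOOPS times the calculated per-iteration delay.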
*/ if (system_state >= SYSTEM_RUNNING) - return bdev_handle; + return bdev_file; /* * We might not have the root device mounted at this point. @@ -253,20 +249,20 @@ static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname, wait_for_device_probe(); if (!early_lookup_bdev(devname, &devt)) { - bdev_handle = bdev_open_by_dev(devt, mode, dev, NULL); - if (!IS_ERR(bdev_handle)) + bdev_file = bdev_file_open_by_dev(devt, mode, dev, NULL); + if (!IS_ERR(bdev_file)) break; } } #endif - return bdev_handle; + return bdev_file; } static struct block2mtd_dev *add_device(char *devname, int erase_size, char *label, int timeout) { const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE; - struct bdev_handle *bdev_handle; + struct file *bdev_file; struct block_device *bdev; struct block2mtd_dev *dev; char *name; @@ -279,16 +275,16 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size, return NULL; /* Get a handle on the device */ - bdev_handle = bdev_open_by_path(devname, mode, dev, NULL); - if (IS_ERR(bdev_handle)) - bdev_handle = mdtblock_early_get_bdev(devname, mode, timeout, + bdev_file = bdev_file_open_by_path(devname, mode, dev, NULL); + if (IS_ERR(bdev_file)) + bdev_file = mdtblock_early_get_bdev(devname, mode, timeout, dev); - if (IS_ERR(bdev_handle)) { + if (IS_ERR(bdev_file)) { pr_err("error: cannot open device %s\n", devname); goto err_free_block2mtd; } - dev->bdev_handle = bdev_handle; - bdev = bdev_handle->bdev; + dev->bdev_file = bdev_file; + bdev = file_bdev(bdev_file); if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) { pr_err("attempting to use an MTD device as a block device\n"); diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index f0526dcc2162..3caa0717d46c 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -277,6 +277,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) { struct mtd_blktrans_ops *tr = new->tr; struct mtd_blktrans_dev *d; + struct queue_limits lim = { }; int last_devnum = -1; struct gendisk *gd; int ret; @@ -331,9 +332,13 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING); if (ret) goto out_kfree_tag_set; + + lim.logical_block_size = tr->blksize; + if (tr->discard) + lim.max_hw_discard_sectors = UINT_MAX; /* Create gendisk */ - gd = blk_mq_alloc_disk(new->tag_set, new); + gd = blk_mq_alloc_disk(new->tag_set, &lim, new); if (IS_ERR(gd)) { ret = PTR_ERR(gd); goto out_free_tag_set; @@ -371,14 +376,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) if (tr->flush) blk_queue_write_cache(new->rq, true, false); - blk_queue_logical_block_size(new->rq, tr->blksize); - blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq); - if (tr->discard) - blk_queue_max_discard_sectors(new->rq, UINT_MAX); - gd->queue = new->rq; if (new->readonly) diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index e451b28840d5..5887feb347a4 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -621,6 +621,7 @@ static void mtd_check_of_node(struct mtd_info *mtd) if (plen == mtd_name_len && !strncmp(mtd->name, pname + offset, plen)) { mtd_set_of_node(mtd, mtd_dn); + of_node_put(mtd_dn); break; } } diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index a46698744850..5b0f5a9cef81 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -290,16 +290,13 @@ static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = { 
MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0), MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,32, 30), MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,64, 30), - MARVELL_LAYOUT( 2048, 512, 12, 3, 2, 704, 0, 30,640, 0, 30), - MARVELL_LAYOUT( 2048, 512, 16, 5, 4, 512, 0, 30, 0, 32, 30), + MARVELL_LAYOUT( 2048, 512, 16, 4, 4, 512, 0, 30, 0, 32, 30), MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0), - MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30), - MARVELL_LAYOUT( 4096, 512, 12, 6, 5, 704, 0, 30,576, 32, 30), - MARVELL_LAYOUT( 4096, 512, 16, 9, 8, 512, 0, 30, 0, 32, 30), + MARVELL_LAYOUT( 4096, 512, 8, 4, 4, 1024, 0, 30, 0, 64, 30), + MARVELL_LAYOUT( 4096, 512, 16, 8, 8, 512, 0, 30, 0, 32, 30), MARVELL_LAYOUT( 8192, 512, 4, 4, 4, 2048, 0, 30, 0, 0, 0), - MARVELL_LAYOUT( 8192, 512, 8, 9, 8, 1024, 0, 30, 0, 160, 30), - MARVELL_LAYOUT( 8192, 512, 12, 12, 11, 704, 0, 30,448, 64, 30), - MARVELL_LAYOUT( 8192, 512, 16, 17, 16, 512, 0, 30, 0, 32, 30), + MARVELL_LAYOUT( 8192, 512, 8, 8, 8, 1024, 0, 30, 0, 160, 30), + MARVELL_LAYOUT( 8192, 512, 16, 16, 16, 512, 0, 30, 0, 32, 30), }; /** diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c index 987710e09441..6023cba748bb 100644 --- a/drivers/mtd/nand/spi/gigadevice.c +++ b/drivers/mtd/nand/spi/gigadevice.c @@ -186,7 +186,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand, { u8 status2; struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2, - &status2); + spinand->scratchbuf); int ret; switch (status & STATUS_ECC_MASK) { @@ -207,6 +207,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand, * report the maximum of 4 in this case */ /* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */ + status2 = *(spinand->scratchbuf); return ((status & STATUS_ECC_MASK) >> 2) | ((status2 & STATUS_ECC_MASK) >> 4); @@ -228,7 +229,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand, { u8 status2; struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2, - &status2); + spinand->scratchbuf); int ret; switch (status & STATUS_ECC_MASK) { @@ -248,6 +249,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand, * 1 ... 
4 bits are flipped (and corrected) */ /* bits sorted this way (1...0): ECCSE1, ECCSE0 */ + status2 = *(spinand->scratchbuf); return ((status2 & STATUS_ECC_MASK) >> 4) + 1; case STATUS_ECC_UNCOR_ERROR: diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index 654bd7372cd8..5c8fdcc088a0 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -348,6 +348,9 @@ static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity) int ubiblock_create(struct ubi_volume_info *vi) { + struct queue_limits lim = { + .max_segments = UBI_MAX_SG_COUNT, + }; struct ubiblock *dev; struct gendisk *gd; u64 disk_capacity; @@ -393,7 +396,7 @@ int ubiblock_create(struct ubi_volume_info *vi) /* Initialize the gendisk of this ubiblock device */ - gd = blk_mq_alloc_disk(&dev->tag_set, dev); + gd = blk_mq_alloc_disk(&dev->tag_set, &lim, dev); if (IS_ERR(gd)) { ret = PTR_ERR(gd); goto out_free_tags; @@ -416,7 +419,6 @@ int ubiblock_create(struct ubi_volume_info *vi) dev->gd = gd; dev->rq = gd->queue; - blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT); list_add_tail(&dev->list, &ubiblock_devices); diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c index 8c651fdee039..57f1729066f2 100644 --- a/drivers/net/arcnet/arc-rawmode.c +++ b/drivers/net/arcnet/arc-rawmode.c @@ -186,4 +186,5 @@ static void __exit arcnet_raw_exit(void) module_init(arcnet_raw_init); module_exit(arcnet_raw_exit); +MODULE_DESCRIPTION("ARCnet raw mode packet interface module"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index 8c3ccc7c83cd..53d10a04d1bd 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c @@ -312,6 +312,7 @@ module_param(node, int, 0); module_param(io, int, 0); module_param(irq, int, 0); module_param_string(device, device, sizeof(device), 0); +MODULE_DESCRIPTION("ARCnet COM90xx RIM I chipset driver"); MODULE_LICENSE("GPL"); static struct net_device *my_dev; diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index c09b567845e1..7a0a79973769 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c @@ -265,4 +265,5 @@ static void __exit capmode_module_exit(void) module_init(capmode_module_init); module_exit(capmode_module_exit); +MODULE_DESCRIPTION("ARCnet CAP mode packet interface module"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 7b5c8bb02f11..c5e571ec94c9 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -61,6 +61,7 @@ module_param(timeout, int, 0); module_param(backplane, int, 0); module_param(clockp, int, 0); module_param(clockm, int, 0); +MODULE_DESCRIPTION("ARCnet COM20020 chipset PCI driver"); MODULE_LICENSE("GPL"); static void led_tx_set(struct led_classdev *led_cdev, diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c index 06e1651b594b..a0053e3992a3 100644 --- a/drivers/net/arcnet/com20020.c +++ b/drivers/net/arcnet/com20020.c @@ -399,6 +399,7 @@ EXPORT_SYMBOL(com20020_found); EXPORT_SYMBOL(com20020_netdev_ops); #endif +MODULE_DESCRIPTION("ARCnet COM20020 chipset core driver"); MODULE_LICENSE("GPL"); #ifdef MODULE diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c index dc3253b318da..75f08aa7528b 100644 --- a/drivers/net/arcnet/com20020_cs.c +++ b/drivers/net/arcnet/com20020_cs.c @@ -97,6 +97,7 @@ module_param(backplane, int, 0); module_param(clockp, int, 0); module_param(clockm, 
int, 0); +MODULE_DESCRIPTION("ARCnet COM20020 chipset PCMCIA driver"); MODULE_LICENSE("GPL"); /*====================================================================*/ diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index 37b47749fc8b..3b463fbc6402 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -350,6 +350,7 @@ static char device[9]; /* use eg. device=arc1 to change name */ module_param_hw(io, int, ioport, 0); module_param_hw(irq, int, irq, 0); module_param_string(device, device, sizeof(device), 0); +MODULE_DESCRIPTION("ARCnet COM90xx IO mapped chipset driver"); MODULE_LICENSE("GPL"); #ifndef MODULE diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index f49dae194284..b3b287c16561 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c @@ -645,6 +645,7 @@ static void com90xx_copy_from_card(struct net_device *dev, int bufnum, TIME(dev, "memcpy_fromio", count, memcpy_fromio(buf, memaddr, count)); } +MODULE_DESCRIPTION("ARCnet COM90xx normal chipset driver"); MODULE_LICENSE("GPL"); static int __init com90xx_init(void) diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c index a7752a5b647f..46519ca63a0a 100644 --- a/drivers/net/arcnet/rfc1051.c +++ b/drivers/net/arcnet/rfc1051.c @@ -78,6 +78,7 @@ static void __exit arcnet_rfc1051_exit(void) module_init(arcnet_rfc1051_init); module_exit(arcnet_rfc1051_exit); +MODULE_DESCRIPTION("ARCNet packet format (RFC 1051) module"); MODULE_LICENSE("GPL"); /* Determine a packet's protocol ID. diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c index a4c856282674..0edf35d971c5 100644 --- a/drivers/net/arcnet/rfc1201.c +++ b/drivers/net/arcnet/rfc1201.c @@ -35,6 +35,7 @@ #include "arcdevice.h" +MODULE_DESCRIPTION("ARCNet packet format (RFC 1201) module"); MODULE_LICENSE("GPL"); static __be16 type_trans(struct sk_buff *skb, struct net_device *dev); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 4e0600c7b050..cd0683bcca03 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1811,7 +1811,7 @@ void bond_xdp_set_features(struct net_device *bond_dev) ASSERT_RTNL(); - if (!bond_xdp_check(bond)) { + if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) { xdp_clear_features_flag(bond_dev); return; } @@ -1819,6 +1819,8 @@ void bond_xdp_set_features(struct net_device *bond_dev) bond_for_each_slave(bond, slave, iter) val &= slave->dev->xdp_features; + val &= ~NETDEV_XDP_ACT_XSK_ZEROCOPY; + xdp_set_features_flag(bond_dev, val); } @@ -5909,9 +5911,6 @@ void bond_setup(struct net_device *bond_dev) if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) bond_dev->features |= BOND_XFRM_FEATURES; #endif /* CONFIG_XFRM_OFFLOAD */ - - if (bond_xdp_check(bond)) - bond_dev->xdp_features = NETDEV_XDP_ACT_MASK; } /* Destroy a bonding device. diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index 036d85ef07f5..dfdc039d92a6 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -346,7 +346,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], /* Neither of TDC parameters nor TDC flags are * provided: do calculation */ - can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming, + can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt, &priv->ctrlmode, priv->ctrlmode_supported); } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly * turned off. 
TDC is disabled: do nothing diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c index 237066d30704..14ca42491512 100644 --- a/drivers/net/dsa/dsa_loop_bdinfo.c +++ b/drivers/net/dsa/dsa_loop_bdinfo.c @@ -32,4 +32,5 @@ static int __init dsa_loop_bdinfo_init(void) } arch_initcall(dsa_loop_bdinfo_init) +MODULE_DESCRIPTION("DSA mock-up switch driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 61b71bcfe396..c3da97abce20 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -49,9 +49,9 @@ static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data) mutex_lock(&dev->alu_mutex); ctrl_addr = IND_ACC_TABLE(table) | addr; - ret = ksz_write8(dev, regs[REG_IND_BYTE], data); + ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); if (!ret) - ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); + ret = ksz_write8(dev, regs[REG_IND_BYTE], data); mutex_unlock(&dev->alu_mutex); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 391c4dbdff42..3c1f657593a8 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2838,8 +2838,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, /* MT753x MAC works in 1G full duplex mode for all up-clocked * variants. */ - if (interface == PHY_INTERFACE_MODE_INTERNAL || - interface == PHY_INTERFACE_MODE_TRGMII || + if (interface == PHY_INTERFACE_MODE_TRGMII || (phy_interface_mode_is_8023z(interface))) { speed = SPEED_1000; duplex = DUPLEX_FULL; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 383b3c4d6f59..614cabb5c1b0 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3659,7 +3659,7 @@ static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad, int err; if (!chip->info->ops->phy_read_c45) - return -EOPNOTSUPP; + return 0xffff; mv88e6xxx_reg_lock(chip); err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val); diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c index c51f40960961..7a864329cb72 100644 --- a/drivers/net/dsa/qca/qca8k-8xxx.c +++ b/drivers/net/dsa/qca/qca8k-8xxx.c @@ -2051,12 +2051,11 @@ qca8k_sw_probe(struct mdio_device *mdiodev) priv->info = of_device_get_match_data(priv->dev); priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset", - GPIOD_ASIS); + GPIOD_OUT_HIGH); if (IS_ERR(priv->reset_gpio)) return PTR_ERR(priv->reset_gpio); if (priv->reset_gpio) { - gpiod_set_value_cansleep(priv->reset_gpio, 1); /* The active low duration must be greater than 10 ms * and checkpatch.pl wants 20 ms. */ diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig index da3bdd302502..760a9a60bc15 100644 --- a/drivers/net/ethernet/adi/Kconfig +++ b/drivers/net/ethernet/adi/Kconfig @@ -21,6 +21,7 @@ config ADIN1110 tristate "Analog Devices ADIN1110 MAC-PHY" depends on SPI && NET_SWITCHDEV select CRC8 + select PHYLIB help Say yes here to build support for Analog Devices ADIN1110 Low Power 10BASE-T1L Ethernet MAC-PHY. 
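The qca8k hunk above folds the initial reset assertion into the GPIO request itself by asking for GPIOD_OUT_HIGH instead of GPIOD_ASIS. A hedged sketch of that idiom follows; my_hw_reset() and the 20 ms hold time are hypothetical, while the "reset" consumer name mirrors the hunk.

#include <linux/gpio/consumer.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>

static int my_hw_reset(struct device *dev)
{
	struct gpio_desc *reset;

	/* GPIOD_OUT_HIGH drives the line to its active (asserted) level as
	 * part of the request, so no separate gpiod_set_value call is needed
	 * to start the reset pulse.
	 */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	if (reset) {
		msleep(20);				/* hold reset asserted */
		gpiod_set_value_cansleep(reset, 0);	/* release reset */
	}

	return 0;
}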
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c index 5beadabc2136..ea773cfa0af6 100644 --- a/drivers/net/ethernet/amd/pds_core/adminq.c +++ b/drivers/net/ethernet/amd/pds_core/adminq.c @@ -63,6 +63,15 @@ static int pdsc_process_notifyq(struct pdsc_qcq *qcq) return nq_work; } +static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc) +{ + if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) || + pdsc->state & BIT_ULL(PDSC_S_FW_DEAD)) + return false; + + return refcount_inc_not_zero(&pdsc->adminq_refcnt); +} + void pdsc_process_adminq(struct pdsc_qcq *qcq) { union pds_core_adminq_comp *comp; @@ -75,9 +84,9 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq) int aq_work = 0; int credits; - /* Don't process AdminQ when shutting down */ - if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) { - dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n", + /* Don't process AdminQ when it's not up */ + if (!pdsc_adminq_inc_if_up(pdsc)) { + dev_err(pdsc->dev, "%s: called while adminq is unavailable\n", __func__); return; } @@ -124,6 +133,7 @@ credits: pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx], credits, PDS_CORE_INTR_CRED_REARM); + refcount_dec(&pdsc->adminq_refcnt); } void pdsc_work_thread(struct work_struct *work) @@ -135,18 +145,20 @@ void pdsc_work_thread(struct work_struct *work) irqreturn_t pdsc_adminq_isr(int irq, void *data) { - struct pdsc_qcq *qcq = data; - struct pdsc *pdsc = qcq->pdsc; + struct pdsc *pdsc = data; + struct pdsc_qcq *qcq; - /* Don't process AdminQ when shutting down */ - if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) { - dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n", + /* Don't process AdminQ when it's not up */ + if (!pdsc_adminq_inc_if_up(pdsc)) { + dev_err(pdsc->dev, "%s: called while adminq is unavailable\n", __func__); return IRQ_HANDLED; } + qcq = &pdsc->adminqcq; queue_work(pdsc->wq, &qcq->work); pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR); + refcount_dec(&pdsc->adminq_refcnt); return IRQ_HANDLED; } @@ -179,10 +191,16 @@ static int __pdsc_adminq_post(struct pdsc *pdsc, /* Check that the FW is running */ if (!pdsc_is_fw_running(pdsc)) { - u8 fw_status = ioread8(&pdsc->info_regs->fw_status); - - dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n", - __func__, fw_status); + if (pdsc->info_regs) { + u8 fw_status = + ioread8(&pdsc->info_regs->fw_status); + + dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n", + __func__, fw_status); + } else { + dev_info(pdsc->dev, "%s: post failed - BARs not setup\n", + __func__); + } ret = -ENXIO; goto err_out_unlock; @@ -230,6 +248,12 @@ int pdsc_adminq_post(struct pdsc *pdsc, int err = 0; int index; + if (!pdsc_adminq_inc_if_up(pdsc)) { + dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n", + __func__, cmd->opcode); + return -ENXIO; + } + wc.qcq = &pdsc->adminqcq; index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc); if (index < 0) { @@ -248,10 +272,16 @@ int pdsc_adminq_post(struct pdsc *pdsc, break; if (!pdsc_is_fw_running(pdsc)) { - u8 fw_status = ioread8(&pdsc->info_regs->fw_status); - - dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n", - __func__, fw_status); + if (pdsc->info_regs) { + u8 fw_status = + ioread8(&pdsc->info_regs->fw_status); + + dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n", + __func__, fw_status); + } else { + dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n", + __func__); + } err = -ENXIO; break; } @@ -285,6 +315,8 @@ 
err_out: queue_work(pdsc->wq, &pdsc->health_work); } + refcount_dec(&pdsc->adminq_refcnt); + return err; } EXPORT_SYMBOL_GPL(pdsc_adminq_post); diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c index 11c23a7f3172..fd1a5149c003 100644 --- a/drivers/net/ethernet/amd/pds_core/auxbus.c +++ b/drivers/net/ethernet/amd/pds_core/auxbus.c @@ -160,23 +160,19 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf, if (err < 0) { dev_warn(cf->dev, "auxiliary_device_init of %s failed: %pe\n", name, ERR_PTR(err)); - goto err_out; + kfree(padev); + return ERR_PTR(err); } err = auxiliary_device_add(aux_dev); if (err) { dev_warn(cf->dev, "auxiliary_device_add of %s failed: %pe\n", name, ERR_PTR(err)); - goto err_out_uninit; + auxiliary_device_uninit(aux_dev); + return ERR_PTR(err); } return padev; - -err_out_uninit: - auxiliary_device_uninit(aux_dev); -err_out: - kfree(padev); - return ERR_PTR(err); } int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf) diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c index 0d2091e9eb28..7658a7286767 100644 --- a/drivers/net/ethernet/amd/pds_core/core.c +++ b/drivers/net/ethernet/amd/pds_core/core.c @@ -125,7 +125,7 @@ static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq) snprintf(name, sizeof(name), "%s-%d-%s", PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name); - index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, qcq); + index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, pdsc); if (index < 0) return index; qcq->intx = index; @@ -404,10 +404,7 @@ int pdsc_setup(struct pdsc *pdsc, bool init) int numdescs; int err; - if (init) - err = pdsc_dev_init(pdsc); - else - err = pdsc_dev_reinit(pdsc); + err = pdsc_dev_init(pdsc); if (err) return err; @@ -450,6 +447,7 @@ int pdsc_setup(struct pdsc *pdsc, bool init) pdsc_debugfs_add_viftype(pdsc); } + refcount_set(&pdsc->adminq_refcnt, 1); clear_bit(PDSC_S_FW_DEAD, &pdsc->state); return 0; @@ -464,6 +462,8 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing) if (!pdsc->pdev->is_virtfn) pdsc_devcmd_reset(pdsc); + if (pdsc->adminqcq.work.func) + cancel_work_sync(&pdsc->adminqcq.work); pdsc_qcq_free(pdsc, &pdsc->notifyqcq); pdsc_qcq_free(pdsc, &pdsc->adminqcq); @@ -476,10 +476,9 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing) for (i = 0; i < pdsc->nintrs; i++) pdsc_intr_free(pdsc, i); - if (removing) { - kfree(pdsc->intr_info); - pdsc->intr_info = NULL; - } + kfree(pdsc->intr_info); + pdsc->intr_info = NULL; + pdsc->nintrs = 0; } if (pdsc->kern_dbpage) { @@ -487,6 +486,7 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing) pdsc->kern_dbpage = NULL; } + pci_free_irq_vectors(pdsc->pdev); set_bit(PDSC_S_FW_DEAD, &pdsc->state); } @@ -512,6 +512,24 @@ void pdsc_stop(struct pdsc *pdsc) PDS_CORE_INTR_MASK_SET); } +static void pdsc_adminq_wait_and_dec_once_unused(struct pdsc *pdsc) +{ + /* The driver initializes the adminq_refcnt to 1 when the adminq is + * allocated and ready for use. Other users/requesters will increment + * the refcnt while in use. If the refcnt is down to 1 then the adminq + * is not in use and the refcnt can be cleared and adminq freed. Before + * calling this function the driver will set PDSC_S_FW_DEAD, which + * prevent subsequent attempts to use the adminq and increment the + * refcnt to fail. This guarantees that this function will eventually + * exit. 
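A compact sketch of the refcount scheme the pds_core hunks above introduce, assuming hypothetical my_dev/my_q_* names and a single MY_S_DEAD state bit: users take a reference only while the queue is usable, and teardown spins until only the initial reference remains.

#include <linux/refcount.h>
#include <linux/bitops.h>
#include <linux/processor.h>	/* cpu_relax() */
#include <linux/types.h>

#define MY_S_DEAD	0		/* hypothetical "no longer usable" bit */

struct my_dev {
	unsigned long	state;
	refcount_t	q_refcnt;	/* 1 == allocated, no users */
};

static void my_q_init(struct my_dev *d)
{
	refcount_set(&d->q_refcnt, 1);	/* queue ready, not yet in use */
}

/* Take a reference only if the queue is still usable. */
static bool my_q_get(struct my_dev *d)
{
	if (test_bit(MY_S_DEAD, &d->state))
		return false;
	return refcount_inc_not_zero(&d->q_refcnt);
}

static void my_q_put(struct my_dev *d)
{
	refcount_dec(&d->q_refcnt);
}

/* Teardown: mark the device dead so no new user can get a reference,
 * then wait until only the initial reference is left and drop it.
 */
static void my_q_drain(struct my_dev *d)
{
	set_bit(MY_S_DEAD, &d->state);
	while (!refcount_dec_if_one(&d->q_refcnt))
		cpu_relax();
}

pdsc_adminq_inc_if_up(), the refcount_dec() calls and pdsc_adminq_wait_and_dec_once_unused() follow the same shape, with PDSC_S_FW_DEAD and PDSC_S_STOPPING_DRIVER as the state bits.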
+ */ + while (!refcount_dec_if_one(&pdsc->adminq_refcnt)) { + dev_dbg_ratelimited(pdsc->dev, "%s: adminq in use\n", + __func__); + cpu_relax(); + } +} + void pdsc_fw_down(struct pdsc *pdsc) { union pds_core_notifyq_comp reset_event = { @@ -527,6 +545,8 @@ void pdsc_fw_down(struct pdsc *pdsc) if (pdsc->pdev->is_virtfn) return; + pdsc_adminq_wait_and_dec_once_unused(pdsc); + /* Notify clients of fw_down */ if (pdsc->fw_reporter) devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc); @@ -577,7 +597,13 @@ err_out: static void pdsc_check_pci_health(struct pdsc *pdsc) { - u8 fw_status = ioread8(&pdsc->info_regs->fw_status); + u8 fw_status; + + /* some sort of teardown already in progress */ + if (!pdsc->info_regs) + return; + + fw_status = ioread8(&pdsc->info_regs->fw_status); /* is PCI broken? */ if (fw_status != PDS_RC_BAD_PCI) diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h index e35d3e7006bf..110c4b826b22 100644 --- a/drivers/net/ethernet/amd/pds_core/core.h +++ b/drivers/net/ethernet/amd/pds_core/core.h @@ -184,6 +184,7 @@ struct pdsc { struct mutex devcmd_lock; /* lock for dev_cmd operations */ struct mutex config_lock; /* lock for configuration operations */ spinlock_t adminq_lock; /* lock for adminq operations */ + refcount_t adminq_refcnt; struct pds_core_dev_info_regs __iomem *info_regs; struct pds_core_dev_cmd_regs __iomem *cmd_regs; struct pds_core_intr __iomem *intr_ctrl; @@ -280,7 +281,6 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd, union pds_core_dev_comp *comp, int max_seconds); int pdsc_devcmd_init(struct pdsc *pdsc); int pdsc_devcmd_reset(struct pdsc *pdsc); -int pdsc_dev_reinit(struct pdsc *pdsc); int pdsc_dev_init(struct pdsc *pdsc); void pdsc_reset_prepare(struct pci_dev *pdev); diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c index 8ec392299b7d..4e8579ca1c8c 100644 --- a/drivers/net/ethernet/amd/pds_core/debugfs.c +++ b/drivers/net/ethernet/amd/pds_core/debugfs.c @@ -64,6 +64,10 @@ DEFINE_SHOW_ATTRIBUTE(identity); void pdsc_debugfs_add_ident(struct pdsc *pdsc) { + /* This file will already exist in the reset flow */ + if (debugfs_lookup("identity", pdsc->dentry)) + return; + debugfs_create_file("identity", 0400, pdsc->dentry, pdsc, &identity_fops); } diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c index 31940b857e0e..e65a1632df50 100644 --- a/drivers/net/ethernet/amd/pds_core/dev.c +++ b/drivers/net/ethernet/amd/pds_core/dev.c @@ -57,6 +57,9 @@ int pdsc_err_to_errno(enum pds_core_status_code code) bool pdsc_is_fw_running(struct pdsc *pdsc) { + if (!pdsc->info_regs) + return false; + pdsc->fw_status = ioread8(&pdsc->info_regs->fw_status); pdsc->last_fw_time = jiffies; pdsc->last_hb = ioread32(&pdsc->info_regs->fw_heartbeat); @@ -182,13 +185,17 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd, { int err; + if (!pdsc->cmd_regs) + return -ENXIO; + memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd)); pdsc_devcmd_dbell(pdsc); err = pdsc_devcmd_wait(pdsc, cmd->opcode, max_seconds); - memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp)); if ((err == -ENXIO || err == -ETIMEDOUT) && pdsc->wq) queue_work(pdsc->wq, &pdsc->health_work); + else + memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp)); return err; } @@ -309,13 +316,6 @@ static int pdsc_identify(struct pdsc *pdsc) return 0; } -int pdsc_dev_reinit(struct pdsc *pdsc) -{ - pdsc_init_devinfo(pdsc); - - 
return pdsc_identify(pdsc); -} - int pdsc_dev_init(struct pdsc *pdsc) { unsigned int nintrs; diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c index e9948ea5bbcd..54864f27c87a 100644 --- a/drivers/net/ethernet/amd/pds_core/devlink.c +++ b/drivers/net/ethernet/amd/pds_core/devlink.c @@ -111,7 +111,8 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req, mutex_lock(&pdsc->devcmd_lock); err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout * 2); - memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list)); + if (!err) + memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list)); mutex_unlock(&pdsc->devcmd_lock); if (err && err != -EIO) return err; diff --git a/drivers/net/ethernet/amd/pds_core/fw.c b/drivers/net/ethernet/amd/pds_core/fw.c index 90a811f3878a..fa626719e68d 100644 --- a/drivers/net/ethernet/amd/pds_core/fw.c +++ b/drivers/net/ethernet/amd/pds_core/fw.c @@ -107,6 +107,9 @@ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw, dev_info(pdsc->dev, "Installing firmware\n"); + if (!pdsc->cmd_regs) + return -ENXIO; + dl = priv_to_devlink(pdsc); devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0); diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c index 3080898d7b95..0050c5894563 100644 --- a/drivers/net/ethernet/amd/pds_core/main.c +++ b/drivers/net/ethernet/amd/pds_core/main.c @@ -37,6 +37,11 @@ static void pdsc_unmap_bars(struct pdsc *pdsc) struct pdsc_dev_bar *bars = pdsc->bars; unsigned int i; + pdsc->info_regs = NULL; + pdsc->cmd_regs = NULL; + pdsc->intr_status = NULL; + pdsc->intr_ctrl = NULL; + for (i = 0; i < PDS_CORE_BARS_MAX; i++) { if (bars[i].vaddr) pci_iounmap(pdsc->pdev, bars[i].vaddr); @@ -293,7 +298,7 @@ err_out_stop: err_out_teardown: pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING); err_out_unmap_bars: - del_timer_sync(&pdsc->wdtimer); + timer_shutdown_sync(&pdsc->wdtimer); if (pdsc->wq) destroy_workqueue(pdsc->wq); mutex_destroy(&pdsc->config_lock); @@ -420,7 +425,7 @@ static void pdsc_remove(struct pci_dev *pdev) */ pdsc_sriov_configure(pdev, 0); - del_timer_sync(&pdsc->wdtimer); + timer_shutdown_sync(&pdsc->wdtimer); if (pdsc->wq) destroy_workqueue(pdsc->wq); @@ -433,7 +438,6 @@ static void pdsc_remove(struct pci_dev *pdev) mutex_destroy(&pdsc->config_lock); mutex_destroy(&pdsc->devcmd_lock); - pci_free_irq_vectors(pdev); pdsc_unmap_bars(pdsc); pci_release_regions(pdev); } @@ -445,13 +449,32 @@ static void pdsc_remove(struct pci_dev *pdev) devlink_free(dl); } +static void pdsc_stop_health_thread(struct pdsc *pdsc) +{ + if (pdsc->pdev->is_virtfn) + return; + + timer_shutdown_sync(&pdsc->wdtimer); + if (pdsc->health_work.func) + cancel_work_sync(&pdsc->health_work); +} + +static void pdsc_restart_health_thread(struct pdsc *pdsc) +{ + if (pdsc->pdev->is_virtfn) + return; + + timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0); + mod_timer(&pdsc->wdtimer, jiffies + 1); +} + void pdsc_reset_prepare(struct pci_dev *pdev) { struct pdsc *pdsc = pci_get_drvdata(pdev); + pdsc_stop_health_thread(pdsc); pdsc_fw_down(pdsc); - pci_free_irq_vectors(pdev); pdsc_unmap_bars(pdsc); pci_release_regions(pdev); pci_disable_device(pdev); @@ -486,6 +509,7 @@ void pdsc_reset_done(struct pci_dev *pdev) } pdsc_fw_up(pdsc); + pdsc_restart_health_thread(pdsc); } static const struct pci_error_handlers pdsc_err_handler = { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c 
b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index abd4832e4ed2..5acb3e16b567 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -993,7 +993,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic) return 0; err_exit_hwts_rx: - aq_ring_free(&aq_ptp->hwts_rx); + aq_ring_hwts_rx_free(&aq_ptp->hwts_rx); err_exit_ptp_rx: aq_ring_free(&aq_ptp->ptp_rx); err_exit_ptp_tx: @@ -1011,7 +1011,7 @@ void aq_ptp_ring_free(struct aq_nic_s *aq_nic) aq_ring_free(&aq_ptp->ptp_tx); aq_ring_free(&aq_ptp->ptp_rx); - aq_ring_free(&aq_ptp->hwts_rx); + aq_ring_hwts_rx_free(&aq_ptp->hwts_rx); aq_ptp_skb_ring_release(&aq_ptp->skb_ring); } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index cda8597b4e14..f7433abd6591 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -919,6 +919,19 @@ void aq_ring_free(struct aq_ring_s *self) } } +void aq_ring_hwts_rx_free(struct aq_ring_s *self) +{ + if (!self) + return; + + if (self->dx_ring) { + dma_free_coherent(aq_nic_get_dev(self->aq_nic), + self->size * self->dx_size + AQ_CFG_RXDS_DEF, + self->dx_ring, self->dx_ring_pa); + self->dx_ring = NULL; + } +} + unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) { unsigned int count; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 52847310740a..d627ace850ff 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -210,6 +210,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self); int aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic, unsigned int idx, unsigned int size, unsigned int dx_size); +void aq_ring_hwts_rx_free(struct aq_ring_s *self); void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic); unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data); diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c index 29b04a274d07..80245c65cc90 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c @@ -535,9 +535,6 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, int j = 0, i; for (i = 0; i < NUM_NET_FILTERS; i++) { - if (j == *rule_cnt) - return -EMSGSIZE; - if (!priv->net_filters[i].claimed || priv->net_filters[i].port != intf->port) continue; @@ -547,6 +544,9 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, priv->net_filters[i - 1].wake_filter) continue; + if (j == *rule_cnt) + return -EMSGSIZE; + rule_locs[j++] = priv->net_filters[i].fs.location; } diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c index 53e542881255..6ad1366270f7 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -684,6 +684,8 @@ static int bcmasp_init_rx(struct bcmasp_intf *intf) intf->rx_buf_order = get_order(RING_BUFFER_SIZE); buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order); + if (!buffer_pg) + return -ENOMEM; dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE, DMA_FROM_DEVICE); @@ -1048,6 +1050,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect) netdev_err(dev, "could not attach to PHY\n"); goto err_phy_disable; } + + /* Indicate that the 
MAC is responsible for PHY PM */ + phydev->mac_managed_pm = true; } else if (!intf->wolopts) { ret = phy_resume(dev->phydev); if (ret) @@ -1092,6 +1097,7 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect) return 0; err_reclaim_tx: + netif_napi_del(&intf->tx_napi); bcmasp_reclaim_free_all_tx(intf); err_phy_disconnect: if (phydev) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index adad188e38b8..cc07660330f5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -684,7 +684,7 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb) timestamp.hwtstamp = ns_to_ktime(ns); skb_tstamp_tx(ptp->tx_skb, ×tamp); } else { - netdev_WARN_ONCE(bp->dev, + netdev_warn_once(bp->dev, "TS query for TX timer failed rc = %x\n", rc); } diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 31191b520b58..c32174484a96 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1091,10 +1091,10 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm. */ static void -bnad_tx_cleanup(struct delayed_work *work) +bnad_tx_cleanup(struct work_struct *work) { struct bnad_tx_info *tx_info = - container_of(work, struct bnad_tx_info, tx_cleanup_work); + container_of(work, struct bnad_tx_info, tx_cleanup_work.work); struct bnad *bnad = NULL; struct bna_tcb *tcb; unsigned long flags; @@ -1170,7 +1170,7 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx) * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm. */ static void -bnad_rx_cleanup(void *work) +bnad_rx_cleanup(struct work_struct *work) { struct bnad_rx_info *rx_info = container_of(work, struct bnad_rx_info, rx_cleanup_work); @@ -1991,8 +1991,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id) } tx_info->tx = tx; - INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, - (work_func_t)bnad_tx_cleanup); + INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup); /* Register ISR for the Tx object */ if (intr_info->intr_type == BNA_INTR_T_MSIX) { @@ -2248,8 +2247,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id) rx_info->rx = rx; spin_unlock_irqrestore(&bnad->bna_lock, flags); - INIT_WORK(&rx_info->rx_cleanup_work, - (work_func_t)(bnad_rx_cleanup)); + INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup); /* * Init NAPI, so that state is set to NAPI_STATE_SCHED, diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c index 20fcb20b42ed..66b577835338 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_vic.c +++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c @@ -49,7 +49,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length, tlv->type = htons(type); tlv->length = htons(length); - memcpy(tlv->value, value, length); + unsafe_memcpy(tlv->value, value, length, + /* Flexible array of flexible arrays */); vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1); vp->length = htonl(ntohl(vp->length) + diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 9aeff2b37a61..64eadd320798 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -719,17 +719,25 @@ static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter, struct xdp_buff *xdp, - struct 
netdev_queue *tx_nq, struct tsnep_tx *tx) + struct netdev_queue *tx_nq, struct tsnep_tx *tx, + bool zc) { struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); bool xmit; + u32 type; if (unlikely(!xdpf)) return false; + /* no page pool for zero copy */ + if (zc) + type = TSNEP_TX_TYPE_XDP_NDO; + else + type = TSNEP_TX_TYPE_XDP_TX; + __netif_tx_lock(tx_nq, smp_processor_id()); - xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX); + xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type); /* Avoid transmit queue timeout since we share it with the slow path */ if (xmit) @@ -1273,7 +1281,7 @@ static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, case XDP_PASS: return false; case XDP_TX: - if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) + if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) goto out_failure; *status |= TSNEP_XDP_TX; return true; @@ -1323,7 +1331,7 @@ static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog, case XDP_PASS: return false; case XDP_TX: - if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) + if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) goto out_failure; *status |= TSNEP_XDP_TX; return true; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 9ba15d3183d7..758535adc9ff 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -1073,6 +1073,14 @@ int memac_initialization(struct mac_device *mac_dev, unsigned long capabilities; unsigned long *supported; + /* The internal connection to the serdes is XGMII, but this isn't + * really correct for the phy mode (which is the external connection). + * However, this is how all older device trees say that they want + * 10GBASE-R (aka XFI), so just convert it for them. + */ + if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII) + mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER; + mac_dev->phylink_ops = &memac_mac_ops; mac_dev->set_promisc = memac_set_promiscuous; mac_dev->change_addr = memac_modify_mac_address; @@ -1139,7 +1147,7 @@ int memac_initialization(struct mac_device *mac_dev, * (and therefore that xfi_pcs cannot be set). If we are defaulting to * XGMII, assume this is for XFI. Otherwise, assume it is for SGMII. */ - if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII) + if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_10GBASER) memac->xfi_pcs = pcs; else memac->sgmii_pcs = pcs; @@ -1153,14 +1161,6 @@ int memac_initialization(struct mac_device *mac_dev, goto _return_fm_mac_free; } - /* The internal connection to the serdes is XGMII, but this isn't - * really correct for the phy mode (which is the external connection). - * However, this is how all older device trees say that they want - * 10GBASE-R (aka XFI), so just convert it for them. 
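Related to the bnad hunk further above, which stops casting its cleanup handlers to work_func_t: a small sketch of the container_of() idiom for a delayed-work handler, using hypothetical my_ctx/my_cleanup names.

#include <linux/workqueue.h>
#include <linux/container_of.h>

struct my_ctx {
	struct delayed_work	cleanup_work;
	int			pending;
};

/* Handler keeps the canonical work_func_t prototype and recovers its
 * context from the embedded work item instead of relying on a cast.
 */
static void my_cleanup(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx,
					  cleanup_work.work);

	ctx->pending = 0;	/* a real driver would free resources here */
}

static void my_ctx_init(struct my_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->cleanup_work, my_cleanup);
}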
- */ - if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII) - mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER; - /* TODO: The following interface modes are supported by (some) hardware * but not by this driver: * - 1000BASE-KX diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index 7a8dc5386fff..76615d47e055 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -356,7 +356,7 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags) static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi, struct gve_rx_slot_page_info *page_info, - u16 packet_buffer_size, u16 len, + unsigned int truesize, u16 len, struct gve_rx_ctx *ctx) { u32 offset = page_info->page_offset + page_info->pad; @@ -389,10 +389,10 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi, if (skb != ctx->skb_head) { ctx->skb_head->len += len; ctx->skb_head->data_len += len; - ctx->skb_head->truesize += packet_buffer_size; + ctx->skb_head->truesize += truesize; } skb_add_rx_frag(skb, num_frags, page_info->page, - offset, len, packet_buffer_size); + offset, len, truesize); return ctx->skb_head; } @@ -486,7 +486,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx, memcpy(alloc_page_info.page_address, src, page_info->pad + len); skb = gve_rx_add_frags(napi, &alloc_page_info, - rx->packet_buffer_size, + PAGE_SIZE, len, ctx); u64_stats_update_begin(&rx->statss); diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index a187582d2299..ba9c19e6994c 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -360,23 +360,43 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); * As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n) * bits to count nanoseconds leaving the rest for fractional nonseconds. + * + * Any given INCVALUE also has an associated maximum adjustment value. This + * maximum adjustment value is the largest increase (or decrease) which can be + * safely applied without overflowing the INCVALUE. Since INCVALUE has + * a maximum range of 24 bits, its largest value is 0xFFFFFF. + * + * To understand where the maximum value comes from, consider the following + * equation: + * + * new_incval = base_incval + (base_incval * adjustment) / 1billion + * + * To avoid overflow that means: + * max_incval = base_incval + (base_incval * max_adj) / billion + * + * Re-arranging: + * max_adj = floor(((max_incval - base_incval) * 1billion) / 1billion) */ #define INCVALUE_96MHZ 125 #define INCVALUE_SHIFT_96MHZ 17 #define INCPERIOD_SHIFT_96MHZ 2 #define INCPERIOD_96MHZ (12 >> INCPERIOD_SHIFT_96MHZ) +#define MAX_PPB_96MHZ 23999900 /* 23,999,900 ppb */ #define INCVALUE_25MHZ 40 #define INCVALUE_SHIFT_25MHZ 18 #define INCPERIOD_25MHZ 1 +#define MAX_PPB_25MHZ 599999900 /* 599,999,900 ppb */ #define INCVALUE_24MHZ 125 #define INCVALUE_SHIFT_24MHZ 14 #define INCPERIOD_24MHZ 3 +#define MAX_PPB_24MHZ 999999999 /* 999,999,999 ppb */ #define INCVALUE_38400KHZ 26 #define INCVALUE_SHIFT_38400KHZ 19 #define INCPERIOD_38400KHZ 1 +#define MAX_PPB_38400KHZ 230769100 /* 230,769,100 ppb */ /* Another drawback of scaling the incvalue by a large factor is the * 64-bit SYSTIM register overflows more quickly. 
This is dealt with diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index a2788fd5f8bb..19e450a5bd31 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -2559,7 +2559,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF)); hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), - FIELD_GET(E1000_RAH_AV, mac_reg)); + (u16)((mac_reg & E1000_RAH_AV) >> 16)); } e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 02d871bc112a..bbcfd529399b 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -280,8 +280,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) switch (hw->mac.type) { case e1000_pch2lan: + adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ; + break; case e1000_pch_lpt: + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) + adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ; + else + adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ; + break; case e1000_pch_spt: + adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ; + break; case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: @@ -289,15 +298,14 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: - if ((hw->mac.type < e1000_pch_lpt) || - (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { - adapter->ptp_clock_info.max_adj = 24000000 - 1; - break; - } - fallthrough; + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) + adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ; + else + adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ; + break; case e1000_82574: case e1000_82583: - adapter->ptp_clock_info.max_adj = 600000000 - 1; + adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ; break; default: break; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 9d88ed6105fd..8db1eb0c1768 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -1523,7 +1523,7 @@ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share, reg = rd32(hw, I40E_PRTDCB_RETSTCC(i)); reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK | I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK | - I40E_PRTDCB_RETSTCC_ETSTC_SHIFT); + I40E_PRTDCB_RETSTCC_ETSTC_MASK); reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_BWSHARE_MASK, bw_share[i]); reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK, diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 6b60dc9b7736..d76497566e40 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -43,7 +43,7 @@ #define I40E_LLDP_TLV_SUBTYPE_SHIFT 0 #define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT) #define I40E_LLDP_TLV_OUI_SHIFT 8 -#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT) +#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFFU << I40E_LLDP_TLV_OUI_SHIFT) /* Defines for IEEE ETS TLV */ #define I40E_IEEE_ETS_MAXTC_SHIFT 0 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6e7fd473abfd..89a3401d20ab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4926,27 +4926,23 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi) void 
i40e_vsi_stop_rings(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - int pf_q, err, q_end; + u32 pf_q, tx_q_end, rx_q_end; /* When port TX is suspended, don't wait */ if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) return i40e_vsi_stop_rings_no_wait(vsi); - q_end = vsi->base_queue + vsi->num_queue_pairs; - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) - i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false); + tx_q_end = vsi->base_queue + + vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); + for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) + i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false); - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) { - err = i40e_control_wait_rx_q(pf, pf_q, false); - if (err) - dev_info(&pf->pdev->dev, - "VSI seid %d Rx ring %d disable timeout\n", - vsi->seid, pf_q); - } + rx_q_end = vsi->base_queue + vsi->num_queue_pairs; + for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++) + i40e_control_rx_q(pf, pf_q, false); msleep(I40E_DISABLE_TX_GAP_MSEC); - pf_q = vsi->base_queue; - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) + for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); i40e_vsi_wait_queues_disabled(vsi); @@ -5360,7 +5356,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) { int v, ret = 0; - for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { + for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v]) { ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); if (ret) @@ -13564,9 +13560,9 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) return err; i40e_queue_pair_disable_irq(vsi, queue_pair); + i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); - i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); i40e_queue_pair_clean_rings(vsi, queue_pair); i40e_queue_pair_reset_stats(vsi, queue_pair); diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index af4269330581..ce1f11b8ad65 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -567,8 +567,7 @@ static inline bool i40e_is_fw_ver_lt(struct i40e_hw *hw, u16 maj, u16 min) **/ static inline bool i40e_is_fw_ver_eq(struct i40e_hw *hw, u16 maj, u16 min) { - return (hw->aq.fw_maj_ver > maj || - (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min)); + return (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min); } #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 908cdbd3ec5d..b34c71770887 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2848,6 +2848,24 @@ error_param: (u8 *)&stats, sizeof(stats)); } +/** + * i40e_can_vf_change_mac + * @vf: pointer to the VF info + * + * Return true if the VF is allowed to change its MAC filters, false otherwise + */ +static bool i40e_can_vf_change_mac(struct i40e_vf *vf) +{ + /* If the VF MAC address has been set administratively (via the + * ndo_set_vf_mac command), then deny permission to the VF to + * add/delete unicast MAC addresses, unless the VF is trusted + */ + if (vf->pf_set_mac && !vf->trusted) + return false; + + return true; +} + #define I40E_MAX_MACVLAN_PER_HW 3072 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \ 
(num_ports)) @@ -2907,8 +2925,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, * The VF may request to set the MAC address filter already * assigned to it so do not return an error in that case. */ - if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && - !is_multicast_ether_addr(addr) && vf->pf_set_mac && + if (!i40e_can_vf_change_mac(vf) && + !is_multicast_ether_addr(addr) && !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); @@ -3114,19 +3132,29 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) ret = -EINVAL; goto error_param; } - if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr)) - was_unimac_deleted = true; } vsi = pf->vsi[vf->lan_vsi_idx]; spin_lock_bh(&vsi->mac_filter_hash_lock); /* delete addresses from the list */ - for (i = 0; i < al->num_elements; i++) + for (i = 0; i < al->num_elements; i++) { + const u8 *addr = al->list[i].addr; + + /* Allow to delete VF primary MAC only if it was not set + * administratively by PF or if VF is trusted. + */ + if (ether_addr_equal(addr, vf->default_lan_addr.addr) && + i40e_can_vf_change_mac(vf)) + was_unimac_deleted = true; + else + continue; + if (i40e_del_mac_filter(vsi, al->list[i].addr)) { ret = -EINVAL; spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; } + } spin_unlock_bh(&vsi->mac_filter_hash_lock); diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 7ac847718882..c979192e44d1 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -190,15 +190,13 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) q_vector = vsi->q_vectors[v_idx]; ice_for_each_tx_ring(tx_ring, q_vector->tx) { - if (vsi->netdev) - netif_queue_set_napi(vsi->netdev, tx_ring->q_index, - NETDEV_QUEUE_TYPE_TX, NULL); + ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX, + NULL); tx_ring->q_vector = NULL; } ice_for_each_rx_ring(rx_ring, q_vector->rx) { - if (vsi->netdev) - netif_queue_set_napi(vsi->netdev, rx_ring->q_index, - NETDEV_QUEUE_TYPE_RX, NULL); + ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX, + NULL); rx_ring->q_vector = NULL; } diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index b9c5eced6326..bd9b1fed74ab 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -31,6 +31,26 @@ static const char * const pin_type_name[] = { }; /** + * ice_dpll_is_reset - check if reset is in progress + * @pf: private board structure + * @extack: error reporting + * + * If reset is in progress, fill extack with error. 
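The ice_dpll changes here add an early -EBUSY bail-out with an extack message whenever a PF reset is in progress, before any lock is taken. A minimal sketch of that guard shape, with hypothetical my_priv/my_pin_op names and a plain "resetting" flag standing in for ice_is_reset_in_progress():

#include <linux/netlink.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/types.h>

struct my_priv {
	struct mutex	lock;
	bool		resetting;
	int		value;
};

static void my_priv_init(struct my_priv *p)
{
	mutex_init(&p->lock);
}

static int my_pin_op(struct my_priv *p, int value,
		     struct netlink_ext_ack *extack)
{
	/* Refuse the operation while a reset is running instead of touching
	 * state that may be half torn down; the error reason is reported
	 * back to userspace through the extack.
	 */
	if (p->resetting) {
		NL_SET_ERR_MSG(extack, "reset in progress");
		return -EBUSY;
	}

	mutex_lock(&p->lock);
	p->value = value;
	mutex_unlock(&p->lock);

	return 0;
}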
+ * + * Return: + * * false - no reset in progress + * * true - reset in progress + */ +static bool ice_dpll_is_reset(struct ice_pf *pf, struct netlink_ext_ack *extack) +{ + if (ice_is_reset_in_progress(pf->state)) { + NL_SET_ERR_MSG(extack, "PF reset in progress"); + return true; + } + return false; +} + +/** * ice_dpll_pin_freq_set - set pin's frequency * @pf: private board structure * @pin: pointer to a pin @@ -109,6 +129,9 @@ ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv, struct ice_pf *pf = d->pf; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack); mutex_unlock(&pf->dplls.lock); @@ -254,6 +277,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv, * ice_dpll_pin_enable - enable a pin on dplls * @hw: board private hw structure * @pin: pointer to a pin + * @dpll_idx: dpll index to connect to output pin * @pin_type: type of pin being enabled * @extack: error reporting * @@ -266,7 +290,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv, */ static int ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin, - enum ice_dpll_pin_type pin_type, + u8 dpll_idx, enum ice_dpll_pin_type pin_type, struct netlink_ext_ack *extack) { u8 flags = 0; @@ -280,10 +304,12 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin, ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0); break; case ICE_DPLL_PIN_TYPE_OUTPUT: + flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL; if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN; - ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0); + ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, dpll_idx, + 0, 0); break; default: return -EINVAL; @@ -370,7 +396,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, case ICE_DPLL_PIN_TYPE_INPUT: ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL, NULL, &pin->flags[0], - &pin->freq, NULL); + &pin->freq, &pin->phase_adjust); if (ret) goto err; if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) { @@ -398,14 +424,27 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, break; case ICE_DPLL_PIN_TYPE_OUTPUT: ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx, - &pin->flags[0], NULL, + &pin->flags[0], &parent, &pin->freq, NULL); if (ret) goto err; - if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) - pin->state[0] = DPLL_PIN_STATE_CONNECTED; - else - pin->state[0] = DPLL_PIN_STATE_DISCONNECTED; + + parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL; + if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) { + pin->state[pf->dplls.eec.dpll_idx] = + parent == pf->dplls.eec.dpll_idx ? + DPLL_PIN_STATE_CONNECTED : + DPLL_PIN_STATE_DISCONNECTED; + pin->state[pf->dplls.pps.dpll_idx] = + parent == pf->dplls.pps.dpll_idx ? 
+ DPLL_PIN_STATE_CONNECTED : + DPLL_PIN_STATE_DISCONNECTED; + } else { + pin->state[pf->dplls.eec.dpll_idx] = + DPLL_PIN_STATE_DISCONNECTED; + pin->state[pf->dplls.pps.dpll_idx] = + DPLL_PIN_STATE_DISCONNECTED; + } break; case ICE_DPLL_PIN_TYPE_RCLK_INPUT: for (parent = 0; parent < pf->dplls.rclk.num_parents; @@ -568,9 +607,13 @@ ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv, struct ice_pf *pf = d->pf; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); if (enable) - ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack); + ret = ice_dpll_pin_enable(&pf->hw, p, d->dpll_idx, pin_type, + extack); else ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack); if (!ret) @@ -603,6 +646,11 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv, struct netlink_ext_ack *extack) { bool enable = state == DPLL_PIN_STATE_CONNECTED; + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + + if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED) + return 0; return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable, extack, ICE_DPLL_PIN_TYPE_OUTPUT); @@ -665,14 +713,16 @@ ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv, struct ice_pf *pf = d->pf; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); ret = ice_dpll_pin_state_update(pf, p, pin_type, extack); if (ret) goto unlock; - if (pin_type == ICE_DPLL_PIN_TYPE_INPUT) + if (pin_type == ICE_DPLL_PIN_TYPE_INPUT || + pin_type == ICE_DPLL_PIN_TYPE_OUTPUT) *state = p->state[d->dpll_idx]; - else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT) - *state = p->state[0]; ret = 0; unlock: mutex_unlock(&pf->dplls.lock); @@ -790,6 +840,9 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv, struct ice_pf *pf = d->pf; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack); mutex_unlock(&pf->dplls.lock); @@ -910,6 +963,9 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv, u8 flag, flags_en = 0; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); switch (type) { case ICE_DPLL_PIN_TYPE_INPUT: @@ -1069,6 +1125,9 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv, int ret = -EINVAL; u32 hw_idx; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); hw_idx = parent->idx - pf->dplls.base_rclk_idx; if (hw_idx >= pf->dplls.num_inputs) @@ -1123,6 +1182,9 @@ ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv, int ret = -EINVAL; u32 hw_idx; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); hw_idx = parent->idx - pf->dplls.base_rclk_idx; if (hw_idx >= pf->dplls.num_inputs) @@ -1305,8 +1367,10 @@ static void ice_dpll_periodic_work(struct kthread_work *work) struct ice_pf *pf = container_of(d, struct ice_pf, dplls); struct ice_dpll *de = &pf->dplls.eec; struct ice_dpll *dp = &pf->dplls.pps; - int ret; + int ret = 0; + if (ice_is_reset_in_progress(pf->state)) + goto resched; mutex_lock(&pf->dplls.lock); ret = ice_dpll_update_state(pf, de, false); if (!ret) @@ -1326,6 +1390,7 @@ static void ice_dpll_periodic_work(struct kthread_work *work) ice_dpll_notify_changes(de); ice_dpll_notify_changes(dp); +resched: /* Run twice a second or reschedule if update failed */ kthread_queue_delayed_work(d->kworker, &d->work, ret ? 
msecs_to_jiffies(10) : @@ -1532,7 +1597,7 @@ static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf) } if (WARN_ON_ONCE(!vsi || !vsi->netdev)) return; - netdev_dpll_pin_clear(vsi->netdev); + dpll_netdev_pin_clear(vsi->netdev); dpll_pin_put(rclk->pin); } @@ -1576,7 +1641,7 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin, } if (WARN_ON((!vsi || !vsi->netdev))) return -EINVAL; - netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin); + dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin); return 0; @@ -2055,6 +2120,7 @@ void ice_dpll_init(struct ice_pf *pf) struct ice_dplls *d = &pf->dplls; int err = 0; + mutex_init(&d->lock); err = ice_dpll_init_info(pf, cgu); if (err) goto err_exit; @@ -2067,7 +2133,6 @@ void ice_dpll_init(struct ice_pf *pf) err = ice_dpll_init_pins(pf, cgu); if (err) goto deinit_pps; - mutex_init(&d->lock); if (cgu) { err = ice_dpll_init_worker(pf); if (err) diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 2a25323105e5..467372d541d2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -152,6 +152,27 @@ ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport) } /** + * ice_pkg_has_lport_extract - check if lport extraction supported + * @hw: HW struct + */ +static bool ice_pkg_has_lport_extract(struct ice_hw *hw) +{ + int i; + + for (i = 0; i < hw->blk[ICE_BLK_SW].es.count; i++) { + u16 offset; + u8 fv_prot; + + ice_find_prot_off(hw, ICE_BLK_SW, ICE_SW_DEFAULT_PROFILE, i, + &fv_prot, &offset); + if (fv_prot == ICE_FV_PROT_MDID && + offset == ICE_LP_EXT_BUF_OFFSET) + return true; + } + return false; +} + +/** * ice_lag_find_primary - returns pointer to primary interfaces lag struct * @lag: local interfaces lag struct */ @@ -1206,7 +1227,7 @@ static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf) } /** - * ice_lag_init_feature_support_flag - Check for NVM support for LAG + * ice_lag_init_feature_support_flag - Check for package and NVM support for LAG * @pf: PF struct */ static void ice_lag_init_feature_support_flag(struct ice_pf *pf) @@ -1219,7 +1240,7 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf) else ice_clear_feature_support(pf, ICE_F_ROCE_LAG); - if (caps->sriov_lag) + if (caps->sriov_lag && ice_pkg_has_lport_extract(&pf->hw)) ice_set_feature_support(pf, ICE_F_SRIOV_LAG); else ice_clear_feature_support(pf, ICE_F_SRIOV_LAG); diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h index ede833dfa658..183b38792ef2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.h +++ b/drivers/net/ethernet/intel/ice/ice_lag.h @@ -17,6 +17,9 @@ enum ice_lag_role { #define ICE_LAG_INVALID_PORT 0xFF #define ICE_LAG_RESET_RETRIES 5 +#define ICE_SW_DEFAULT_PROFILE 0 +#define ICE_FV_PROT_MDID 255 +#define ICE_LP_EXT_BUF_OFFSET 32 struct ice_pf; struct ice_vf; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 9be724291ef8..fc23dbe302b4 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2426,7 +2426,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) ice_vsi_map_rings_to_vectors(vsi); /* Associate q_vector rings to napi */ - ice_vsi_set_napi_queues(vsi, true); + ice_vsi_set_napi_queues(vsi); vsi->stat_offsets_loaded = false; @@ -2904,19 +2904,19 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) } /** - * ice_queue_set_napi - Set the napi instance for the queue 
+ * __ice_queue_set_napi - Set the napi instance for the queue * @dev: device to which NAPI and queue belong * @queue_index: Index of queue * @type: queue type as RX or TX * @napi: NAPI context * @locked: is the rtnl_lock already held * - * Set the napi instance for the queue + * Set the napi instance for the queue. Caller indicates the lock status. */ static void -ice_queue_set_napi(struct net_device *dev, unsigned int queue_index, - enum netdev_queue_type type, struct napi_struct *napi, - bool locked) +__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi, + bool locked) { if (!locked) rtnl_lock(); @@ -2926,26 +2926,79 @@ ice_queue_set_napi(struct net_device *dev, unsigned int queue_index, } /** - * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi + * ice_queue_set_napi - Set the napi instance for the queue + * @vsi: VSI being configured + * @queue_index: Index of queue + * @type: queue type as RX or TX + * @napi: NAPI context + * + * Set the napi instance for the queue. The rtnl lock state is derived from the + * execution path. + */ +void +ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi) +{ + struct ice_pf *pf = vsi->back; + + if (!vsi->netdev) + return; + + if (current_work() == &pf->serv_task || + test_bit(ICE_PREPARED_FOR_RESET, pf->state) || + test_bit(ICE_DOWN, pf->state) || + test_bit(ICE_SUSPENDED, pf->state)) + __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, + false); + else + __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, + true); +} + +/** + * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi * @q_vector: q_vector pointer * @locked: is the rtnl_lock already held * + * Associate the q_vector napi with all the queue[s] on the vector. + * Caller indicates the lock status. 
+ */ +void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked) +{ + struct ice_rx_ring *rx_ring; + struct ice_tx_ring *tx_ring; + + ice_for_each_rx_ring(rx_ring, q_vector->rx) + __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index, + NETDEV_QUEUE_TYPE_RX, &q_vector->napi, + locked); + + ice_for_each_tx_ring(tx_ring, q_vector->tx) + __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index, + NETDEV_QUEUE_TYPE_TX, &q_vector->napi, + locked); + /* Also set the interrupt number for the NAPI */ + netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); +} + +/** + * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi + * @q_vector: q_vector pointer + * * Associate the q_vector napi with all the queue[s] on the vector */ -void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked) +void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector) { struct ice_rx_ring *rx_ring; struct ice_tx_ring *tx_ring; ice_for_each_rx_ring(rx_ring, q_vector->rx) - ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index, - NETDEV_QUEUE_TYPE_RX, &q_vector->napi, - locked); + ice_queue_set_napi(q_vector->vsi, rx_ring->q_index, + NETDEV_QUEUE_TYPE_RX, &q_vector->napi); ice_for_each_tx_ring(tx_ring, q_vector->tx) - ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index, - NETDEV_QUEUE_TYPE_TX, &q_vector->napi, - locked); + ice_queue_set_napi(q_vector->vsi, tx_ring->q_index, + NETDEV_QUEUE_TYPE_TX, &q_vector->napi); /* Also set the interrupt number for the NAPI */ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); } @@ -2953,11 +3006,10 @@ void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked) /** * ice_vsi_set_napi_queues * @vsi: VSI pointer - * @locked: is the rtnl_lock already held * * Associate queue[s] with napi for all vectors */ -void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked) +void ice_vsi_set_napi_queues(struct ice_vsi *vsi) { int i; @@ -2965,7 +3017,7 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked) return; ice_for_each_q_vector(vsi, i) - ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked); + ice_q_vector_set_napi_queues(vsi->q_vectors[i]); } /** @@ -3140,7 +3192,7 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi) } } - tx_ring_stats = vsi_stat->rx_ring_stats; + tx_ring_stats = vsi_stat->tx_ring_stats; vsi_stat->tx_ring_stats = krealloc_array(vsi_stat->tx_ring_stats, req_txq, sizeof(*vsi_stat->tx_ring_stats), diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 71bd27244941..bfcfc582a4c0 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -91,9 +91,15 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params); -void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked); +void +ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi); + +void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked); + +void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector); -void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked); +void ice_vsi_set_napi_queues(struct ice_vsi *vsi); int ice_vsi_release(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index dd4a9bc0dfdc..df6a68ab747e 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3495,7 +3495,7 @@ static void ice_napi_add(struct ice_vsi *vsi) ice_for_each_q_vector(vsi, v_idx) { netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, ice_napi_poll); - ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false); + __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false); } } @@ -5447,6 +5447,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf) if (ret) goto err_reinit; ice_vsi_map_rings_to_vectors(pf->vsi[v]); + ice_vsi_set_napi_queues(pf->vsi[v]); } ret = ice_req_irq_msix_misc(pf); @@ -8012,6 +8013,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, pf_sw = pf->first_sw; /* find the attribute in the netlink message */ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { __u16 mode; diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h index 82bc54fec7f3..a2562f04267f 100644 --- a/drivers/net/ethernet/intel/ice/ice_osdep.h +++ b/drivers/net/ethernet/intel/ice/ice_osdep.h @@ -24,7 +24,7 @@ #define rd64(a, reg) readq((a)->hw_addr + (reg)) #define ice_flush(a) rd32((a), GLGEN_STAT) -#define ICE_M(m, s) ((m) << (s)) +#define ICE_M(m, s) ((m ## U) << (s)) struct ice_dma_mem { void *va; diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index a94a1c48c3de..b0f78c2f2790 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -1068,6 +1068,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) struct ice_pf *pf = pci_get_drvdata(pdev); u16 prev_msix, prev_queues, queues; bool needs_rebuild = false; + struct ice_vsi *vsi; struct ice_vf *vf; int id; @@ -1102,6 +1103,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) if (!vf) return -ENOENT; + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -ENOENT; + prev_msix = vf->num_msix; prev_queues = vf->num_vf_qs; @@ -1122,7 +1127,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) if (vf->first_vector_idx < 0) goto unroll; - if (ice_vf_reconfig_vsi(vf)) { + if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) { /* Try to rebuild with previous values */ needs_rebuild = true; goto unroll; @@ -1148,8 +1153,10 @@ unroll: if (vf->first_vector_idx < 0) return -EINVAL; - if (needs_rebuild) + if (needs_rebuild) { ice_vf_reconfig_vsi(vf); + ice_vf_init_host_cfg(vf, vsi); + } ice_ena_vf_mappings(vf); ice_put_vf(vf); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 41ab6d7bbd9e..a508e917ce5f 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -1072,7 +1072,7 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_OROM_VER_BUILD_SHIFT 8 #define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT) #define ICE_OROM_VER_SHIFT 24 -#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT) +#define ICE_OROM_VER_MASK (0xffU << ICE_OROM_VER_SHIFT) #define ICE_SR_PFA_PTR 0x40 #define ICE_SR_1ST_NVM_BANK_PTR 0x42 #define ICE_SR_NVM_BANK_SIZE 0x43 diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index c925813ec9ca..6f2328a049bf 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ 
b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -440,7 +440,6 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vf->driver_caps = *(u32 *)msg; else vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | - VIRTCHNL_VF_OFFLOAD_RSS_REG | VIRTCHNL_VF_OFFLOAD_VLAN; vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; @@ -453,14 +452,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi, vf->driver_caps); - if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; - } else { - if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ) - vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; - else - vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; - } if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c index 5e19d48a05b4..d796dbd2a440 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c @@ -13,8 +13,6 @@ * - opcodes needed by VF when caps are activated * * Caps that don't use new opcodes (no opcodes should be allowed): - * - VIRTCHNL_VF_OFFLOAD_RSS_AQ - * - VIRTCHNL_VF_OFFLOAD_RSS_REG * - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR * - VIRTCHNL_VF_OFFLOAD_CRC * - VIRTCHNL_VF_OFFLOAD_RX_POLLING diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 8b81a1677045..2eecd0f39aa6 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -179,6 +179,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) return -EBUSY; usleep_range(1000, 2000); } + + ice_qvec_dis_irq(vsi, rx_ring, q_vector); + ice_qvec_toggle_napi(vsi, q_vector, false); + netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); ice_fill_txq_meta(vsi, tx_ring, &txq_meta); @@ -195,13 +199,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) if (err) return err; } - ice_qvec_dis_irq(vsi, rx_ring, q_vector); - err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true); if (err) return err; - ice_qvec_toggle_napi(vsi, q_vector, false); ice_qp_clean_rings(vsi, q_idx); ice_qp_reset_stats(vsi, q_idx); @@ -259,11 +260,11 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) if (err) return err; - clear_bit(ICE_CFG_BUSY, vsi->state); ice_qvec_toggle_napi(vsi, q_vector, true); ice_qvec_ena_irq(vsi, q_vector); netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); + clear_bit(ICE_CFG_BUSY, vsi->state); return 0; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index d0cdd63b3d5b..390977a76de2 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -2087,8 +2087,10 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport) set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); /* schedule the napi to receive all the marker packets */ + local_bh_disable(); for (i = 0; i < vport->num_q_vectors; i++) napi_schedule(&vport->q_vectors[i].napi); + local_bh_enable(); return idpf_wait_for_marker_event(vport); } diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h index 8dc837889723..4a3c4454d25a 100644 --- a/drivers/net/ethernet/intel/idpf/virtchnl2.h +++ 
b/drivers/net/ethernet/intel/idpf/virtchnl2.h @@ -978,7 +978,7 @@ struct virtchnl2_ptype { u8 proto_id_count; __le16 pad; __le16 proto_id[]; -}; +} __packed __aligned(2); VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype); /** diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index a2b759531cb7..3c2dc7bdebb5 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -637,7 +637,7 @@ struct igb_adapter { struct timespec64 period; } perout[IGB_N_PEROUT]; - char fw_version[32]; + char fw_version[48]; #ifdef CONFIG_IGB_HWMON struct hwmon_buff *igb_hwmon_buff; bool ets; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 4df8d4153aa5..cebb44f51d5f 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3069,7 +3069,6 @@ void igb_set_fw_version(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_fw_version fw; - char *lbuf; igb_get_fw_version(hw, &fw); @@ -3077,34 +3076,36 @@ void igb_set_fw_version(struct igb_adapter *adapter) case e1000_i210: case e1000_i211: if (!(igb_get_flash_presence_i210(hw))) { - lbuf = kasprintf(GFP_KERNEL, "%2d.%2d-%d", - fw.invm_major, fw.invm_minor, - fw.invm_img_type); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); break; } fallthrough; default: /* if option rom is valid, display its version too */ if (fw.or_valid) { - lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x, %d.%d.%d", - fw.eep_major, fw.eep_minor, - fw.etrack_id, fw.or_major, fw.or_build, - fw.or_patch); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, fw.etrack_id, + fw.or_major, fw.or_build, fw.or_patch); /* no option rom */ } else if (fw.etrack_id != 0X0000) { - lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x", - fw.eep_major, fw.eep_minor, - fw.etrack_id); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); } else { - lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d", fw.eep_major, - fw.eep_minor, fw.eep_build); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d.%d", + fw.eep_major, fw.eep_minor, fw.eep_build); } break; } - - /* the truncate happens here if it doesn't fit */ - strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version)); - kfree(lbuf); } /** diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 319c544b9f04..f94570556120 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -957,7 +957,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); /* adjust timestamp for the TX latency based on link speed */ - if (adapter->hw.mac.type == e1000_i210) { + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { switch (adapter->link_speed) { case SPEED_10: adjust = IGB_I210_TX_LATENCY_10; @@ -1003,6 +1003,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, ktime_t *timestamp) { struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; struct skb_shared_hwtstamps ts; __le64 *regval = (__le64 *)va; int adjust = 0; @@ -1022,7 +1023,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, igb_ptp_systim_to_hwtstamp(adapter, &ts, 
le64_to_cpu(regval[1])); /* adjust timestamp for the RX latency based on link speed */ - if (adapter->hw.mac.type == e1000_i210) { + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { switch (adapter->link_speed) { case SPEED_10: adjust = IGB_I210_RX_LATENCY_10; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index ba8d3fe186ae..81c21a893ede 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6487,7 +6487,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames, int cpu = smp_processor_id(); struct netdev_queue *nq; struct igc_ring *ring; - int i, drops; + int i, nxmit; if (unlikely(!netif_carrier_ok(dev))) return -ENETDOWN; @@ -6503,16 +6503,15 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames, /* Avoid transmit queue timeout since we share it with the slow path */ txq_trans_cond_update(nq); - drops = 0; + nxmit = 0; for (i = 0; i < num_frames; i++) { int err; struct xdp_frame *xdpf = frames[i]; err = igc_xdp_init_tx_descriptor(ring, xdpf); - if (err) { - xdp_return_frame_rx_napi(xdpf); - drops++; - } + if (err) + break; + nxmit++; } if (flags & XDP_XMIT_FLUSH) @@ -6520,7 +6519,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames, __netif_tx_unlock(nq); - return num_frames - drops; + return nxmit; } static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 7cd8716d2ffa..861f37076861 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -130,11 +130,7 @@ void igc_power_down_phy_copper(struct igc_hw *hw) /* The PHY will retain its settings across a power down/up cycle */ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); mii_reg |= MII_CR_POWER_DOWN; - - /* Temporary workaround - should be removed when PHY will implement - * IEEE registers as properly - */ - /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); usleep_range(1000, 2000); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index bd541527c8c7..99876b765b08 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2939,8 +2939,8 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, u64 qmask) { - u32 mask; struct ixgbe_hw *hw = &adapter->hw; + u32 mask; switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -10525,6 +10525,44 @@ static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring) } /** + * ixgbe_irq_disable_single - Disable single IRQ vector + * @adapter: adapter structure + * @ring: ring index + **/ +static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 qmask = BIT_ULL(ring); + u32 mask; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + mask = qmask & IXGBE_EIMC_RTX_QUEUE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + mask = (qmask & 0xFFFFFFFF); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + mask = (qmask >> 32); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + break; + default: + break; + } + 
IXGBE_WRITE_FLUSH(&adapter->hw); + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + synchronize_irq(adapter->msix_entries[ring].vector); + else + synchronize_irq(adapter->pdev->irq); +} + +/** * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings * @adapter: adapter structure * @ring: ring index @@ -10540,6 +10578,11 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring) tx_ring = adapter->tx_ring[ring]; xdp_ring = adapter->xdp_ring[ring]; + ixgbe_irq_disable_single(adapter, ring); + + /* Rx/Tx/XDP Tx share the same napi context. */ + napi_disable(&rx_ring->q_vector->napi); + ixgbe_disable_txr(adapter, tx_ring); if (xdp_ring) ixgbe_disable_txr(adapter, xdp_ring); @@ -10548,9 +10591,6 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring) if (xdp_ring) synchronize_rcu(); - /* Rx/Tx/XDP Tx share the same napi context. */ - napi_disable(&rx_ring->q_vector->napi); - ixgbe_clean_tx_ring(tx_ring); if (xdp_ring) ixgbe_clean_tx_ring(xdp_ring); @@ -10578,9 +10618,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring) tx_ring = adapter->tx_ring[ring]; xdp_ring = adapter->xdp_ring[ring]; - /* Rx/Tx/XDP Tx share the same napi context. */ - napi_enable(&rx_ring->q_vector->napi); - ixgbe_configure_tx_ring(adapter, tx_ring); if (xdp_ring) ixgbe_configure_tx_ring(adapter, xdp_ring); @@ -10589,6 +10626,11 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring) clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state); if (xdp_ring) clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); + + /* Rx/Tx/XDP Tx share the same napi context. */ + napi_enable(&rx_ring->q_vector->napi); + ixgbe_irq_enable_queues(adapter, BIT_ULL(ring)); + IXGBE_WRITE_FLUSH(&adapter->hw); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 6208923e29a2..c1adc94a5a65 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -716,7 +716,8 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command); hw_dbg(hw, "Failed to read, error %x\n", error); - return -EIO; + ret = -EIO; + goto out; } if (!ret) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 167145bdcb75..516adb50f9f6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -61,28 +61,6 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena) return 0; } -static int npc_mcam_verify_pf_func(struct rvu *rvu, - struct mcam_entry *entry_data, u8 intf, - u16 pcifunc) -{ - u16 pf_func, pf_func_mask; - - if (is_npc_intf_rx(intf)) - return 0; - - pf_func_mask = (entry_data->kw_mask[0] >> 32) & - NPC_KEX_PF_FUNC_MASK; - pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK; - - pf_func = be16_to_cpu((__force __be16)pf_func); - if (pf_func_mask != NPC_KEX_PF_FUNC_MASK || - ((pf_func & ~RVU_PFVF_FUNC_MASK) != - (pcifunc & ~RVU_PFVF_FUNC_MASK))) - return -EINVAL; - - return 0; -} - void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) { int blkaddr; @@ -437,6 +415,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam, return; } + /* AF modifies given action iff PF/VF has requested for it */ + if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT) + return; + /* copy VF default entry 
action to the VF mcam entry */ rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr, target_func); @@ -1850,8 +1832,8 @@ void npc_mcam_rsrcs_deinit(struct rvu *rvu) { struct npc_mcam *mcam = &rvu->hw->mcam; - kfree(mcam->bmap); - kfree(mcam->bmap_reverse); + bitmap_free(mcam->bmap); + bitmap_free(mcam->bmap_reverse); kfree(mcam->entry2pfvf_map); kfree(mcam->cntr2pfvf_map); kfree(mcam->entry2cntr_map); @@ -1904,21 +1886,20 @@ int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) mcam->pf_offset = mcam->nixlf_offset + nixlf_count; /* Allocate bitmaps for managing MCAM entries */ - mcam->bmap = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries), - sizeof(long), GFP_KERNEL); + mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL); if (!mcam->bmap) return -ENOMEM; - mcam->bmap_reverse = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries), - sizeof(long), GFP_KERNEL); + mcam->bmap_reverse = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL); if (!mcam->bmap_reverse) goto free_bmap; mcam->bmap_fcnt = mcam->bmap_entries; /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ - mcam->entry2pfvf_map = kmalloc_array(mcam->bmap_entries, - sizeof(u16), GFP_KERNEL); + mcam->entry2pfvf_map = kcalloc(mcam->bmap_entries, sizeof(u16), + GFP_KERNEL); + if (!mcam->entry2pfvf_map) goto free_bmap_reverse; @@ -1941,21 +1922,21 @@ int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) if (err) goto free_entry_map; - mcam->cntr2pfvf_map = kmalloc_array(mcam->counters.max, - sizeof(u16), GFP_KERNEL); + mcam->cntr2pfvf_map = kcalloc(mcam->counters.max, sizeof(u16), + GFP_KERNEL); if (!mcam->cntr2pfvf_map) goto free_cntr_bmap; /* Alloc memory for MCAM entry to counter mapping and for tracking * counter's reference count. */ - mcam->entry2cntr_map = kmalloc_array(mcam->bmap_entries, - sizeof(u16), GFP_KERNEL); + mcam->entry2cntr_map = kcalloc(mcam->bmap_entries, sizeof(u16), + GFP_KERNEL); if (!mcam->entry2cntr_map) goto free_cntr_map; - mcam->cntr_refcnt = kmalloc_array(mcam->counters.max, - sizeof(u16), GFP_KERNEL); + mcam->cntr_refcnt = kcalloc(mcam->counters.max, sizeof(u16), + GFP_KERNEL); if (!mcam->cntr_refcnt) goto free_entry_cntr_map; @@ -1988,9 +1969,9 @@ free_cntr_bmap: free_entry_map: kfree(mcam->entry2pfvf_map); free_bmap_reverse: - kfree(mcam->bmap_reverse); + bitmap_free(mcam->bmap_reverse); free_bmap: - kfree(mcam->bmap); + bitmap_free(mcam->bmap); return -ENOMEM; } @@ -2852,12 +2833,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, else nix_intf = pfvf->nix_rx_intf; - if (!is_pffunc_af(pcifunc) && - npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) { - rc = NPC_MCAM_INVALID_REQ; - goto exit; - } - /* For AF installed rules, the nix_intf should be set to target NIX */ if (is_pffunc_af(req->hdr.pcifunc)) nix_intf = req->intf; @@ -3209,10 +3184,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, if (!is_npc_interface_valid(rvu, req->intf)) return NPC_MCAM_INVALID_REQ; - if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, - req->hdr.pcifunc)) - return NPC_MCAM_INVALID_REQ; - /* Try to allocate a MCAM entry */ entry_req.hdr.pcifunc = req->hdr.pcifunc; entry_req.contig = true; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 7ca6941ea0b9..02d0b707aea5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -951,8 +951,11 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 
qidx, u16 sqb_aura) if (pfvf->ptp && qidx < pfvf->hw.tx_queues) { err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt, sizeof(*sq->timestamps)); - if (err) + if (err) { + kfree(sq->sg); + sq->sg = NULL; return err; + } } sq->head = 0; @@ -968,7 +971,14 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) sq->stats.bytes = 0; sq->stats.pkts = 0; - return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura); + err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura); + if (err) { + kfree(sq->sg); + sq->sg = NULL; + return err; + } + + return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 2928898c7f8d..7f786de61014 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -314,7 +314,6 @@ static int otx2_set_channels(struct net_device *dev, pfvf->hw.tx_queues = channel->tx_count; if (pfvf->xdp_prog) pfvf->hw.xdp_queues = channel->rx_count; - pfvf->hw.non_qos_queues = pfvf->hw.tx_queues + pfvf->hw.xdp_queues; if (if_up) err = dev->netdev_ops->ndo_open(dev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index a57455aebff6..e5fe67e73865 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1744,6 +1744,7 @@ int otx2_open(struct net_device *netdev) /* RQ and SQs are mapped to different CQs, * so find out max CQ IRQs (i.e CINTs) needed. */ + pf->hw.non_qos_queues = pf->hw.tx_queues + pf->hw.xdp_queues; pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues, pf->hw.tc_tx_queues); @@ -2643,8 +2644,6 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog) xdp_features_clear_redirect_target(dev); } - pf->hw.non_qos_queues += pf->hw.xdp_queues; - if (if_up) otx2_open(pf->netdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 4d519ea833b2..f828d32737af 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -1403,7 +1403,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, bool *need_xdp_flush) { - unsigned char *hard_start, *data; + unsigned char *hard_start; int qidx = cq->cq_idx; struct xdp_buff xdp; struct page *page; @@ -1417,9 +1417,8 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq); - data = (unsigned char *)phys_to_virt(pa); - hard_start = page_address(page); - xdp_prepare_buff(&xdp, hard_start, data - hard_start, + hard_start = (unsigned char *)phys_to_virt(pa); + xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM, cqe->sg.seg_size, false); act = bpf_prog_run_xdp(prog, &xdp); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index a6e91573f8da..de123350bd46 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -4761,7 +4761,10 @@ static int mtk_probe(struct platform_device *pdev) } if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36)); + if (!err) + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { dev_err(&pdev->dev, "Wrong DMA config\n"); return 
-EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 3e064234f6fe..98d4306929f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -157,6 +157,12 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, return -EOPNOTSUPP; } + if (action == DEVLINK_RELOAD_ACTION_FW_ACTIVATE && + !dev->priv.fw_reset) { + NL_SET_ERR_MSG_MOD(extack, "FW activate is unsupported for this function"); + return -EOPNOTSUPP; + } + if (mlx5_core_is_pf(dev) && pci_num_vf(pdev)) NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c index 18fed2b34fb1..d74a5aaf4268 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c @@ -261,7 +261,7 @@ static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll, { if (mdpll->tracking_netdev) return; - netdev_dpll_pin_set(netdev, mdpll->dpll_pin); + dpll_netdev_pin_set(netdev, mdpll->dpll_pin); mdpll->tracking_netdev = netdev; } @@ -269,7 +269,7 @@ static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll) { if (!mdpll->tracking_netdev) return; - netdev_dpll_pin_clear(mdpll->tracking_netdev); + dpll_netdev_pin_clear(mdpll->tracking_netdev); mdpll->tracking_netdev = NULL; } @@ -389,7 +389,7 @@ static void mlx5_dpll_remove(struct auxiliary_device *adev) struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev); struct mlx5_core_dev *mdev = mdpll->mdev; - cancel_delayed_work(&mdpll->work); + cancel_delayed_work_sync(&mdpll->work); mlx5_dpll_mdev_netdev_untrack(mdpll, mdev); destroy_workqueue(mdpll->wq); dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index 078f56a3cbb2..ca05b3252a1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -42,9 +42,9 @@ mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metad WARN_ON_ONCE(tracker->inuse); tracker->inuse = true; - spin_lock(&list->tracker_list_lock); + spin_lock_bh(&list->tracker_list_lock); list_add_tail(&tracker->entry, &list->tracker_list_head); - spin_unlock(&list->tracker_list_lock); + spin_unlock_bh(&list->tracker_list_lock); } static void @@ -54,9 +54,9 @@ mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 me WARN_ON_ONCE(!tracker->inuse); tracker->inuse = false; - spin_lock(&list->tracker_list_lock); + spin_lock_bh(&list->tracker_list_lock); list_del(&tracker->entry); - spin_unlock(&list->tracker_list_lock); + spin_unlock_bh(&list->tracker_list_lock); } void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata) @@ -155,7 +155,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq, struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map; struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n; - spin_lock(&cqe_list->tracker_list_lock); + spin_lock_bh(&cqe_list->tracker_list_lock); list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) { struct sk_buff *skb = mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id); @@ -170,7 +170,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq, pos->inuse = false; list_del(&pos->entry); } - 
spin_unlock(&cqe_list->tracker_list_lock); + spin_unlock_bh(&cqe_list->tracker_list_lock); } #define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c index 86bf007fd05b..b500cc2c9689 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c @@ -37,7 +37,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) { if (priv->mdev->coredev_type == MLX5_COREDEV_PF) - mlx5_core_warn(priv->mdev, "firmware level support is missing\n"); + mlx5_core_dbg(priv->mdev, "firmware flow level support is missing\n"); err = -EOPNOTSUPP; goto err_check; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c index d4ebd8743114..b2cabd6ab86c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c @@ -310,9 +310,9 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } -static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec, - struct mlx5e_macsec_sa *sa, - bool is_tx, struct net_device *netdev, u32 fs_id) +static void mlx5e_macsec_cleanup_sa_fs(struct mlx5e_macsec *macsec, + struct mlx5e_macsec_sa *sa, bool is_tx, + struct net_device *netdev, u32 fs_id) { int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT : MLX5_ACCEL_MACSEC_ACTION_DECRYPT; @@ -322,20 +322,49 @@ static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec, mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev, fs_id); - mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id); sa->macsec_rule = NULL; } +static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec, + struct mlx5e_macsec_sa *sa, bool is_tx, + struct net_device *netdev, u32 fs_id) +{ + mlx5e_macsec_cleanup_sa_fs(macsec, sa, is_tx, netdev, fs_id); + mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id); +} + +static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx, + struct mlx5e_macsec_sa *sa, bool encrypt, + bool is_tx, u32 *fs_id) +{ + struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev); + struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs; + struct mlx5_macsec_rule_attrs rule_attrs; + union mlx5_macsec_rule *macsec_rule; + + rule_attrs.macsec_obj_id = sa->macsec_obj_id; + rule_attrs.sci = sa->sci; + rule_attrs.assoc_num = sa->assoc_num; + rule_attrs.action = (is_tx) ? 
MLX5_ACCEL_MACSEC_ACTION_ENCRYPT : + MLX5_ACCEL_MACSEC_ACTION_DECRYPT; + + macsec_rule = mlx5_macsec_fs_add_rule(macsec_fs, ctx, &rule_attrs, fs_id); + if (!macsec_rule) + return -ENOMEM; + + sa->macsec_rule = macsec_rule; + + return 0; +} + static int mlx5e_macsec_init_sa(struct macsec_context *ctx, struct mlx5e_macsec_sa *sa, bool encrypt, bool is_tx, u32 *fs_id) { struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev); struct mlx5e_macsec *macsec = priv->macsec; - struct mlx5_macsec_rule_attrs rule_attrs; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_macsec_obj_attrs obj_attrs; - union mlx5_macsec_rule *macsec_rule; int err; obj_attrs.next_pn = sa->next_pn; @@ -357,20 +386,12 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx, if (err) return err; - rule_attrs.macsec_obj_id = sa->macsec_obj_id; - rule_attrs.sci = sa->sci; - rule_attrs.assoc_num = sa->assoc_num; - rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT : - MLX5_ACCEL_MACSEC_ACTION_DECRYPT; - - macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id); - if (!macsec_rule) { - err = -ENOMEM; - goto destroy_macsec_object; + if (sa->active) { + err = mlx5e_macsec_init_sa_fs(ctx, sa, encrypt, is_tx, fs_id); + if (err) + goto destroy_macsec_object; } - sa->macsec_rule = macsec_rule; - return 0; destroy_macsec_object: @@ -526,9 +547,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx) goto destroy_sa; macsec_device->tx_sa[assoc_num] = tx_sa; - if (!secy->operational || - assoc_num != tx_sc->encoding_sa || - !tx_sa->active) + if (!secy->operational) goto out; err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL); @@ -595,7 +614,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) goto out; if (ctx_tx_sa->active) { - err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL); + err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL); if (err) goto out; } else { @@ -604,7 +623,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) goto out; } - mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0); + mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0); } out: mutex_unlock(&macsec->lock); @@ -1030,8 +1049,9 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx) goto out; } - mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, - rx_sc->sc_xarray_element->fs_id); + if (rx_sa->active) + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, + rx_sc->sc_xarray_element->fs_id); mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id); kfree(rx_sa); rx_sc->rx_sa[assoc_num] = NULL; @@ -1112,8 +1132,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx, if (!rx_sa || !rx_sa->macsec_rule) continue; - mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, - rx_sc->sc_xarray_element->fs_id); + mlx5e_macsec_cleanup_sa_fs(macsec, rx_sa, false, ctx->secy->netdev, + rx_sc->sc_xarray_element->fs_id); } } @@ -1124,8 +1144,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx, continue; if (rx_sa->active) { - err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, - &rx_sc->sc_xarray_element->fs_id); + err = mlx5e_macsec_init_sa_fs(ctx, rx_sa, true, false, + &rx_sc->sc_xarray_element->fs_id); if (err) goto out; } @@ -1178,7 +1198,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx) if (!tx_sa) continue; - mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0); + mlx5e_macsec_cleanup_sa_fs(macsec, 
tx_sa, true, ctx->secy->netdev, 0); } for (i = 0; i < MACSEC_NUM_AN; ++i) { @@ -1187,7 +1207,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx) continue; if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) { - err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL); + err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL); if (err) goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 5c166d9d2dca..2fa076b23fbe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -401,6 +401,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, mlx5e_skb_cb_hwtstamp_init(skb); mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb, metadata_index); + /* ensure skb is put on metadata_map before tracking the index */ + wmb(); mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index); if (!netif_tx_queue_stopped(sq->txq) && mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c index 190f10aba170..5a0047bdcb51 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c @@ -152,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) xa_for_each(&esw->offloads.vport_reps, i, rep) { rpriv = rep->rep_data[REP_ETH].priv; - if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems)) + if (!rpriv || !rpriv->netdev) continue; rhashtable_walk_enter(&rpriv->tc_ht, &iter); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index b0455134c98e..baaae628b0a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -535,21 +535,26 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw) } static bool -esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest) +esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest) { - bool vf_dest = false, pf_dest = false; + bool internal_dest = false, external_dest = false; int i; for (i = 0; i < max_dest; i++) { - if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT) + if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT && + dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK) continue; - if (dests[i].vport.num == MLX5_VPORT_UPLINK) - pf_dest = true; + /* Uplink dest is external, but considered as internal + * if there is reformat because firmware uses LB+hairpin to support it. 
+ */ + if (dests[i].vport.num == MLX5_VPORT_UPLINK && + !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) + external_dest = true; else - vf_dest = true; + internal_dest = true; - if (vf_dest && pf_dest) + if (internal_dest && external_dest) return true; } @@ -695,9 +700,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, /* Header rewrite with combined wire+loopback in FDB is not allowed */ if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) && - esw_dests_to_vf_pf_vports(dest, i)) { + esw_dests_to_int_external(dest, i)) { esw_warn(esw->dev, - "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n"); + "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n"); rule = ERR_PTR(-EINVAL); goto err_esw_get; } @@ -3658,22 +3663,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) return 0; } -static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink) -{ - struct mlx5_core_dev *dev = devlink_priv(devlink); - struct net *devl_net, *netdev_net; - bool ret = false; - - mutex_lock(&dev->mlx5e_res.uplink_netdev_lock); - if (dev->mlx5e_res.uplink_netdev) { - netdev_net = dev_net(dev->mlx5e_res.uplink_netdev); - devl_net = devlink_net(devlink); - ret = net_eq(devl_net, netdev_net); - } - mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock); - return ret; -} - int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { struct mlx5_eswitch *esw = dev->priv.eswitch; @@ -3718,13 +3707,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, if (esw_mode_from_devlink(mode, &mlx5_mode)) return -EINVAL; - if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && - !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) { - NL_SET_ERR_MSG_MOD(extack, - "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's."); - return -EPERM; - } - mlx5_lag_disable_change(esw->dev); err = mlx5_esw_try_lock(esw); if (err < 0) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index f27eab6e4929..2911aa34a5be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -703,19 +703,30 @@ void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + if (!fw_reset) + return; + MLX5_NB_INIT(&fw_reset->nb, fw_reset_event_notifier, GENERAL_EVENT); mlx5_eq_notifier_register(dev, &fw_reset->nb); } void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev) { - mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb); + struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + + if (!fw_reset) + return; + + mlx5_eq_notifier_unregister(dev, &fw_reset->nb); } void mlx5_drain_fw_reset(struct mlx5_core_dev *dev) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + if (!fw_reset) + return; + set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags); cancel_work_sync(&fw_reset->fw_live_patch_work); cancel_work_sync(&fw_reset->reset_request_work); @@ -733,9 +744,13 @@ static const struct devlink_param mlx5_fw_reset_devlink_params[] = { int mlx5_fw_reset_init(struct mlx5_core_dev *dev) { - struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL); + struct mlx5_fw_reset *fw_reset; int err; + if (!MLX5_CAP_MCAM_REG(dev, mfrl)) + return 0; + + fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL); if (!fw_reset) return -ENOMEM; fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events"); @@ 
-771,6 +786,9 @@ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + if (!fw_reset) + return; + devl_params_unregister(priv_to_devlink(dev), mlx5_fw_reset_devlink_params, ARRAY_SIZE(mlx5_fw_reset_devlink_params)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 8ff6dc9bc803..b5c709bba155 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -452,10 +452,10 @@ mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct health_buffer __iomem *h = health->health; u8 synd = ioread8(&h->synd); + devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd); if (!synd) return 0; - devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd); devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd)); return 0; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c index 41fa2523d91d..5f2cd9a8cf8f 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c @@ -37,19 +37,24 @@ static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x) /* Now, set PGIDs for each active LAG */ for (lag = 0; lag < lan966x->num_phys_ports; ++lag) { - struct net_device *bond = lan966x->ports[lag]->bond; + struct lan966x_port *port = lan966x->ports[lag]; int num_active_ports = 0; + struct net_device *bond; unsigned long bond_mask; u8 aggr_idx[16]; - if (!bond || (visited & BIT(lag))) + if (!port || !port->bond || (visited & BIT(lag))) continue; + bond = port->bond; bond_mask = lan966x_lag_get_mask(lan966x, bond); for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) { struct lan966x_port *port = lan966x->ports[p]; + if (!port) + continue; + lan_wr(ANA_PGID_PGID_SET(bond_mask), lan966x, ANA_PGID(p)); if (port->lag_tx_active) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c index 92108d354051..2e83bbb9477e 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c @@ -168,9 +168,10 @@ static void lan966x_port_link_up(struct lan966x_port *port) lan966x_taprio_speed_set(port, config->speed); /* Also the GIGA_MODE_ENA(1) needs to be set regardless of the - * port speed for QSGMII ports. + * port speed for QSGMII or SGMII ports. 
*/ - if (phy_interface_num_ports(config->portmode) == 4) + if (phy_interface_num_ports(config->portmode) == 4 || + config->portmode == PHY_INTERFACE_MODE_SGMII) mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); lan_wr(config->duplex | mode, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c index 4af285918ea2..75868b3f548e 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c @@ -347,10 +347,10 @@ int sparx5_del_mact_entry(struct sparx5 *sparx5, list) { if ((vid == 0 || mact_entry->vid == vid) && ether_addr_equal(addr, mact_entry->mac)) { + sparx5_mact_forget(sparx5, addr, mact_entry->vid); + list_del(&mact_entry->list); devm_kfree(sparx5->dev, mact_entry); - - sparx5_mact_forget(sparx5, addr, mact_entry->vid); } } mutex_unlock(&sparx5->mact_lock); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index d1f7fc8b1b71..3c066b62e689 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -757,6 +757,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sparx5); sparx5->pdev = pdev; sparx5->dev = &pdev->dev; + spin_lock_init(&sparx5->tx_lock); /* Do switch core reset if available */ reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch"); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index 6f565c0c0c3d..316fed5f2735 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -280,6 +280,7 @@ struct sparx5 { int xtr_irq; /* Frame DMA */ int fdma_irq; + spinlock_t tx_lock; /* lock for frame transmission */ struct sparx5_rx rx; struct sparx5_tx tx; /* PTP */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c index 6db6ac6a3bbc..ac7e1cffbcec 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -244,10 +244,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) } skb_tx_timestamp(skb); + spin_lock(&sparx5->tx_lock); if (sparx5->fdma_irq > 0) ret = sparx5_fdma_xmit(sparx5, ifh, skb); else ret = sparx5_inject(sparx5, ifh, skb, dev); + spin_unlock(&sparx5->tx_lock); if (ret == -EBUSY) goto busy; diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 2967bab72505..15180538b80a 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -1424,10 +1424,30 @@ static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_ mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask); return; + /* Both struct tcphdr and struct udphdr start with + * __be16 source; + * __be16 dest; + * so we can use the same code for both. 
+ */ case FLOW_ACT_MANGLE_HDR_TYPE_TCP: case FLOW_ACT_MANGLE_HDR_TYPE_UDP: - mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val); - mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask); + if (mangle_action->mangle.offset == offsetof(struct tcphdr, source)) { + mangle_action->mangle.val = + (__force u32)cpu_to_be32(mangle_action->mangle.val << 16); + /* The mask of mangle action is inverse mask, + * so clear the dest tp port with 0xFFFF to + * instead of rotate-left operation. + */ + mangle_action->mangle.mask = + (__force u32)cpu_to_be32(mangle_action->mangle.mask << 16 | 0xFFFF); + } + if (mangle_action->mangle.offset == offsetof(struct tcphdr, dest)) { + mangle_action->mangle.offset = 0; + mangle_action->mangle.val = + (__force u32)cpu_to_be32(mangle_action->mangle.val); + mangle_action->mangle.mask = + (__force u32)cpu_to_be32(mangle_action->mangle.mask); + } return; default: @@ -1864,10 +1884,30 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv, { struct flow_rule *rule = flow_cls_offload_flow_rule(flow); struct nfp_fl_ct_flow_entry *ct_entry; + struct flow_action_entry *ct_goto; struct nfp_fl_ct_zone_entry *zt; + struct flow_action_entry *act; bool wildcarded = false; struct flow_match_ct ct; - struct flow_action_entry *ct_goto; + int i; + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_REDIRECT: + case FLOW_ACTION_REDIRECT_INGRESS: + case FLOW_ACTION_MIRRED: + case FLOW_ACTION_MIRRED_INGRESS: + if (act->dev->rtnl_link_ops && + !strcmp(act->dev->rtnl_link_ops->kind, "openvswitch")) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: out port is openvswitch internal port"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + } flow_rule_match_ct(rule, &ct); if (!ct.mask->ct_zone) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index e522845c7c21..0d7d138d6e0d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -1084,7 +1084,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, u16 nfp_mac_idx = 0; entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); - if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) { + if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) { if (entry->bridge_count || !nfp_flower_is_supported_bridge(netdev)) { nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 3b3210d823e8..f28e769e6fda 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2776,6 +2776,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn) case NFP_NFD_VER_NFD3: netdev->netdev_ops = &nfp_nfd3_netdev_ops; netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY; + netdev->xdp_features |= NETDEV_XDP_ACT_REDIRECT; break; case NFP_NFD_VER_NFDK: netdev->netdev_ops = &nfp_nfdk_netdev_ops; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 33b4c2856316..3f10c5365c80 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -537,11 +537,13 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 
interface) const u32 barcfg_msix_general = NFP_PCIE_BAR_PCIE2CPP_MapType( NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) | - NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT; + NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT); const u32 barcfg_msix_xpb = NFP_PCIE_BAR_PCIE2CPP_MapType( NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) | - NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT | + NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT) | NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress( NFP_CPP_TARGET_ISLAND_XPB); const u32 barcfg_explicit[4] = { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index c49aa358e424..6ba8d4aca0a0 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -93,6 +93,7 @@ static void ionic_unmap_bars(struct ionic *ionic) bars[i].len = 0; } } + ionic->num_bars = 0; } void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num) @@ -215,15 +216,17 @@ out: static void ionic_clear_pci(struct ionic *ionic) { - ionic->idev.dev_info_regs = NULL; - ionic->idev.dev_cmd_regs = NULL; - ionic->idev.intr_status = NULL; - ionic->idev.intr_ctrl = NULL; - - ionic_unmap_bars(ionic); - pci_release_regions(ionic->pdev); + if (ionic->num_bars) { + ionic->idev.dev_info_regs = NULL; + ionic->idev.dev_cmd_regs = NULL; + ionic->idev.intr_status = NULL; + ionic->idev.intr_ctrl = NULL; + + ionic_unmap_bars(ionic); + pci_release_regions(ionic->pdev); + } - if (atomic_read(&ionic->pdev->enable_cnt) > 0) + if (pci_is_enabled(ionic->pdev)) pci_disable_device(ionic->pdev); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 1e7c71f7f081..746072b4dbd0 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -319,22 +319,32 @@ do_check_time: u8 ionic_dev_cmd_status(struct ionic_dev *idev) { + if (!idev->dev_cmd_regs) + return (u8)PCI_ERROR_RESPONSE; return ioread8(&idev->dev_cmd_regs->comp.comp.status); } bool ionic_dev_cmd_done(struct ionic_dev *idev) { + if (!idev->dev_cmd_regs) + return false; return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE; } void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp) { + if (!idev->dev_cmd_regs) + return; memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp)); } void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd) { idev->opcode = cmd->cmd.opcode; + + if (!idev->dev_cmd_regs) + return; + memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd)); iowrite32(0, &idev->dev_cmd_regs->done); iowrite32(1, &idev->dev_cmd_regs->doorbell); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index cd3c0b01402e..0ffc9c4904ac 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -90,18 +90,23 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev; unsigned int offset; unsigned int size; regs->version = IONIC_DEV_CMD_REG_VERSION; + idev = &lif->ionic->idev; + if (!idev->dev_info_regs) + return; + offset = 0; size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32); memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size); offset += size; size = IONIC_DEV_CMD_REG_COUNT * 
sizeof(u32); - memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size); + memcpy_fromio(p + offset, idev->dev_cmd_regs->words, size); } static void ionic_get_link_ext_stats(struct net_device *netdev, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c index 5f40324cd243..3c209c1a2337 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_fw.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c @@ -109,6 +109,11 @@ int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw, dl = priv_to_devlink(ionic); devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0); + if (!idev->dev_cmd_regs) { + err = -ENXIO; + goto err_out; + } + buf_sz = sizeof(idev->dev_cmd_regs->data); netdev_dbg(netdev, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index cf2d5ad7b68c..fcb44ceeb6aa 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -3559,7 +3559,10 @@ int ionic_lif_init(struct ionic_lif *lif) goto err_out_notifyq_deinit; } - err = ionic_init_nic_features(lif); + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + err = ionic_set_nic_features(lif, lif->netdev->features); + else + err = ionic_init_nic_features(lif); if (err) goto err_out_notifyq_deinit; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 165ab08ad2dd..2f479de329fe 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -416,6 +416,9 @@ static void ionic_dev_cmd_clean(struct ionic *ionic) { struct ionic_dev *idev = &ionic->idev; + if (!idev->dev_cmd_regs) + return; + iowrite32(0, &idev->dev_cmd_regs->doorbell); memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd)); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 54cd96b035d6..6f4776759863 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -579,6 +579,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget) work_done = ionic_cq_service(cq, budget, ionic_tx_service, NULL, NULL); + if (unlikely(!budget)) + return budget; + if (work_done < budget && napi_complete_done(napi, work_done)) { ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR); flags |= IONIC_INTR_CRED_UNMASK; @@ -607,6 +610,9 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) u32 work_done = 0; u32 flags = 0; + if (unlikely(!budget)) + return budget; + lif = cq->bound_q->lif; idev = &lif->ionic->idev; @@ -656,6 +662,9 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, ionic_tx_service, NULL, NULL); + if (unlikely(!budget)) + return budget; + rx_work_done = ionic_cq_service(rxcq, budget, ionic_rx_service, NULL, NULL); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 0e3731f50fc2..f7566cfa45ca 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -772,29 +772,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) struct ravb_rx_desc *desc; struct sk_buff *skb; dma_addr_t dma_addr; + int rx_packets = 0; u8 desc_status; - int boguscnt; u16 pkt_len; u8 die_dt; int entry; int limit; + int i; entry = priv->cur_rx[q] % 
priv->num_rx_ring[q]; - boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; + limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; stats = &priv->stats[q]; - boguscnt = min(boguscnt, *quota); - limit = boguscnt; desc = &priv->gbeth_rx_ring[entry]; - while (desc->die_dt != DT_FEMPTY) { + for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) { /* Descriptor type must be checked before all other reads */ dma_rmb(); desc_status = desc->msc; pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; - if (--boguscnt < 0) - break; - /* We use 0-byte descriptors to mark the DMA mapping errors */ if (!pkt_len) continue; @@ -820,7 +816,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); napi_gro_receive(&priv->napi[q], skb); - stats->rx_packets++; + rx_packets++; stats->rx_bytes += pkt_len; break; case DT_FSTART: @@ -848,7 +844,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) eth_type_trans(priv->rx_1st_skb, ndev); napi_gro_receive(&priv->napi[q], priv->rx_1st_skb); - stats->rx_packets++; + rx_packets++; stats->rx_bytes += pkt_len; break; } @@ -887,9 +883,9 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) desc->die_dt = DT_FEMPTY; } - *quota -= limit - (++boguscnt); - - return boguscnt <= 0; + stats->rx_packets += rx_packets; + *quota -= rx_packets; + return *quota == 0; } /* Packet receive function for Ethernet AVB */ diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 721c1f8e892f..5ba606a596e7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -59,28 +59,51 @@ #undef FRAME_FILTER_DEBUG /* #define FRAME_FILTER_DEBUG */ +struct stmmac_q_tx_stats { + u64_stats_t tx_bytes; + u64_stats_t tx_set_ic_bit; + u64_stats_t tx_tso_frames; + u64_stats_t tx_tso_nfrags; +}; + +struct stmmac_napi_tx_stats { + u64_stats_t tx_packets; + u64_stats_t tx_pkt_n; + u64_stats_t poll; + u64_stats_t tx_clean; + u64_stats_t tx_set_ic_bit; +}; + struct stmmac_txq_stats { - u64 tx_bytes; - u64 tx_packets; - u64 tx_pkt_n; - u64 tx_normal_irq_n; - u64 napi_poll; - u64 tx_clean; - u64 tx_set_ic_bit; - u64 tx_tso_frames; - u64 tx_tso_nfrags; - struct u64_stats_sync syncp; + /* Updates protected by tx queue lock. */ + struct u64_stats_sync q_syncp; + struct stmmac_q_tx_stats q; + + /* Updates protected by NAPI poll logic. */ + struct u64_stats_sync napi_syncp; + struct stmmac_napi_tx_stats napi; } ____cacheline_aligned_in_smp; +struct stmmac_napi_rx_stats { + u64_stats_t rx_bytes; + u64_stats_t rx_packets; + u64_stats_t rx_pkt_n; + u64_stats_t poll; +}; + struct stmmac_rxq_stats { - u64 rx_bytes; - u64 rx_packets; - u64 rx_pkt_n; - u64 rx_normal_irq_n; - u64 napi_poll; - struct u64_stats_sync syncp; + /* Updates protected by NAPI poll logic. */ + struct u64_stats_sync napi_syncp; + struct stmmac_napi_rx_stats napi; } ____cacheline_aligned_in_smp; +/* Updates on each CPU protected by not allowing nested irqs. 
*/ +struct stmmac_pcpu_stats { + struct u64_stats_sync syncp; + u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES]; + u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES]; +}; + /* Extra statistic and debug information exposed by ethtool */ struct stmmac_extra_stats { /* Transmit errors */ @@ -205,6 +228,7 @@ struct stmmac_extra_stats { /* per queue statistics */ struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES]; struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES]; + struct stmmac_pcpu_stats __percpu *pcpu_stats; unsigned long rx_dropped; unsigned long rx_errors; unsigned long tx_dropped; @@ -216,6 +240,7 @@ struct stmmac_safety_stats { unsigned long mac_errors[32]; unsigned long mtl_errors[32]; unsigned long dma_errors[32]; + unsigned long dma_dpp_errors[32]; }; /* Number of fields in Safety Stats */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index 8f730ada71f9..6b65420e11b5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -353,6 +353,10 @@ static int imx_dwmac_probe(struct platform_device *pdev) if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY; + /* Default TX Q0 to use TSO and rest TXQ for TBS */ + for (int i = 1; i < plat_dat->tx_queues_to_use; i++) + plat_dat->tx_queues_cfg[i].tbs_en = 1; + plat_dat->host_dma_width = dwmac->ops->addr_width; plat_dat->init = imx_dwmac_init; plat_dat->exit = imx_dwmac_exit; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 137741b94122..b21d99faa2d0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -441,8 +441,7 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; u32 v; @@ -455,9 +454,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, if (v & EMAC_TX_INT) { ret |= handle_tx; - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); } if (v & EMAC_TX_DMA_STOP_INT) @@ -479,9 +478,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, if (v & EMAC_RX_INT) { ret |= handle_rx; - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); } if (v & EMAC_RX_BUF_UA_INT) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 9470d3fd2ded..0d185e54eb7e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -171,8 +171,7 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs; u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan)); u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan)); - struct 
stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; if (dir == DMA_DIR_RX) @@ -201,15 +200,15 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, } /* TX/RX NORMAL interrupts */ if (likely(intr_status & DMA_CHAN_STATUS_RI)) { - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_rx; } if (likely(intr_status & DMA_CHAN_STATUS_TI)) { - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_tx; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 7907d62d3437..85e18f9a22f9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -162,8 +162,7 @@ static void show_rx_process_state(unsigned int status) int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; /* read the status register (CSR5) */ u32 intr_status = readl(ioaddr + DMA_STATUS); @@ -215,16 +214,16 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, u32 value = readl(ioaddr + DMA_INTR_ENA); /* to schedule NAPI on real RIE event. 
*/ if (likely(value & DMA_INTR_ENA_RIE)) { - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_rx; } } if (likely(intr_status & DMA_STATUS_TI)) { - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_tx; } if (unlikely(intr_status & DMA_STATUS_ERI)) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 207ff1799f2c..6a2c7d22df1e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -303,6 +303,8 @@ #define XGMAC_RXCEIE BIT(4) #define XGMAC_TXCEIE BIT(0) #define XGMAC_MTL_ECC_INT_STATUS 0x000010cc +#define XGMAC_MTL_DPP_CONTROL 0x000010e0 +#define XGMAC_DPP_DISABLE BIT(0) #define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x))) #define XGMAC_TQS GENMASK(25, 16) #define XGMAC_TQS_SHIFT 16 @@ -385,6 +387,7 @@ #define XGMAC_DCEIE BIT(1) #define XGMAC_TCEIE BIT(0) #define XGMAC_DMA_ECC_INT_STATUS 0x0000306c +#define XGMAC_DMA_DPP_INT_STATUS 0x00003074 #define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x))) #define XGMAC_SPH BIT(24) #define XGMAC_PBLx8 BIT(16) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index eb48211d9b0e..1af2f89a0504 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -830,6 +830,44 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= { { false, "UNKNOWN", "Unknown Error" }, /* 31 */ }; +#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error" +#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error" + +static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = { + { true, "TDPES0", DPP_TX_ERR }, + { true, "TDPES1", DPP_TX_ERR }, + { true, "TDPES2", DPP_TX_ERR }, + { true, "TDPES3", DPP_TX_ERR }, + { true, "TDPES4", DPP_TX_ERR }, + { true, "TDPES5", DPP_TX_ERR }, + { true, "TDPES6", DPP_TX_ERR }, + { true, "TDPES7", DPP_TX_ERR }, + { true, "TDPES8", DPP_TX_ERR }, + { true, "TDPES9", DPP_TX_ERR }, + { true, "TDPES10", DPP_TX_ERR }, + { true, "TDPES11", DPP_TX_ERR }, + { true, "TDPES12", DPP_TX_ERR }, + { true, "TDPES13", DPP_TX_ERR }, + { true, "TDPES14", DPP_TX_ERR }, + { true, "TDPES15", DPP_TX_ERR }, + { true, "RDPES0", DPP_RX_ERR }, + { true, "RDPES1", DPP_RX_ERR }, + { true, "RDPES2", DPP_RX_ERR }, + { true, "RDPES3", DPP_RX_ERR }, + { true, "RDPES4", DPP_RX_ERR }, + { true, "RDPES5", DPP_RX_ERR }, + { true, "RDPES6", DPP_RX_ERR }, + { true, "RDPES7", DPP_RX_ERR }, + { true, "RDPES8", DPP_RX_ERR }, + { true, "RDPES9", DPP_RX_ERR }, + { true, "RDPES10", DPP_RX_ERR }, + { true, "RDPES11", DPP_RX_ERR }, + { true, "RDPES12", DPP_RX_ERR }, + { true, "RDPES13", DPP_RX_ERR }, + { true, "RDPES14", DPP_RX_ERR }, + { true, "RDPES15", DPP_RX_ERR }, +}; + static void dwxgmac3_handle_dma_err(struct net_device *ndev, void __iomem *ioaddr, bool correctable, struct stmmac_safety_stats *stats) @@ -841,6 +879,13 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev, dwxgmac3_log_error(ndev, value, correctable, "DMA", dwxgmac3_dma_errors, STAT_OFF(dma_errors), 
stats); + + value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS); + writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS); + + dwxgmac3_log_error(ndev, value, false, "DMA_DPP", + dwxgmac3_dma_dpp_errors, + STAT_OFF(dma_dpp_errors), stats); } static int @@ -881,6 +926,12 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp, value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */ writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL); + /* 5. Enable Data Path Parity Protection */ + value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL); + /* already enabled by default, explicit enable it again */ + value &= ~XGMAC_DPP_DISABLE; + writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL); + return 0; } @@ -914,7 +965,11 @@ static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev, ret |= !corr; } - err = dma & (XGMAC_DEUIS | XGMAC_DECIS); + /* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in + * DMA_Safety_Interrupt_Status, so we handle DMA Data Path + * Parity Errors here + */ + err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS); corr = dma & XGMAC_DECIS; if (err) { dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats); @@ -930,6 +985,7 @@ static const struct dwxgmac3_error { { dwxgmac3_mac_errors }, { dwxgmac3_mtl_errors }, { dwxgmac3_dma_errors }, + { dwxgmac3_dma_dpp_errors }, }; static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 3cde695fec91..dd2ab6185c40 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -337,8 +337,7 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); int ret = 0; @@ -367,15 +366,15 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, /* TX/RX NORMAL interrupts */ if (likely(intr_status & XGMAC_NIS)) { if (likely(intr_status & XGMAC_RI)) { - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_rx; } if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_tx; } } diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 1bd34b2a47e8..29367105df54 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -224,7 +224,7 @@ static const struct stmmac_hwif_entry { .regs = { .ptp_off = PTP_GMAC4_OFFSET, .mmc_off = MMC_GMAC4_OFFSET, - .est_off = EST_XGMAC_OFFSET, + .est_off = EST_GMAC4_OFFSET, }, .desc = &dwmac4_desc_ops, .dma = &dwmac410_dma_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 42d27b97dd1d..ec44becf0e2d 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -549,44 +549,79 @@ stmmac_set_pauseparam(struct net_device *netdev, } } +static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q) +{ + u64 total; + int cpu; + + total = 0; + for_each_possible_cpu(cpu) { + struct stmmac_pcpu_stats *pcpu; + unsigned int start; + u64 irq_n; + + pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu); + do { + start = u64_stats_fetch_begin(&pcpu->syncp); + irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]); + } while (u64_stats_fetch_retry(&pcpu->syncp, start)); + total += irq_n; + } + return total; +} + +static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q) +{ + u64 total; + int cpu; + + total = 0; + for_each_possible_cpu(cpu) { + struct stmmac_pcpu_stats *pcpu; + unsigned int start; + u64 irq_n; + + pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu); + do { + start = u64_stats_fetch_begin(&pcpu->syncp); + irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]); + } while (u64_stats_fetch_retry(&pcpu->syncp, start)); + total += irq_n; + } + return total; +} + static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data) { u32 tx_cnt = priv->plat->tx_queues_to_use; u32 rx_cnt = priv->plat->rx_queues_to_use; unsigned int start; - int q, stat; - char *p; + int q; for (q = 0; q < tx_cnt; q++) { struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; - struct stmmac_txq_stats snapshot; + u64 pkt_n; do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - snapshot = *txq_stats; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + start = u64_stats_fetch_begin(&txq_stats->napi_syncp); + pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n); + } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); - p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n); - for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { - *data++ = (*(u64 *)p); - p += sizeof(u64); - } + *data++ = pkt_n; + *data++ = stmmac_get_tx_normal_irq_n(priv, q); } for (q = 0; q < rx_cnt; q++) { struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; - struct stmmac_rxq_stats snapshot; + u64 pkt_n; do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - snapshot = *rxq_stats; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); + pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n); + } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); - p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n); - for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { - *data++ = (*(u64 *)p); - p += sizeof(u64); - } + *data++ = pkt_n; + *data++ = stmmac_get_rx_normal_irq_n(priv, q); } } @@ -645,39 +680,49 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, pos = j; for (i = 0; i < rx_queues_count; i++) { struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i]; - struct stmmac_rxq_stats snapshot; + struct stmmac_napi_rx_stats snapshot; + u64 n_irq; j = pos; do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - snapshot = *rxq_stats; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); - - data[j++] += snapshot.rx_pkt_n; - data[j++] += snapshot.rx_normal_irq_n; - normal_irq_n += snapshot.rx_normal_irq_n; - napi_poll += snapshot.napi_poll; + start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); + snapshot = rxq_stats->napi; + } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); + + data[j++] += 
u64_stats_read(&snapshot.rx_pkt_n); + n_irq = stmmac_get_rx_normal_irq_n(priv, i); + data[j++] += n_irq; + normal_irq_n += n_irq; + napi_poll += u64_stats_read(&snapshot.poll); } pos = j; for (i = 0; i < tx_queues_count; i++) { struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i]; - struct stmmac_txq_stats snapshot; + struct stmmac_napi_tx_stats napi_snapshot; + struct stmmac_q_tx_stats q_snapshot; + u64 n_irq; j = pos; do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - snapshot = *txq_stats; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); - - data[j++] += snapshot.tx_pkt_n; - data[j++] += snapshot.tx_normal_irq_n; - normal_irq_n += snapshot.tx_normal_irq_n; - data[j++] += snapshot.tx_clean; - data[j++] += snapshot.tx_set_ic_bit; - data[j++] += snapshot.tx_tso_frames; - data[j++] += snapshot.tx_tso_nfrags; - napi_poll += snapshot.napi_poll; + start = u64_stats_fetch_begin(&txq_stats->q_syncp); + q_snapshot = txq_stats->q; + } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); + do { + start = u64_stats_fetch_begin(&txq_stats->napi_syncp); + napi_snapshot = txq_stats->napi; + } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); + + data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n); + n_irq = stmmac_get_tx_normal_irq_n(priv, i); + data[j++] += n_irq; + normal_irq_n += n_irq; + data[j++] += u64_stats_read(&napi_snapshot.tx_clean); + data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) + + u64_stats_read(&napi_snapshot.tx_set_ic_bit); + data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames); + data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags); + napi_poll += u64_stats_read(&napi_snapshot.poll); } normal_irq_n += priv->xstats.rx_early_irq; data[j++] = normal_irq_n; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index b334eb16da23..7c6aef033a45 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2482,7 +2482,6 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) struct xdp_desc xdp_desc; bool work_done = true; u32 tx_set_ic_bit = 0; - unsigned long flags; /* Avoids TX time-out as we are sharing with slow path */ txq_trans_cond_update(nq); @@ -2566,9 +2565,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); entry = tx_q->cur_tx; } - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_set_ic_bit += tx_set_ic_bit; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit); + u64_stats_update_end(&txq_stats->napi_syncp); if (tx_desc) { stmmac_flush_tx_descriptors(priv, queue); @@ -2616,7 +2615,6 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, unsigned int bytes_compl = 0, pkts_compl = 0; unsigned int entry, xmits = 0, count = 0; u32 tx_packets = 0, tx_errors = 0; - unsigned long flags; __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); @@ -2674,7 +2672,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, } if (skb) { stmmac_get_tx_hwtstamp(priv, p, skb); - } else { + } else if (tx_q->xsk_pool && + xp_tx_metadata_enabled(tx_q->xsk_pool)) { struct stmmac_xsk_tx_complete tx_compl = { .priv = priv, .desc = p, @@ -2782,11 +2781,11 @@ static int stmmac_tx_clean(struct 
stmmac_priv *priv, int budget, u32 queue, if (tx_q->dirty_tx != tx_q->cur_tx) *pending_packets = true; - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_packets += tx_packets; - txq_stats->tx_pkt_n += tx_packets; - txq_stats->tx_clean++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_add(&txq_stats->napi.tx_packets, tx_packets); + u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets); + u64_stats_inc(&txq_stats->napi.tx_clean); + u64_stats_update_end(&txq_stats->napi_syncp); priv->xstats.tx_errors += tx_errors; @@ -3932,6 +3931,9 @@ static int __stmmac_open(struct net_device *dev, priv->rx_copybreak = STMMAC_RX_COPYBREAK; buf_sz = dma_conf->dma_buf_sz; + for (int i = 0; i < MTL_MAX_TX_QUEUES; i++) + if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) + dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs; memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); stmmac_reset_queues_param(priv); @@ -4004,8 +4006,10 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) { set_bit(__FPE_REMOVING, &priv->fpe_task_state); - if (priv->fpe_wq) + if (priv->fpe_wq) { destroy_workqueue(priv->fpe_wq); + priv->fpe_wq = NULL; + } netdev_info(priv->dev, "FPE workqueue stop"); } @@ -4210,7 +4214,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; u8 proto_hdr_len, hdr; - unsigned long flags; u32 pay_len, mss; dma_addr_t des; int i; @@ -4375,13 +4378,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_bytes += skb->len; - txq_stats->tx_tso_frames++; - txq_stats->tx_tso_nfrags += nfrags; + u64_stats_update_begin(&txq_stats->q_syncp); + u64_stats_add(&txq_stats->q.tx_bytes, skb->len); + u64_stats_inc(&txq_stats->q.tx_tso_frames); + u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags); if (set_ic) - txq_stats->tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_inc(&txq_stats->q.tx_set_ic_bit); + u64_stats_update_end(&txq_stats->q_syncp); if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -4480,7 +4483,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; int entry, first_tx; - unsigned long flags; dma_addr_t des; tx_q = &priv->dma_conf.tx_queue[queue]; @@ -4650,11 +4652,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_bytes += skb->len; + u64_stats_update_begin(&txq_stats->q_syncp); + u64_stats_add(&txq_stats->q.tx_bytes, skb->len); if (set_ic) - txq_stats->tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_inc(&txq_stats->q.tx_set_ic_bit); + u64_stats_update_end(&txq_stats->q_syncp); if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -4918,12 +4920,11 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, set_ic = false; if (set_ic) { - unsigned long flags; tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, tx_desc); - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, 
flags); + u64_stats_update_begin(&txq_stats->q_syncp); + u64_stats_inc(&txq_stats->q.tx_set_ic_bit); + u64_stats_update_end(&txq_stats->q_syncp); } stmmac_enable_dma_transmission(priv, priv->ioaddr); @@ -5073,7 +5074,6 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, unsigned int len = xdp->data_end - xdp->data; enum pkt_hash_types hash_type; int coe = priv->hw->rx_csum; - unsigned long flags; struct sk_buff *skb; u32 hash; @@ -5103,10 +5103,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, skb_record_rx_queue(skb, queue); napi_gro_receive(&ch->rxtx_napi, skb); - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->rx_pkt_n++; - rxq_stats->rx_bytes += len; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_inc(&rxq_stats->napi.rx_pkt_n); + u64_stats_add(&rxq_stats->napi.rx_bytes, len); + u64_stats_update_end(&rxq_stats->napi_syncp); } static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) @@ -5188,7 +5188,6 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) unsigned int desc_size; struct bpf_prog *prog; bool failure = false; - unsigned long flags; int xdp_status = 0; int status = 0; @@ -5343,9 +5342,9 @@ read_again: stmmac_finalize_xdp_rx(priv, xdp_status); - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->rx_pkt_n += count; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); + u64_stats_update_end(&rxq_stats->napi_syncp); priv->xstats.rx_dropped += rx_dropped; priv->xstats.rx_errors += rx_errors; @@ -5383,7 +5382,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) unsigned int desc_size; struct sk_buff *skb = NULL; struct stmmac_xdp_buff ctx; - unsigned long flags; int xdp_status = 0; int buf_sz; @@ -5643,11 +5641,11 @@ drain_data: stmmac_rx_refill(priv, queue); - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->rx_packets += rx_packets; - rxq_stats->rx_bytes += rx_bytes; - rxq_stats->rx_pkt_n += count; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets); + u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes); + u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); + u64_stats_update_end(&rxq_stats->napi_syncp); priv->xstats.rx_dropped += rx_dropped; priv->xstats.rx_errors += rx_errors; @@ -5662,13 +5660,12 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) struct stmmac_priv *priv = ch->priv_data; struct stmmac_rxq_stats *rxq_stats; u32 chan = ch->index; - unsigned long flags; int work_done; rxq_stats = &priv->xstats.rxq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_inc(&rxq_stats->napi.poll); + u64_stats_update_end(&rxq_stats->napi_syncp); work_done = stmmac_rx(priv, budget, chan); if (work_done < budget && napi_complete_done(napi, work_done)) { @@ -5690,13 +5687,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) struct stmmac_txq_stats *txq_stats; bool pending_packets = false; u32 chan = ch->index; - unsigned long flags; int work_done; txq_stats = &priv->xstats.txq_stats[chan]; - flags = 
u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_inc(&txq_stats->napi.poll); + u64_stats_update_end(&txq_stats->napi_syncp); work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets); work_done = min(work_done, budget); @@ -5726,17 +5722,16 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) struct stmmac_rxq_stats *rxq_stats; struct stmmac_txq_stats *txq_stats; u32 chan = ch->index; - unsigned long flags; rxq_stats = &priv->xstats.rxq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_inc(&rxq_stats->napi.poll); + u64_stats_update_end(&rxq_stats->napi_syncp); txq_stats = &priv->xstats.txq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_inc(&txq_stats->napi.poll); + u64_stats_update_end(&txq_stats->napi_syncp); tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets); tx_done = min(tx_done, budget); @@ -6067,11 +6062,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); - if (unlikely(!dev)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -6087,11 +6077,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); - if (unlikely(!dev)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -6113,11 +6098,6 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); priv = container_of(dma_conf, struct stmmac_priv, dma_conf); - if (unlikely(!data)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -6144,11 +6124,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); priv = container_of(dma_conf, struct stmmac_priv, dma_conf); - if (unlikely(!data)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -7062,10 +7037,13 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 u64 tx_bytes; do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - tx_packets = txq_stats->tx_packets; - tx_bytes = txq_stats->tx_bytes; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + start = u64_stats_fetch_begin(&txq_stats->q_syncp); + tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes); + } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); + do { + start = u64_stats_fetch_begin(&txq_stats->napi_syncp); + tx_packets = 
u64_stats_read(&txq_stats->napi.tx_packets); + } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); stats->tx_packets += tx_packets; stats->tx_bytes += tx_bytes; @@ -7077,10 +7055,10 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 u64 rx_bytes; do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - rx_packets = rxq_stats->rx_packets; - rx_bytes = rxq_stats->rx_bytes; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); + rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets); + rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes); + } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; @@ -7474,9 +7452,16 @@ int stmmac_dvr_probe(struct device *device, priv->dev = ndev; for (i = 0; i < MTL_MAX_RX_QUEUES; i++) - u64_stats_init(&priv->xstats.rxq_stats[i].syncp); - for (i = 0; i < MTL_MAX_TX_QUEUES; i++) - u64_stats_init(&priv->xstats.txq_stats[i].syncp); + u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp); + for (i = 0; i < MTL_MAX_TX_QUEUES; i++) { + u64_stats_init(&priv->xstats.txq_stats[i].q_syncp); + u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp); + } + + priv->xstats.pcpu_stats = + devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats); + if (!priv->xstats.pcpu_stats) + return -ENOMEM; stmmac_set_ethtool_ops(ndev); priv->pause = pause; diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index be01450c20dc..1530d13984d4 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -189,6 +189,7 @@ config TI_ICSSG_PRUETH select TI_K3_CPPI_DESC_POOL depends on PRU_REMOTEPROC depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER + depends on PTP_1588_CLOCK_OPTIONAL help Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem. This subsystem is available starting with the AM65 platform. 
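The stmmac hunks above split the old irqsave-protected per-queue counters into u64_stats_t fields grouped by update context (TX queue lock vs. NAPI poll) plus per-CPU IRQ counters that are summed at read time. Below is a minimal sketch of that per-CPU u64_stats pattern, assuming only the standard <linux/u64_stats_sync.h> and <linux/percpu.h> helpers; the demo_* names are illustrative and are not driver code.

/* Per-CPU counter block; one instance per possible CPU. */
struct demo_pcpu_stats {
        struct u64_stats_sync syncp;
        u64_stats_t rx_irq_n;
};

/* Hot path (e.g. a DMA interrupt): touch only this CPU's counters. */
static void demo_count_rx_irq(struct demo_pcpu_stats __percpu *stats)
{
        struct demo_pcpu_stats *s = this_cpu_ptr(stats);

        u64_stats_update_begin(&s->syncp);
        u64_stats_inc(&s->rx_irq_n);
        u64_stats_update_end(&s->syncp);
}

/* Slow path (e.g. ethtool -S): take a consistent snapshot per CPU and sum. */
static u64 demo_read_rx_irq(struct demo_pcpu_stats __percpu *stats)
{
        u64 total = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct demo_pcpu_stats *s = per_cpu_ptr(stats, cpu);
                unsigned int start;
                u64 val;

                do {
                        start = u64_stats_fetch_begin(&s->syncp);
                        val = u64_stats_read(&s->rx_irq_n);
                } while (u64_stats_fetch_retry(&s->syncp, start));
                total += val;
        }
        return total;
}

Allocating the per-CPU block with netdev_alloc_pcpu_stats()/devm_netdev_alloc_pcpu_stats() initializes each CPU's syncp, which is why the probe-time hunk above only adds explicit u64_stats_init() calls for the non-per-CPU queue structures.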
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 9d2f4ac783e4..2939a21ca74f 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -294,7 +294,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev, txqueue, netif_tx_queue_stopped(netif_txq), jiffies_to_msecs(jiffies - trans_start), - dql_avail(&netif_txq->dql), + netdev_queue_dql_avail(netif_txq), k3_cppi_desc_pool_avail(tx_chn->desc_pool)); if (netif_tx_queue_stopped(netif_txq)) { diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index ea85c6dd5484..c0a5abd8d9a8 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -631,6 +631,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) } } + phy->mac_managed_pm = true; + slave->phy = phy; phy_attached_info(slave->phy); diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 498c50c6d1a7..087dcb67505a 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -773,6 +773,9 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) slave->slave_num); return; } + + phy->mac_managed_pm = true; + slave->phy = phy; phy_attached_info(slave->phy); diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index bcccf43d368b..dbbea9146040 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -638,6 +638,16 @@ static void cpts_calc_mult_shift(struct cpts *cpts) freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC)); } +static void cpts_clk_unregister(void *clk) +{ + clk_hw_unregister_mux(clk); +} + +static void cpts_clk_del_provider(void *np) +{ + of_clk_del_provider(np); +} + static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node) { struct device_node *refclk_np; @@ -687,9 +697,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node) goto mux_fail; } - ret = devm_add_action_or_reset(cpts->dev, - (void(*)(void *))clk_hw_unregister_mux, - clk_hw); + ret = devm_add_action_or_reset(cpts->dev, cpts_clk_unregister, clk_hw); if (ret) { dev_err(cpts->dev, "add clkmux unreg action %d", ret); goto mux_fail; @@ -699,8 +707,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node) if (ret) goto mux_fail; - ret = devm_add_action_or_reset(cpts->dev, - (void(*)(void *))of_clk_del_provider, + ret = devm_add_action_or_reset(cpts->dev, cpts_clk_del_provider, refclk_np); if (ret) { dev_err(cpts->dev, "add clkmux provider unreg action %d", ret); diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index d5b75af163d3..c1b0d35c8d05 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -384,18 +384,18 @@ static int gelic_descr_prepare_rx(struct gelic_card *card, if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE) dev_info(ctodev(card), "%s: ERROR status\n", __func__); - descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size); - if (!descr->skb) { - descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */ - return -ENOMEM; - } descr->hw_regs.dmac_cmd_status = 0; descr->hw_regs.result_size = 0; descr->hw_regs.valid_size = 0; descr->hw_regs.data_error = 0; descr->hw_regs.payload.dev_addr = 0; descr->hw_regs.payload.size = 0; - descr->skb = NULL; + + descr->skb = 
netdev_alloc_skb(*card->netdev, rx_skb_size); + if (!descr->skb) { + descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */ + return -ENOMEM; + } offset = ((unsigned long)descr->skb->data) & (GELIC_NET_RXBUF_ALIGN - 1); diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c index 2b6a607ac0b7..a273362c9e70 100644 --- a/drivers/net/fddi/skfp/skfddi.c +++ b/drivers/net/fddi/skfp/skfddi.c @@ -153,6 +153,7 @@ static const struct pci_device_id skfddi_pci_tbl[] = { { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl); +MODULE_DESCRIPTION("SysKonnect FDDI PCI driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>"); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 32c51c244153..c4ed36c71897 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -221,7 +221,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, struct genevehdr *gnvh = geneve_hdr(skb); struct metadata_dst *tun_dst = NULL; unsigned int len; - int err = 0; + int nh, err = 0; void *oiph; if (ip_tunnel_collect_metadata() || gs->collect_md) { @@ -272,9 +272,23 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, skb->pkt_type = PACKET_HOST; } - oiph = skb_network_header(skb); + /* Save offset of outer header relative to skb->head, + * because we are going to reset the network header to the inner header + * and might change skb->head. + */ + nh = skb_network_header(skb) - skb->head; + skb_reset_network_header(skb); + if (!pskb_inet_may_pull(skb)) { + DEV_STATS_INC(geneve->dev, rx_length_errors); + DEV_STATS_INC(geneve->dev, rx_errors); + goto drop; + } + + /* Get the outer header. */ + oiph = skb->head + nh; + if (geneve_get_sk_family(gs) == AF_INET) err = IP_ECN_decapsulate(oiph, skb); #if IS_ENABLED(CONFIG_IPV6) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index b1919278e931..2b5357d94ff5 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1903,26 +1903,26 @@ static int __init gtp_init(void) get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval)); - err = rtnl_link_register(&gtp_link_ops); + err = register_pernet_subsys(&gtp_net_ops); if (err < 0) goto error_out; - err = genl_register_family(&gtp_genl_family); + err = rtnl_link_register(&gtp_link_ops); if (err < 0) - goto unreg_rtnl_link; + goto unreg_pernet_subsys; - err = register_pernet_subsys(&gtp_net_ops); + err = genl_register_family(&gtp_genl_family); if (err < 0) - goto unreg_genl_family; + goto unreg_rtnl_link; pr_info("GTP module loaded (pdp ctx size %zd bytes)\n", sizeof(struct pdp_ctx)); return 0; -unreg_genl_family: - genl_unregister_family(&gtp_genl_family); unreg_rtnl_link: rtnl_link_unregister(&gtp_link_ops); +unreg_pernet_subsys: + unregister_pernet_subsys(&gtp_net_ops); error_out: pr_err("error loading GTP module loaded\n"); return err; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 1dafa44155d0..a6fcbda64ecc 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -708,7 +708,10 @@ void netvsc_device_remove(struct hv_device *device) /* Disable NAPI and disassociate its context from the device. */ for (i = 0; i < net_device->num_chn; i++) { /* See also vmbus_reset_channel_cb().
*/ - napi_disable(&net_device->chan_table[i].napi); + /* only disable enabled NAPI channel */ + if (i < ndev->real_num_rx_queues) + napi_disable(&net_device->chan_table[i].napi); + netif_napi_del(&net_device->chan_table[i].napi); } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 273bd8a20122..11831a1c9762 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -42,6 +42,10 @@ #define LINKCHANGE_INT (2 * HZ) #define VF_TAKEOVER_INT (HZ / 10) +/* Macros to define the context of vf registration */ +#define VF_REG_IN_PROBE 1 +#define VF_REG_IN_NOTIFIER 2 + static unsigned int ring_size __ro_after_init = 128; module_param(ring_size, uint, 0444); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)"); @@ -2185,7 +2189,7 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) } static int netvsc_vf_join(struct net_device *vf_netdev, - struct net_device *ndev) + struct net_device *ndev, int context) { struct net_device_context *ndev_ctx = netdev_priv(ndev); int ret; @@ -2208,7 +2212,11 @@ static int netvsc_vf_join(struct net_device *vf_netdev, goto upper_link_failed; } - schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); + /* If this registration is called from probe context vf_takeover + * is taken care of later in probe itself. + */ + if (context == VF_REG_IN_NOTIFIER) + schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); @@ -2346,7 +2354,7 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev) return NOTIFY_DONE; } -static int netvsc_register_vf(struct net_device *vf_netdev) +static int netvsc_register_vf(struct net_device *vf_netdev, int context) { struct net_device_context *net_device_ctx; struct netvsc_device *netvsc_dev; @@ -2386,7 +2394,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev) netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); - if (netvsc_vf_join(vf_netdev, ndev) != 0) + if (netvsc_vf_join(vf_netdev, ndev, context) != 0) return NOTIFY_DONE; dev_hold(vf_netdev); @@ -2484,10 +2492,31 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) return NOTIFY_OK; } +static int check_dev_is_matching_vf(struct net_device *event_ndev) +{ + /* Skip NetVSC interfaces */ + if (event_ndev->netdev_ops == &device_ops) + return -ENODEV; + + /* Avoid non-Ethernet type devices */ + if (event_ndev->type != ARPHRD_ETHER) + return -ENODEV; + + /* Avoid Vlan dev with same MAC registering as VF */ + if (is_vlan_dev(event_ndev)) + return -ENODEV; + + /* Avoid Bonding master dev with same MAC registering as VF */ + if (netif_is_bond_master(event_ndev)) + return -ENODEV; + + return 0; +} + static int netvsc_probe(struct hv_device *dev, const struct hv_vmbus_device_id *dev_id) { - struct net_device *net = NULL; + struct net_device *net = NULL, *vf_netdev; struct net_device_context *net_device_ctx; struct netvsc_device_info *device_info = NULL; struct netvsc_device *nvdev; @@ -2599,6 +2628,30 @@ static int netvsc_probe(struct hv_device *dev, } list_add(&net_device_ctx->list, &netvsc_dev_list); + + /* When the hv_netvsc driver is unloaded and reloaded, the + * NET_DEVICE_REGISTER for the vf device is replayed before probe + * is complete. 
This is because register_netdevice_notifier() gets + * registered before vmbus_driver_register() so that callback func + * is set before probe and we don't miss events like NETDEV_POST_INIT + * So, in this section we try to register the matching vf device that + * is present as a netdevice, knowing that its register call is not + * processed in the netvsc_netdev_notifier(as probing is progress and + * get_netvsc_byslot fails). + */ + for_each_netdev(dev_net(net), vf_netdev) { + ret = check_dev_is_matching_vf(vf_netdev); + if (ret != 0) + continue; + + if (net != get_netvsc_byslot(vf_netdev)) + continue; + + netvsc_prepare_bonding(vf_netdev); + netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE); + __netvsc_vf_setup(net, vf_netdev); + break; + } rtnl_unlock(); netvsc_devinfo_put(device_info); @@ -2754,28 +2807,17 @@ static int netvsc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); + int ret = 0; - /* Skip our own events */ - if (event_dev->netdev_ops == &device_ops) - return NOTIFY_DONE; - - /* Avoid non-Ethernet type devices */ - if (event_dev->type != ARPHRD_ETHER) - return NOTIFY_DONE; - - /* Avoid Vlan dev with same MAC registering as VF */ - if (is_vlan_dev(event_dev)) - return NOTIFY_DONE; - - /* Avoid Bonding master dev with same MAC registering as VF */ - if (netif_is_bond_master(event_dev)) + ret = check_dev_is_matching_vf(event_dev); + if (ret != 0) return NOTIFY_DONE; switch (event) { case NETDEV_POST_INIT: return netvsc_prepare_bonding(event_dev); case NETDEV_REGISTER: - return netvsc_register_vf(event_dev); + return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER); case NETDEV_UNREGISTER: return netvsc_unregister_vf(event_dev); case NETDEV_UP: diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 35e55f198e05..2930141d7dd2 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -259,4 +259,5 @@ static __exit void fake_remove_module(void) module_init(fakelb_init_module); module_exit(fake_remove_module); +MODULE_DESCRIPTION("IEEE 802.15.4 loopback driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c index 4bc05948f772..a78c692f2d3c 100644 --- a/drivers/net/ipa/ipa_interrupt.c +++ b/drivers/net/ipa/ipa_interrupt.c @@ -212,7 +212,7 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt) u32 unit_count; u32 unit; - unit_count = roundup(ipa->endpoint_count, 32); + unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32); for (unit = 0; unit < unit_count; unit++) { const struct reg *reg; u32 val; diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c index 60944a4beada..1afc4c47be73 100644 --- a/drivers/net/ipvlan/ipvtap.c +++ b/drivers/net/ipvlan/ipvtap.c @@ -237,4 +237,5 @@ static void __exit ipvtap_exit(void) module_exit(ipvtap_exit); MODULE_ALIAS_RTNL_LINK("ipvtap"); MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>"); +MODULE_DESCRIPTION("IP-VLAN based tap driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index b4d3b9cde8bd..92a7a36b93ac 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -835,14 +835,14 @@ static void nsim_dev_trap_report_work(struct work_struct *work) trap_report_dw.work); nsim_dev = nsim_trap_data->nsim_dev; - /* For each running port and enabled packet trap, generate a UDP - * packet with a random 5-tuple and report it. 
- */ if (!devl_trylock(priv_to_devlink(nsim_dev))) { - schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 0); + schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1); return; } + /* For each running port and enabled packet trap, generate a UDP + * packet with a random 5-tuple and report it. + */ list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) { if (!netif_running(nsim_dev_port->ns->netdev)) continue; diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c index 69b829e6ab35..7fd3377dbd79 100644 --- a/drivers/net/phy/mdio_devres.c +++ b/drivers/net/phy/mdio_devres.c @@ -131,4 +131,5 @@ int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, EXPORT_SYMBOL(__devm_of_mdiobus_register); #endif /* CONFIG_OF_MDIO */ +MODULE_DESCRIPTION("Network MDIO bus devres helpers"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek-ge-soc.c index 8a20d9889f10..0f3a1538a8b8 100644 --- a/drivers/net/phy/mediatek-ge-soc.c +++ b/drivers/net/phy/mediatek-ge-soc.c @@ -489,7 +489,7 @@ static int tx_r50_fill_result(struct phy_device *phydev, u16 tx_r50_cal_val, u16 reg, val; if (phydev->drv->phy_id == MTK_GPHY_ID_MT7988) - bias = -2; + bias = -1; val = clamp_val(bias + tx_r50_cal_val, 0, 63); @@ -705,6 +705,11 @@ restore: static void mt798x_phy_common_finetune(struct phy_device *phydev) { phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); + /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */ + __phy_write(phydev, 0x11, 0xc71); + __phy_write(phydev, 0x12, 0xc); + __phy_write(phydev, 0x10, 0x8fae); + /* EnabRandUpdTrig = 1 */ __phy_write(phydev, 0x11, 0x2f00); __phy_write(phydev, 0x12, 0xe); @@ -715,15 +720,56 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev) __phy_write(phydev, 0x12, 0x0); __phy_write(phydev, 0x10, 0x83aa); - /* TrFreeze = 0 */ + /* FfeUpdGainForce = 1(Enable), FfeUpdGainForceVal = 4 */ + __phy_write(phydev, 0x11, 0x240); + __phy_write(phydev, 0x12, 0x0); + __phy_write(phydev, 0x10, 0x9680); + + /* TrFreeze = 0 (mt7988 default) */ __phy_write(phydev, 0x11, 0x0); __phy_write(phydev, 0x12, 0x0); __phy_write(phydev, 0x10, 0x9686); + /* SSTrKp100 = 5 */ + /* SSTrKf100 = 6 */ + /* SSTrKp1000Mas = 5 */ + /* SSTrKf1000Mas = 6 */ /* SSTrKp1000Slv = 5 */ + /* SSTrKf1000Slv = 6 */ __phy_write(phydev, 0x11, 0xbaef); __phy_write(phydev, 0x12, 0x2e); __phy_write(phydev, 0x10, 0x968c); + phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); +} + +static void mt7981_phy_finetune(struct phy_device *phydev) +{ + u16 val[8] = { 0x01ce, 0x01c1, + 0x020f, 0x0202, + 0x03d0, 0x03c0, + 0x0013, 0x0005 }; + int i, k; + + /* 100M eye finetune: + * Keep middle level of TX MLT3 shapper as default. + * Only change TX MLT3 overshoot level here. 
+ */ + for (k = 0, i = 1; i < 12; i++) { + if (i % 3 == 0) + continue; + phy_write_mmd(phydev, MDIO_MMD_VEND1, i, val[k++]); + } + + phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); + /* ResetSyncOffset = 6 */ + __phy_write(phydev, 0x11, 0x600); + __phy_write(phydev, 0x12, 0x0); + __phy_write(phydev, 0x10, 0x8fc0); + + /* VgaDecRate = 1 */ + __phy_write(phydev, 0x11, 0x4c2a); + __phy_write(phydev, 0x12, 0x3e); + __phy_write(phydev, 0x10, 0x8fa4); /* MrvlTrFix100Kp = 3, MrvlTrFix100Kf = 2, * MrvlTrFix1000Kp = 3, MrvlTrFix1000Kf = 2 @@ -738,7 +784,7 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev) __phy_write(phydev, 0x10, 0x8ec0); phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); - /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9*/ + /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9 */ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234, MTK_PHY_TR_OPEN_LOOP_EN_MASK | MTK_PHY_LPF_X_AVERAGE_MASK, BIT(0) | FIELD_PREP(MTK_PHY_LPF_X_AVERAGE_MASK, 0x9)); @@ -771,48 +817,6 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev) phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LDO_OUTPUT_V, 0x2222); } -static void mt7981_phy_finetune(struct phy_device *phydev) -{ - u16 val[8] = { 0x01ce, 0x01c1, - 0x020f, 0x0202, - 0x03d0, 0x03c0, - 0x0013, 0x0005 }; - int i, k; - - /* 100M eye finetune: - * Keep middle level of TX MLT3 shapper as default. - * Only change TX MLT3 overshoot level here. - */ - for (k = 0, i = 1; i < 12; i++) { - if (i % 3 == 0) - continue; - phy_write_mmd(phydev, MDIO_MMD_VEND1, i, val[k++]); - } - - phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); - /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */ - __phy_write(phydev, 0x11, 0xc71); - __phy_write(phydev, 0x12, 0xc); - __phy_write(phydev, 0x10, 0x8fae); - - /* ResetSyncOffset = 6 */ - __phy_write(phydev, 0x11, 0x600); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x8fc0); - - /* VgaDecRate = 1 */ - __phy_write(phydev, 0x11, 0x4c2a); - __phy_write(phydev, 0x12, 0x3e); - __phy_write(phydev, 0x10, 0x8fa4); - - /* FfeUpdGainForce = 4 */ - __phy_write(phydev, 0x11, 0x240); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x9680); - - phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); -} - static void mt7988_phy_finetune(struct phy_device *phydev) { u16 val[12] = { 0x0187, 0x01cd, 0x01c8, 0x0182, @@ -827,17 +831,7 @@ static void mt7988_phy_finetune(struct phy_device *phydev) /* TCT finetune */ phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_TX_FILTER, 0x5); - /* Disable TX power saving */ - phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7, - MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3 << 8); - phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); - - /* SlvDSPreadyTime = 24, MasDSPreadyTime = 12 */ - __phy_write(phydev, 0x11, 0x671); - __phy_write(phydev, 0x12, 0xc); - __phy_write(phydev, 0x10, 0x8fae); - /* ResetSyncOffset = 5 */ __phy_write(phydev, 0x11, 0x500); __phy_write(phydev, 0x12, 0x0); @@ -845,13 +839,27 @@ static void mt7988_phy_finetune(struct phy_device *phydev) /* VgaDecRate is 1 at default on mt7988 */ - phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); + /* MrvlTrFix100Kp = 6, MrvlTrFix100Kf = 7, + * MrvlTrFix1000Kp = 6, MrvlTrFix1000Kf = 7 + */ + __phy_write(phydev, 0x11, 0xb90a); + __phy_write(phydev, 0x12, 0x6f); + __phy_write(phydev, 0x10, 0x8f82); + + /* RemAckCntLimitCtrl = 1 */ + __phy_write(phydev, 0x11, 0xfbba); + __phy_write(phydev, 0x12, 0xc3); + __phy_write(phydev, 0x10, 0x87f8); - phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_2A30); - /* 
TxClkOffset = 2 */ - __phy_modify(phydev, MTK_PHY_ANARG_RG, MTK_PHY_TCLKOFFSET_MASK, - FIELD_PREP(MTK_PHY_TCLKOFFSET_MASK, 0x2)); phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); + + /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 10 */ + phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234, + MTK_PHY_TR_OPEN_LOOP_EN_MASK | MTK_PHY_LPF_X_AVERAGE_MASK, + BIT(0) | FIELD_PREP(MTK_PHY_LPF_X_AVERAGE_MASK, 0xa)); + + /* rg_tr_lpf_cnt_val = 1023 */ + phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_LPF_CNT_VAL, 0x3ff); } static void mt798x_phy_eee(struct phy_device *phydev) @@ -884,11 +892,11 @@ static void mt798x_phy_eee(struct phy_device *phydev) MTK_PHY_LPI_SLV_SEND_TX_EN, FIELD_PREP(MTK_PHY_LPI_SLV_SEND_TX_TIMER_MASK, 0x120)); - phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG239, - MTK_PHY_LPI_SEND_LOC_TIMER_MASK | - MTK_PHY_LPI_TXPCS_LOC_RCV, - FIELD_PREP(MTK_PHY_LPI_SEND_LOC_TIMER_MASK, 0x117)); + /* Keep MTK_PHY_LPI_SEND_LOC_TIMER as 375 */ + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG239, + MTK_PHY_LPI_TXPCS_LOC_RCV); + /* This also fixes some IoT issues, such as CH340 */ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG2C7, MTK_PHY_MAX_GAIN_MASK | MTK_PHY_MIN_GAIN_MASK, FIELD_PREP(MTK_PHY_MAX_GAIN_MASK, 0x8) | @@ -922,7 +930,7 @@ static void mt798x_phy_eee(struct phy_device *phydev) __phy_write(phydev, 0x12, 0x0); __phy_write(phydev, 0x10, 0x9690); - /* REG_EEE_st2TrKf1000 = 3 */ + /* REG_EEE_st2TrKf1000 = 2 */ __phy_write(phydev, 0x11, 0x114f); __phy_write(phydev, 0x12, 0x2); __phy_write(phydev, 0x10, 0x969a); @@ -947,7 +955,7 @@ static void mt798x_phy_eee(struct phy_device *phydev) __phy_write(phydev, 0x12, 0x0); __phy_write(phydev, 0x10, 0x96b8); - /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 1 */ + /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 0 */ __phy_write(phydev, 0x11, 0x1463); __phy_write(phydev, 0x12, 0x0); __phy_write(phydev, 0x10, 0x96ca); @@ -1459,6 +1467,13 @@ static int mt7988_phy_probe(struct phy_device *phydev) if (err) return err; + /* Disable TX power saving at probing to: + * 1. Meet common mode compliance test criteria + * 2. 
Make sure that TX-VCM calibration works fine + */ + phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7, + MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3 << 8); + return mt798x_phy_calibration(phydev); } diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 894172a3e15f..337899c69738 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -421,9 +421,11 @@ static int rtl8211f_config_init(struct phy_device *phydev) ERR_PTR(ret)); return ret; } + + return genphy_soft_reset(phydev); } - return genphy_soft_reset(phydev); + return 0; } static int rtl821x_suspend(struct phy_device *phydev) diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index 40ce8abe6999..cc7d1113ece0 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -1437,4 +1437,5 @@ static int __init plip_init (void) module_init(plip_init); module_exit(plip_cleanup_module); +MODULE_DESCRIPTION("PLIP (parallel port) network module"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ppp/bsd_comp.c b/drivers/net/ppp/bsd_comp.c index db0dc36d12e3..55954594e157 100644 --- a/drivers/net/ppp/bsd_comp.c +++ b/drivers/net/ppp/bsd_comp.c @@ -1166,5 +1166,6 @@ static void __exit bsdcomp_cleanup(void) module_init(bsdcomp_init); module_exit(bsdcomp_cleanup); +MODULE_DESCRIPTION("PPP BSD-Compress compression module"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("ppp-compress-" __stringify(CI_BSD_COMPRESS)); diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c index 840da924708b..c33c3db3cc08 100644 --- a/drivers/net/ppp/ppp_async.c +++ b/drivers/net/ppp/ppp_async.c @@ -87,6 +87,7 @@ struct asyncppp { static int flag_time = HZ; module_param(flag_time, int, 0); MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)"); +MODULE_DESCRIPTION("PPP async serial channel module"); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_PPP); @@ -460,6 +461,10 @@ ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg) case PPPIOCSMRU: if (get_user(val, p)) break; + if (val > U16_MAX) { + err = -EINVAL; + break; + } if (val < PPP_MRU) val = PPP_MRU; ap->mru = val; diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c index e6d48e5c65a3..4d2ff63f2ee2 100644 --- a/drivers/net/ppp/ppp_deflate.c +++ b/drivers/net/ppp/ppp_deflate.c @@ -630,6 +630,7 @@ static void __exit deflate_cleanup(void) module_init(deflate_init); module_exit(deflate_cleanup); +MODULE_DESCRIPTION("PPP Deflate compression module"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE)); MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE_DRAFT)); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 0193af2d31c9..3dd52bf28f15 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -3604,6 +3604,7 @@ EXPORT_SYMBOL(ppp_input_error); EXPORT_SYMBOL(ppp_output_wakeup); EXPORT_SYMBOL(ppp_register_compressor); EXPORT_SYMBOL(ppp_unregister_compressor); +MODULE_DESCRIPTION("Generic PPP layer driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0); MODULE_ALIAS_RTNL_LINK("ppp"); diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c index 52d05ce4a281..45bf59ac8f57 100644 --- a/drivers/net/ppp/ppp_synctty.c +++ b/drivers/net/ppp/ppp_synctty.c @@ -724,5 +724,6 @@ ppp_sync_cleanup(void) module_init(ppp_sync_init); module_exit(ppp_sync_cleanup); +MODULE_DESCRIPTION("PPP synchronous TTY channel module"); MODULE_LICENSE("GPL"); 
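Many of the hunks in this range add nothing but a MODULE_DESCRIPTION() line: modpost warns about loadable modules that lack a description when the kernel is built with W=1, and these one-line patches silence that warning. As a rough sketch of the metadata block such a module ends up carrying (the module name, author and description text below are invented for illustration, not taken from any driver touched here):

#include <linux/module.h>

static int __init demo_init(void)
{
	pr_info("demo: loaded\n");
	return 0;
}

static void __exit demo_exit(void)
{
	pr_info("demo: unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);

/* Description, author and license live next to each other by convention. */
MODULE_DESCRIPTION("Demonstration of a complete module metadata block");
MODULE_AUTHOR("Example Author <author@example.org>");
MODULE_LICENSE("GPL");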
MODULE_ALIAS_LDISC(N_SYNC_PPP); diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 8e7238e97d0a..2ea4f4890d23 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -1007,26 +1007,21 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m, struct sk_buff *skb; int error = 0; - if (sk->sk_state & PPPOX_BOUND) { - error = -EIO; - goto end; - } + if (sk->sk_state & PPPOX_BOUND) + return -EIO; skb = skb_recv_datagram(sk, flags, &error); - if (error < 0) - goto end; + if (!skb) + return error; - if (skb) { - total_len = min_t(size_t, total_len, skb->len); - error = skb_copy_datagram_msg(skb, 0, m, total_len); - if (error == 0) { - consume_skb(skb); - return total_len; - } + total_len = min_t(size_t, total_len, skb->len); + error = skb_copy_datagram_msg(skb, 0, m, total_len); + if (error == 0) { + consume_skb(skb); + return total_len; } kfree_skb(skb); -end: return error; } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 4a4f8c8e79fa..8f95a562b8d0 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -653,6 +653,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun->tfiles[tun->numqueues - 1]); ntfile = rtnl_dereference(tun->tfiles[index]); ntfile->queue_index = index; + ntfile->xdp_rxq.queue_index = index; rcu_assign_pointer(tun->tfiles[tun->numqueues - 1], NULL); diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 99ec1d4a972d..8b6d6a1b3c2e 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -232,7 +232,7 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc) err = dm_read_shared_word(dev, 1, loc, &res); if (err < 0) { netdev_err(dev->net, "MDIO read error: %d\n", err); - return err; + return 0; } netdev_dbg(dev->net, diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index a6d653ff552a..d2aa2c5b1989 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1501,7 +1501,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) lan78xx_rx_urb_submit_all(dev); + local_bh_disable(); napi_schedule(&dev->napi); + local_bh_enable(); } return 0; @@ -3033,7 +3035,8 @@ static int lan78xx_reset(struct lan78xx_net *dev) if (dev->chipid == ID_REV_CHIP_ID_7801_) buf &= ~MAC_CR_GMII_EN_; - if (dev->chipid == ID_REV_CHIP_ID_7800_) { + if (dev->chipid == ID_REV_CHIP_ID_7800_ || + dev->chipid == ID_REV_CHIP_ID_7850_) { ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig); if (!ret && sig != EEPROM_INDICATOR) { /* Implies there is no external eeprom. 
Set mac speed */ @@ -3132,7 +3135,8 @@ static int lan78xx_open(struct net_device *net) done: mutex_unlock(&dev->dev_mutex); - usb_autopm_put_interface(dev->intf); + if (ret < 0) + usb_autopm_put_interface(dev->intf); return ret; } diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index a530f20ee257..2fa46baa589e 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -2105,6 +2105,11 @@ static const struct usb_device_id products[] = { .driver_info = (unsigned long) &smsc95xx_info, }, { + /* SYSTEC USB-SPEmodule1 10BASE-T1L Ethernet Device */ + USB_DEVICE(0x0878, 0x1400), + .driver_info = (unsigned long)&smsc95xx_info, + }, + { /* Microchip's EVB-LAN8670-USB 10BASE-T1S Ethernet Device */ USB_DEVICE(0x184F, 0x0051), .driver_info = (unsigned long)&smsc95xx_info, diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 578e36ea1589..cd4a6fe458f9 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1208,14 +1208,6 @@ static int veth_enable_xdp(struct net_device *dev) veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true); return err; } - - if (!veth_gro_requested(dev)) { - /* user-space did not require GRO, but adding XDP - * is supposed to get GRO working - */ - dev->features |= NETIF_F_GRO; - netdev_features_change(dev); - } } } @@ -1235,18 +1227,9 @@ static void veth_disable_xdp(struct net_device *dev) for (i = 0; i < dev->real_num_rx_queues; i++) rcu_assign_pointer(priv->rq[i].xdp_prog, NULL); - if (!netif_running(dev) || !veth_gro_requested(dev)) { + if (!netif_running(dev) || !veth_gro_requested(dev)) veth_napi_del(dev); - /* if user-space did not require GRO, since adding XDP - * enabled it, clear it now - */ - if (!veth_gro_requested(dev) && netif_running(dev)) { - dev->features &= ~NETIF_F_GRO; - netdev_features_change(dev); - } - } - veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false); } @@ -1478,7 +1461,8 @@ static int veth_alloc_queues(struct net_device *dev) struct veth_priv *priv = netdev_priv(dev); int i; - priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT); + priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq), + GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); if (!priv->rq) return -ENOMEM; @@ -1494,7 +1478,7 @@ static void veth_free_queues(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); - kfree(priv->rq); + kvfree(priv->rq); } static int veth_dev_init(struct net_device *dev) @@ -1654,6 +1638,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, } if (!old_prog) { + if (!veth_gro_requested(dev)) { + /* user-space did not require GRO, but adding + * XDP is supposed to get GRO working + */ + dev->features |= NETIF_F_GRO; + netdev_features_change(dev); + } + peer->hw_features &= ~NETIF_F_GSO_SOFTWARE; peer->max_mtu = max_mtu; } @@ -1669,6 +1661,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, if (dev->flags & IFF_UP) veth_disable_xdp(dev); + /* if user-space did not require GRO, since adding XDP + * enabled it, clear it now + */ + if (!veth_gro_requested(dev)) { + dev->features &= ~NETIF_F_GRO; + netdev_features_change(dev); + } + if (peer) { peer->hw_features |= NETIF_F_GSO_SOFTWARE; peer->max_mtu = ETH_MAX_MTU; diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 43e0db78d42b..a742cec44e3d 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1803,5 +1803,6 @@ static struct usb_driver ar5523_driver = { 
module_usb_driver(ar5523_driver); +MODULE_DESCRIPTION("Atheros AR5523 wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_FIRMWARE(AR5523_FIRMWARE_FILE); diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 41119fb177e3..4e6b4df8562f 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -1685,6 +1685,7 @@ static struct platform_driver wcn36xx_driver = { module_platform_driver(wcn36xx_driver); +MODULE_DESCRIPTION("Qualcomm Atheros WCN3660/3680 wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com"); MODULE_FIRMWARE(WLAN_NV_FILE); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c index d55f3271d619..4f0c1e1a8e60 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c @@ -20,6 +20,7 @@ static void __exit brcmf_bca_exit(void) brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_BCA, THIS_MODULE); } +MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Broadcom AP chipsets"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_IMPORT_NS(BRCMFMAC); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 133c5ea6429c..28d6a30cc010 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3779,8 +3779,10 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req, if (req->channels[i] == chan) break; } - if (i == req->n_channels) - req->channels[req->n_channels++] = chan; + if (i == req->n_channels) { + req->n_channels++; + req->channels[i] = chan; + } for (i = 0; i < req->n_ssids; i++) { if (req->ssids[i].ssid_len == ssid_len && diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c index f82fbbe3ecef..90d06cda03a2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c @@ -20,6 +20,7 @@ static void __exit brcmf_cyw_exit(void) brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_CYW, THIS_MODULE); } +MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Cypress/Infineon chipsets"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_IMPORT_NS(BRCMFMAC); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c index 02918d434556..b66135e3cff4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c @@ -20,6 +20,7 @@ static void __exit brcmf_wcc_exit(void) brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_WCC, THIS_MODULE); } +MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Broadcom mobility chipsets"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_IMPORT_NS(BRCMFMAC); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index b96f30d11644..dcc4810cb324 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -618,7 +618,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 2) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto 
out_free; } @@ -634,7 +634,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 1) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -650,7 +650,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 0) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -707,7 +707,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 2) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -723,7 +723,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 1) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -739,7 +739,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 0) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -1116,6 +1116,9 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) goto read_table; } + ret = PTR_ERR(wifi_pkg); + goto out_free; + read_table: fwrt->ppag_ver = tbl_rev; flags = &wifi_pkg->package.elements[1]; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index 798731ecbefd..b740c65a7dca 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -537,7 +537,7 @@ enum iwl_fw_dbg_config_cmd_type { }; /* LDBG_CFG_CMD_TYPE_API_E_VER_1 */ /* this token disables debug asserts in the firmware */ -#define IWL_FW_DBG_CONFIG_TOKEN 0x00011301 +#define IWL_FW_DBG_CONFIG_TOKEN 0x00010001 /** * struct iwl_fw_dbg_config_cmd - configure FW debug diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index 9c69d3674384..e6c0f928a6bb 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2019-2021, 2023 Intel Corporation + * Copyright (C) 2005-2014, 2019-2021, 2023-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -66,6 +66,16 @@ enum iwl_gen2_tx_fifo { IWL_GEN2_TRIG_TX_FIFO_VO, }; +enum iwl_bz_tx_fifo { + IWL_BZ_EDCA_TX_FIFO_BK, + IWL_BZ_EDCA_TX_FIFO_BE, + IWL_BZ_EDCA_TX_FIFO_VI, + IWL_BZ_EDCA_TX_FIFO_VO, + IWL_BZ_TRIG_TX_FIFO_BK, + IWL_BZ_TRIG_TX_FIFO_BE, + IWL_BZ_TRIG_TX_FIFO_VI, + IWL_BZ_TRIG_TX_FIFO_VO, +}; /** * enum iwl_tx_queue_cfg_actions - TXQ config options * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index e27774e7ed74..80fda056e46a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -19,7 +19,6 @@ * @fwrt_ptr: pointer to the buffer coming from fwrt * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the * transport's data. 
- * @trans_len: length of the valid data in trans_ptr * @fwrt_len: length of the valid data in fwrt_ptr */ struct iwl_fw_dump_ptrs { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index ffe2670720c9..abf8001bdac1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -128,6 +128,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) kfree(drv->fw.ucode_capa.cmd_versions); kfree(drv->fw.phy_integration_ver); kfree(drv->trans->dbg.pc_data); + drv->trans->dbg.pc_data = NULL; for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 402896988686..2f6774ec37b2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -668,7 +668,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { .has_eht = true, .eht_cap_elem = { .mac_cap_info[0] = - IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | IEEE80211_EHT_MAC_CAP0_OM_CONTROL | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 | @@ -793,7 +792,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { .has_eht = true, .eht_cap_elem = { .mac_cap_info[0] = - IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | IEEE80211_EHT_MAC_CAP0_OM_CONTROL | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2, @@ -1020,8 +1018,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL && iftype_data->eht_cap.has_eht) { iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &= - ~(IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | - IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | + ~(IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2); iftype_data->eht_cap.eht_cap_elem.phy_cap_info[3] &= ~(IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 4582afb149d7..05b64176859e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1279,7 +1279,9 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, mvm->net_detect = true; } else { - struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; + struct iwl_wowlan_config_cmd wowlan_config_cmd = { + .offloading_tid = 0, + }; wowlan_config_cmd.sta_id = mvmvif->deflink.ap_sta_id; @@ -1291,6 +1293,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, goto out_noreset; } + ret = iwl_mvm_sta_ensure_queue( + mvm, ap_sta->txq[wowlan_config_cmd.offloading_tid]); + if (ret) + goto out_noreset; + ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd, vif, mvmvif, ap_sta); if (ret) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index c4f96125cf33..25a5a31e63c2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -31,6 +31,17 @@ const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = { IWL_GEN2_TRIG_TX_FIFO_BK, }; +const u8 iwl_mvm_ac_to_bz_tx_fifo[] = { + IWL_BZ_EDCA_TX_FIFO_VO, + IWL_BZ_EDCA_TX_FIFO_VI, + IWL_BZ_EDCA_TX_FIFO_BE, + IWL_BZ_EDCA_TX_FIFO_BK, + IWL_BZ_TRIG_TX_FIFO_VO, + IWL_BZ_TRIG_TX_FIFO_VI, + IWL_BZ_TRIG_TX_FIFO_BE, + 
IWL_BZ_TRIG_TX_FIFO_BK, +}; + struct iwl_mvm_mac_iface_iterator_data { struct iwl_mvm *mvm; struct ieee80211_vif *vif; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 7f13dff04b26..53e26c3c3a9a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1600,7 +1600,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { - iwl_mvm_vif_dbgfs_add_link(mvm, vif); + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_vif_dbgfs_add_link(mvm, vif); ret = 0; goto out; } @@ -1640,7 +1641,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef); } - iwl_mvm_vif_dbgfs_add_link(mvm, vif); + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_vif_dbgfs_add_link(mvm, vif); if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && vif->type == NL80211_IFTYPE_STATION && !vif->p2p && @@ -3685,6 +3687,9 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm, NL80211_TDLS_SETUP); } + if (ret) + return ret; + for_each_sta_active_link(vif, sta, link_sta, i) link_sta->agg.max_rc_amsdu_len = 1; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 61170173f917..893b69fc841b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -81,7 +81,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); } - iwl_mvm_vif_dbgfs_add_link(mvm, vif); + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_vif_dbgfs_add_link(mvm, vif); if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && vif->type == NL80211_IFTYPE_STATION && !vif->p2p && @@ -437,6 +438,9 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm, mvmvif->ap_ibss_active = false; } + iwl_mvm_link_changed(mvm, vif, link_conf, + LINK_CONTEXT_MODIFY_ACTIVE, false); + if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) { int ret = iwl_mvm_esr_mode_inactive(mvm, vif); @@ -448,9 +452,6 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm, if (vif->type == NL80211_IFTYPE_MONITOR) iwl_mvm_mld_rm_snif_sta(mvm, vif); - iwl_mvm_link_changed(mvm, vif, link_conf, - LINK_CONTEXT_MODIFY_ACTIVE, false); - if (switching_chanctx) return; mvmvif->link[link_id]->phy_ctxt = NULL; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 40627961b834..81dbef6947f5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1581,12 +1581,16 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm, extern const u8 iwl_mvm_ac_to_tx_fifo[]; extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[]; +extern const u8 iwl_mvm_ac_to_bz_tx_fifo[]; static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm, enum ieee80211_ac_numbers ac) { - return iwl_mvm_has_new_tx_api(mvm) ? 
- iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac]; + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + return iwl_mvm_ac_to_bz_tx_fifo[ac]; + if (iwl_mvm_has_new_tx_api(mvm)) + return iwl_mvm_ac_to_gen2_tx_fifo[ac]; + return iwl_mvm_ac_to_tx_fifo[ac]; } struct iwl_rate_info { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 886d00098528..af15d470c69b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -505,6 +505,10 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, return false; mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + if (WARN_ON_ONCE(!mvm_sta->dup_data)) + return false; + dup_data = &mvm_sta->dup_data[queue]; /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 2a3ca9785974..c2e0cff740e9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1502,6 +1502,34 @@ out_err: return ret; } +int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, + struct ieee80211_txq *txq) +{ + struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); + int ret = -EINVAL; + + lockdep_assert_held(&mvm->mutex); + + if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) || + !txq->sta) { + return 0; + } + + if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) { + set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); + ret = 0; + } + + local_bh_disable(); + spin_lock(&mvm->add_stream_lock); + if (!list_empty(&mvmtxq->list)) + list_del_init(&mvmtxq->list); + spin_unlock(&mvm->add_stream_lock); + local_bh_enable(); + + return ret; +} + void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index b33a0ce096d4..3cf8a70274ce 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ @@ -571,6 +571,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, bool disable); void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq); void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 218fdf1ed530..2e653a417d62 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH */ @@ -972,6 +972,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) { /* 
End TE, notify mac80211 */ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID; + mvmvif->time_event_data.link_id = -1; iwl_mvm_p2p_roc_finished(mvm); ieee80211_remain_on_channel_expired(mvm->hw); } else if (le32_to_cpu(notif->start)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index db986bfc4dc3..461f26d9214e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -520,13 +520,24 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, } } +static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen, + const u8 *addr3_override) +{ + struct ieee80211_hdr *out_hdr = cmd; + + memcpy(cmd, hdr, hdrlen); + if (addr3_override) + memcpy(out_hdr->addr3, addr3_override, ETH_ALEN); +} + /* * Allocates and sets the Tx cmd the driver data pointers in the skb */ static struct iwl_device_tx_cmd * iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, int hdrlen, - struct ieee80211_sta *sta, u8 sta_id) + struct ieee80211_sta *sta, u8 sta_id, + const u8 *addr3_override) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_device_tx_cmd *dev_cmd; @@ -584,7 +595,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, cmd->len = cpu_to_le16((u16)skb->len); /* Copy MAC header from skb into command buffer */ - memcpy(cmd->hdr, hdr, hdrlen); + iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override); cmd->flags = cpu_to_le16(flags); cmd->rate_n_flags = cpu_to_le32(rate_n_flags); @@ -599,7 +610,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, cmd->len = cpu_to_le16((u16)skb->len); /* Copy MAC header from skb into command buffer */ - memcpy(cmd->hdr, hdr, hdrlen); + iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override); cmd->flags = cpu_to_le32(flags); cmd->rate_n_flags = cpu_to_le32(rate_n_flags); @@ -617,7 +628,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdrlen); + iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override); out: return dev_cmd; @@ -820,7 +831,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue); - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id); + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id, + NULL); if (!dev_cmd) return -1; @@ -1140,7 +1152,8 @@ static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, */ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, - struct ieee80211_sta *sta) + struct ieee80211_sta *sta, + const u8 *addr3_override) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_mvm_sta *mvmsta; @@ -1172,7 +1185,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_mvm_probe_resp_set_noa(mvm, skb); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, - sta, mvmsta->deflink.sta_id); + sta, mvmsta->deflink.sta_id, + addr3_override); if (!dev_cmd) goto drop; @@ -1294,9 +1308,11 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_tx_info info; struct sk_buff_head mpdus_skbs; + struct ieee80211_vif *vif; unsigned int payload_len; int ret; struct sk_buff *orig_skb = skb; + 
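The iwlwifi tx.c hunk above threads an optional address-3 override down to the one place where the 802.11 MAC header is serialized into the device TX command, so that A-MSDU frames can carry the BSSID in address 3 (per IEEE 802.11-2020, Table 9-30) while the skb keeps its original header for subframe generation. A simplified, self-contained sketch of that copy-with-override step follows; the function name and the "cmd_hdr" buffer are placeholders, not the driver's real command layout:

#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

/* Copy an 802.11 MAC header into a command buffer, optionally replacing
 * address 3 (e.g. with the BSSID for A-MSDU frames). Mirrors the pattern
 * introduced in the hunk above.
 */
static void copy_mac_hdr(void *cmd_hdr, const void *hdr, int hdrlen,
			 const u8 *addr3_override)
{
	struct ieee80211_hdr *out = cmd_hdr;

	memcpy(cmd_hdr, hdr, hdrlen);
	if (addr3_override)
		ether_addr_copy(out->addr3, addr3_override);
}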
const u8 *addr3; if (WARN_ON_ONCE(!mvmsta)) return -1; @@ -1307,26 +1323,59 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, memcpy(&info, skb->cb, sizeof(info)); if (!skb_is_gso(skb)) - return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); + return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL); payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; if (payload_len <= skb_shinfo(skb)->gso_size) - return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); + return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL); __skb_queue_head_init(&mpdus_skbs); + vif = info.control.vif; + if (!vif) + return -1; + ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs); if (ret) return ret; WARN_ON(skb_queue_empty(&mpdus_skbs)); + /* + * As described in IEEE sta 802.11-2020, table 9-30 (Address + * field contents), A-MSDU address 3 should contain the BSSID + * address. + * Pass address 3 down to iwl_mvm_tx_mpdu() and further to set it + * in the command header. We need to preserve the original + * address 3 in the skb header to correctly create all the + * A-MSDU subframe headers from it. + */ + switch (vif->type) { + case NL80211_IFTYPE_STATION: + addr3 = vif->cfg.ap_addr; + break; + case NL80211_IFTYPE_AP: + addr3 = vif->addr; + break; + default: + addr3 = NULL; + break; + } + while (!skb_queue_empty(&mpdus_skbs)) { + struct ieee80211_hdr *hdr; + bool amsdu; + skb = __skb_dequeue(&mpdus_skbs); + hdr = (void *)skb->data; + amsdu = ieee80211_is_data_qos(hdr->frame_control) && + (*ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_A_MSDU_PRESENT); - ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); + ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta, + amsdu ? addr3 : NULL); if (ret) { /* Free skbs created as part of TSO logic that have not yet been dequeued */ __skb_queue_purge(&mpdus_skbs); diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c index ce0179b8ab36..0073b5e0f9c9 100644 --- a/drivers/net/wireless/intersil/p54/p54spi.c +++ b/drivers/net/wireless/intersil/p54/p54spi.c @@ -700,6 +700,7 @@ static struct spi_driver p54spi_driver = { module_spi_driver(p54spi_driver); +MODULE_DESCRIPTION("Prism54 SPI wireless driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>"); MODULE_ALIAS("spi:cx3110x"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 89d738deea62..e2146d30e553 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -728,6 +728,7 @@ const struct ieee80211_ops mt7603_ops = { .set_sar_specs = mt7603_set_sar_specs, }; +MODULE_DESCRIPTION("MediaTek MT7603E and MT76x8 wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); static int __init mt7603_init(void) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index dab16b5fc386..0971c164b57e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -1375,4 +1375,5 @@ const struct ieee80211_ops mt7615_ops = { }; EXPORT_SYMBOL_GPL(mt7615_ops); +MODULE_DESCRIPTION("MediaTek MT7615E and MT7663E wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c index ac036a072439..87a956ea3ad7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c 
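The iwl_mvm_tx_skb_sta() loop above only applies that address-3 override to TSO segments that actually carry A-MSDU subframes, which it detects from the QoS control field. A hedged sketch of the same test, factored into a helper (the helper name is invented here; the driver open-codes the check):

#include <linux/ieee80211.h>
#include <linux/skbuff.h>

/* True if the frame is a QoS data frame whose QoS control field has the
 * "A-MSDU present" bit set - the same condition used in the loop above.
 */
static bool frame_is_amsdu(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	return ieee80211_is_data_qos(hdr->frame_control) &&
	       (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT);
}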
@@ -270,4 +270,5 @@ static void __exit mt7615_exit(void) module_init(mt7615_init); module_exit(mt7615_exit); +MODULE_DESCRIPTION("MediaTek MT7615E MMIO helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c index 67cedd2555f9..9692890ba51b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c @@ -253,4 +253,5 @@ module_sdio_driver(mt7663s_driver); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_DESCRIPTION("MediaTek MT7663S (SDIO) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index 04963b9f7498..df737e1ff27b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -281,4 +281,5 @@ module_usb_driver(mt7663u_driver); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_DESCRIPTION("MediaTek MT7663U (USB) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c index 0052d103e276..820b39590027 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c @@ -349,4 +349,5 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_register_device); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek MT7663 SDIO/USB helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 96494ba2fdf7..3a20ba0d2492 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -3160,4 +3160,5 @@ exit: EXPORT_SYMBOL_GPL(mt76_connac2_mcu_fill_message); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_DESCRIPTION("MediaTek MT76x connac layer helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index c3a392a1a659..bcd24c9072ec 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -342,4 +342,5 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev) return 0; } +MODULE_DESCRIPTION("MediaTek MT76x EEPROM helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index 9277ff38b7a2..293e66fa83d5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -302,6 +302,7 @@ static const struct pci_device_id mt76x0e_device_table[] = { MODULE_DEVICE_TABLE(pci, mt76x0e_device_table); MODULE_FIRMWARE(MT7610E_FIRMWARE); MODULE_FIRMWARE(MT7650E_FIRMWARE); +MODULE_DESCRIPTION("MediaTek MT76x0E (PCIe) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); static struct pci_driver mt76x0e_driver = { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 0422c332354a..dd042949cf82 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -336,6 +336,7 @@ err: MODULE_DEVICE_TABLE(usb, mt76x0_device_table); MODULE_FIRMWARE(MT7610E_FIRMWARE); MODULE_FIRMWARE(MT7610U_FIRMWARE); +MODULE_DESCRIPTION("MediaTek MT76x0U (USB) wireless driver"); MODULE_LICENSE("GPL"); static struct usb_driver mt76x0_driver = { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c index 02da543dfc5c..b2cc44914294 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c @@ -293,4 +293,5 @@ void mt76x02u_init_mcu(struct mt76_dev *dev) EXPORT_SYMBOL_GPL(mt76x02u_init_mcu); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>"); +MODULE_DESCRIPTION("MediaTek MT76x02 MCU helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index 8a0e8124b894..8020446be37b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -696,4 +696,5 @@ void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev) } EXPORT_SYMBOL_GPL(mt76x02_config_mac_addr_list); +MODULE_DESCRIPTION("MediaTek MT76x02 helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c index 8c01855885ce..1fe5f5a02f93 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c @@ -506,4 +506,5 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev) } EXPORT_SYMBOL_GPL(mt76x2_eeprom_init); +MODULE_DESCRIPTION("MediaTek MT76x2 EEPROM helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index df85ebc6e1df..30959746e924 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -165,6 +165,7 @@ mt76x2e_resume(struct pci_dev *pdev) MODULE_DEVICE_TABLE(pci, mt76x2e_device_table); MODULE_FIRMWARE(MT7662_FIRMWARE); MODULE_FIRMWARE(MT7662_ROM_PATCH); +MODULE_DESCRIPTION("MediaTek MT76x2E (PCIe) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); static struct pci_driver mt76pci_driver = { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index 55068f3252ef..ca78e14251c2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -147,4 +147,5 @@ static struct usb_driver mt76x2u_driver = { module_usb_driver(mt76x2u_driver); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>"); +MODULE_DESCRIPTION("MediaTek MT76x2U (USB) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index aff4f21e843d..3039f53e2245 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -958,4 +958,5 @@ static void __exit mt7915_exit(void) module_init(mt7915_init); module_exit(mt7915_exit); +MODULE_DESCRIPTION("MediaTek MT7915E MMIO helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 0645417e0582..0d5adc5ddae3 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -1418,5 +1418,6 @@ const struct ieee80211_ops mt7921_ops = { }; EXPORT_SYMBOL_GPL(mt7921_ops); +MODULE_DESCRIPTION("MediaTek MT7921 core driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index 57903c6e4f11..dde26f327478 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -544,4 +544,5 @@ MODULE_FIRMWARE(MT7922_FIRMWARE_WM); MODULE_FIRMWARE(MT7922_ROM_PATCH); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_DESCRIPTION("MediaTek MT7921E (PCIe) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c index 7591e54d2897..a9ce1e746b95 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c @@ -323,5 +323,6 @@ static struct sdio_driver mt7921s_driver = { .drv.pm = pm_sleep_ptr(&mt7921s_pm_ops), }; module_sdio_driver(mt7921s_driver); +MODULE_DESCRIPTION("MediaTek MT7921S (SDIO) wireless driver"); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c index e5258c74fc07..8b7c03c47598 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c @@ -336,5 +336,6 @@ static struct usb_driver mt7921u_driver = { }; module_usb_driver(mt7921u_driver); +MODULE_DESCRIPTION("MediaTek MT7921U (USB) wireless driver"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c index 8f1075da4903..125a1be3cb64 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -1450,4 +1450,5 @@ const struct ieee80211_ops mt7925_ops = { EXPORT_SYMBOL_GPL(mt7925_ops); MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek MT7925 core driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c index 734f31ee40d3..1fd99a856541 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c @@ -583,4 +583,5 @@ MODULE_FIRMWARE(MT7925_FIRMWARE_WM); MODULE_FIRMWARE(MT7925_ROM_PATCH); MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_DESCRIPTION("MediaTek MT7925E (PCIe) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c index 9b885c5b3ed5..1e0f094fc905 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c @@ -329,4 +329,5 @@ static struct usb_driver mt7925u_driver = { module_usb_driver(mt7925u_driver); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_DESCRIPTION("MediaTek MT7925U (USB) wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git 
a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c index 502be22dbe36..c42101aa9e45 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c @@ -862,5 +862,6 @@ int mt792x_load_firmware(struct mt792x_dev *dev) } EXPORT_SYMBOL_GPL(mt792x_load_firmware); +MODULE_DESCRIPTION("MediaTek MT792x core driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c index 2dd283caed36..589a3efb9f8c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c @@ -314,5 +314,6 @@ void mt792xu_disconnect(struct usb_interface *usb_intf) } EXPORT_SYMBOL_GPL(mt792xu_disconnect); +MODULE_DESCRIPTION("MediaTek MT792x USB helpers"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index 3c729b563edc..699be57309c2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -4477,7 +4477,8 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy) skb_put_data(skb, &req, sizeof(req)); /* cck and ofdm */ - skb_put_data(skb, &la.cck, sizeof(la.cck) + sizeof(la.ofdm)); + skb_put_data(skb, &la.cck, sizeof(la.cck)); + skb_put_data(skb, &la.ofdm, sizeof(la.ofdm)); /* ht20 */ skb_put_data(skb, &la.mcs[0], 8); /* ht40 */ diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c index c50d89a445e9..9f2abfa273c9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c @@ -650,4 +650,5 @@ static void __exit mt7996_exit(void) module_init(mt7996_init); module_exit(mt7996_exit); +MODULE_DESCRIPTION("MediaTek MT7996 MMIO helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c index c52d550f0c32..3e88798df017 100644 --- a/drivers/net/wireless/mediatek/mt76/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/sdio.c @@ -672,4 +672,5 @@ EXPORT_SYMBOL_GPL(mt76s_init); MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_DESCRIPTION("MediaTek MT76x SDIO helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 1584665fe3cb..5a0bcb5071bd 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -1128,4 +1128,5 @@ int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf) EXPORT_SYMBOL_GPL(mt76u_init); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>"); +MODULE_DESCRIPTION("MediaTek MT76x USB helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c index fc76c66ff1a5..d6c01a2dd198 100644 --- a/drivers/net/wireless/mediatek/mt76/util.c +++ b/drivers/net/wireless/mediatek/mt76/util.c @@ -138,4 +138,5 @@ int __mt76_worker_fn(void *ptr) } EXPORT_SYMBOL_GPL(__mt76_worker_fn); +MODULE_DESCRIPTION("MediaTek MT76x helpers"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c 
b/drivers/net/wireless/microchip/wilc1000/netdev.c index 91d71e0f7ef2..81e8f25863f5 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -1018,5 +1018,6 @@ unregister_netdev: return ERR_PTR(ret); } +MODULE_DESCRIPTION("Atmel WILC1000 core wireless driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(WILC1000_FW(WILC1000_API_VER)); diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index 0d13e3e46e98..d6d394693090 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -984,4 +984,5 @@ static struct sdio_driver wilc_sdio_driver = { module_driver(wilc_sdio_driver, sdio_register_driver, sdio_unregister_driver); +MODULE_DESCRIPTION("Atmel WILC1000 SDIO wireless driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index 77b4cdff73c3..1d8b241ce43c 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -273,6 +273,7 @@ static struct spi_driver wilc_spi_driver = { .remove = wilc_bus_remove, }; module_spi_driver(wilc_spi_driver); +MODULE_DESCRIPTION("Atmel WILC1000 SPI wireless driver"); MODULE_LICENSE("GPL"); static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len) diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c index 301bd0043a43..4e5b351f80f0 100644 --- a/drivers/net/wireless/ti/wl1251/sdio.c +++ b/drivers/net/wireless/ti/wl1251/sdio.c @@ -343,5 +343,6 @@ static void __exit wl1251_sdio_exit(void) module_init(wl1251_sdio_init); module_exit(wl1251_sdio_exit); +MODULE_DESCRIPTION("TI WL1251 SDIO helpers"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>"); diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c index 29292f06bd3d..1936bb3af54a 100644 --- a/drivers/net/wireless/ti/wl1251/spi.c +++ b/drivers/net/wireless/ti/wl1251/spi.c @@ -342,6 +342,7 @@ static struct spi_driver wl1251_spi_driver = { module_spi_driver(wl1251_spi_driver); +MODULE_DESCRIPTION("TI WL1251 SPI helpers"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>"); MODULE_ALIAS("spi:wl1251"); diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c index de045fe4ca1e..b26d42b4e3cc 100644 --- a/drivers/net/wireless/ti/wl12xx/main.c +++ b/drivers/net/wireless/ti/wl12xx/main.c @@ -1955,6 +1955,7 @@ module_param_named(tcxo, tcxo_param, charp, 0); MODULE_PARM_DESC(tcxo, "TCXO clock: 19.2, 26, 38.4, 52, 16.368, 32.736, 16.8, 33.6"); +MODULE_DESCRIPTION("TI WL12xx wireless driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE); diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 20d9181b3410..2ccac1cdec01 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -2086,6 +2086,7 @@ module_param_named(num_rx_desc, num_rx_desc_param, int, 0400); MODULE_PARM_DESC(num_rx_desc_param, "Number of Rx descriptors: u8 (default is 32)"); +MODULE_DESCRIPTION("TI WiLink 8 wireless driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); MODULE_FIRMWARE(WL18XX_FW_NAME); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index fb9ed97774c7..5736acb4d206 100644 --- 
a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -6793,6 +6793,7 @@ MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery"); module_param(no_recovery, int, 0600); MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck."); +MODULE_DESCRIPTION("TI WLAN core driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index f0686635db46..eb5482ed76ae 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -447,6 +447,7 @@ module_sdio_driver(wl1271_sdio_driver); module_param(dump, bool, 0600); MODULE_PARM_DESC(dump, "Enable sdio read/write dumps."); +MODULE_DESCRIPTION("TI WLAN SDIO helpers"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 7d9a139db59e..0aa2b2f3c5c9 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -562,6 +562,7 @@ static struct spi_driver wl1271_spi_driver = { }; module_spi_driver(wl1271_spi_driver); +MODULE_DESCRIPTION("TI WLAN SPI helpers"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index d7503aef599f..ef76850d9bcd 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -104,13 +104,12 @@ bool provides_xdp_headroom = true; module_param(provides_xdp_headroom, bool, 0644); static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, - u8 status); + s8 status); static void make_tx_response(struct xenvif_queue *queue, - struct xen_netif_tx_request *txp, + const struct xen_netif_tx_request *txp, unsigned int extra_count, - s8 st); -static void push_tx_responses(struct xenvif_queue *queue); + s8 status); static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx); @@ -208,13 +207,9 @@ static void xenvif_tx_err(struct xenvif_queue *queue, unsigned int extra_count, RING_IDX end) { RING_IDX cons = queue->tx.req_cons; - unsigned long flags; do { - spin_lock_irqsave(&queue->response_lock, flags); make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR); - push_tx_responses(queue); - spin_unlock_irqrestore(&queue->response_lock, flags); if (cons == end) break; RING_COPY_REQUEST(&queue->tx, cons++, txp); @@ -465,12 +460,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue, for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS; nr_slots--) { if (unlikely(!txp->size)) { - unsigned long flags; - - spin_lock_irqsave(&queue->response_lock, flags); make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY); - push_tx_responses(queue); - spin_unlock_irqrestore(&queue->response_lock, flags); ++txp; continue; } @@ -496,14 +486,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue, for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) { if (unlikely(!txp->size)) { - unsigned long flags; - - spin_lock_irqsave(&queue->response_lock, flags); make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY); - push_tx_responses(queue); - spin_unlock_irqrestore(&queue->response_lock, - flags); continue; } @@ -995,7 +979,6 @@ static void 
xenvif_tx_build_gops(struct xenvif_queue *queue, (ret == 0) ? XEN_NETIF_RSP_OKAY : XEN_NETIF_RSP_ERROR); - push_tx_responses(queue); continue; } @@ -1007,7 +990,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, make_tx_response(queue, &txreq, extra_count, XEN_NETIF_RSP_OKAY); - push_tx_responses(queue); continue; } @@ -1433,8 +1415,35 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget) return work_done; } +static void _make_tx_response(struct xenvif_queue *queue, + const struct xen_netif_tx_request *txp, + unsigned int extra_count, + s8 status) +{ + RING_IDX i = queue->tx.rsp_prod_pvt; + struct xen_netif_tx_response *resp; + + resp = RING_GET_RESPONSE(&queue->tx, i); + resp->id = txp->id; + resp->status = status; + + while (extra_count-- != 0) + RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; + + queue->tx.rsp_prod_pvt = ++i; +} + +static void push_tx_responses(struct xenvif_queue *queue) +{ + int notify; + + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); + if (notify) + notify_remote_via_irq(queue->tx_irq); +} + static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, - u8 status) + s8 status) { struct pending_tx_info *pending_tx_info; pending_ring_idx_t index; @@ -1444,8 +1453,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, spin_lock_irqsave(&queue->response_lock, flags); - make_tx_response(queue, &pending_tx_info->req, - pending_tx_info->extra_count, status); + _make_tx_response(queue, &pending_tx_info->req, + pending_tx_info->extra_count, status); /* Release the pending index before pusing the Tx response so * its available before a new Tx request is pushed by the @@ -1459,32 +1468,19 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, spin_unlock_irqrestore(&queue->response_lock, flags); } - static void make_tx_response(struct xenvif_queue *queue, - struct xen_netif_tx_request *txp, + const struct xen_netif_tx_request *txp, unsigned int extra_count, - s8 st) + s8 status) { - RING_IDX i = queue->tx.rsp_prod_pvt; - struct xen_netif_tx_response *resp; - - resp = RING_GET_RESPONSE(&queue->tx, i); - resp->id = txp->id; - resp->status = st; - - while (extra_count-- != 0) - RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; + unsigned long flags; - queue->tx.rsp_prod_pvt = ++i; -} + spin_lock_irqsave(&queue->response_lock, flags); -static void push_tx_responses(struct xenvif_queue *queue) -{ - int notify; + _make_tx_response(queue, txp, extra_count, status); + push_tx_responses(queue); - RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); - if (notify) - notify_remote_via_irq(queue->tx_irq); + spin_unlock_irqrestore(&queue->response_lock, flags); } static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) @@ -1782,5 +1778,6 @@ static void __exit netback_fini(void) } module_exit(netback_fini); +MODULE_DESCRIPTION("Xen backend network device module"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("xen-backend:vif"); diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index bb3726b622ad..4d0c527e8576 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1496,19 +1496,21 @@ static int btt_blk_init(struct btt *btt) { struct nd_btt *nd_btt = btt->nd_btt; struct nd_namespace_common *ndns = nd_btt->ndns; - int rc = -ENOMEM; + struct queue_limits lim = { + .logical_block_size = btt->sector_size, + .max_hw_sectors = UINT_MAX, + }; + int rc; - btt->btt_disk = blk_alloc_disk(NUMA_NO_NODE); - if (!btt->btt_disk) - return 
-ENOMEM; + btt->btt_disk = blk_alloc_disk(&lim, NUMA_NO_NODE); + if (IS_ERR(btt->btt_disk)) + return PTR_ERR(btt->btt_disk); nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name); btt->btt_disk->first_minor = 0; btt->btt_disk->fops = &btt_fops; btt->btt_disk->private_data = btt; - blk_queue_logical_block_size(btt->btt_disk->queue, btt->sector_size); - blk_queue_max_hw_sectors(btt->btt_disk->queue, UINT_MAX); blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue); blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 4e8fdcb3f1c8..8dcc10b6db5b 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -451,6 +451,11 @@ static int pmem_attach_disk(struct device *dev, { struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); struct nd_region *nd_region = to_nd_region(dev->parent); + struct queue_limits lim = { + .logical_block_size = pmem_sector_size(ndns), + .physical_block_size = PAGE_SIZE, + .max_hw_sectors = UINT_MAX, + }; int nid = dev_to_node(dev), fua; struct resource *res = &nsio->res; struct range bb_range; @@ -497,9 +502,9 @@ static int pmem_attach_disk(struct device *dev, return -EBUSY; } - disk = blk_alloc_disk(nid); - if (!disk) - return -ENOMEM; + disk = blk_alloc_disk(&lim, nid); + if (IS_ERR(disk)) + return PTR_ERR(disk); q = disk->queue; pmem->disk = disk; @@ -539,9 +544,6 @@ static int pmem_attach_disk(struct device *dev, pmem->virt_addr = addr; blk_queue_write_cache(q, true, fua); - blk_queue_physical_block_size(q, PAGE_SIZE); - blk_queue_logical_block_size(q, pmem_sector_size(ndns)); - blk_queue_max_hw_sectors(q, UINT_MAX); blk_queue_flag_set(QUEUE_FLAG_NONROT, q); blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q); if (pmem->pfn_flags & PFN_MAP) diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c index a23ab5c968b9..a3455f1d67fa 100644 --- a/drivers/nvme/common/auth.c +++ b/drivers/nvme/common/auth.c @@ -471,4 +471,5 @@ int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key) } EXPORT_SYMBOL_GPL(nvme_auth_generate_key); +MODULE_DESCRIPTION("NVMe Authentication framework"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/common/keyring.c b/drivers/nvme/common/keyring.c index a5c0431c101c..6f7e7a8fa5ae 100644 --- a/drivers/nvme/common/keyring.c +++ b/drivers/nvme/common/keyring.c @@ -181,5 +181,6 @@ static void __exit nvme_keyring_exit(void) MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>"); +MODULE_DESCRIPTION("NVMe Keyring implementation"); module_init(nvme_keyring_init); module_exit(nvme_keyring_exit); diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index 596bb11eeba5..a480cdeac288 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -797,6 +797,7 @@ static int apple_nvme_init_request(struct blk_mq_tag_set *set, static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown) { + enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl); u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS); bool dead = false, freeze = false; unsigned long flags; @@ -808,8 +809,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown) if (csts & NVME_CSTS_CFS) dead = true; - if (anv->ctrl.state == NVME_CTRL_LIVE || - anv->ctrl.state == NVME_CTRL_RESETTING) { + if (state == NVME_CTRL_LIVE || + state == NVME_CTRL_RESETTING) { freeze = true; nvme_start_freeze(&anv->ctrl); } @@ -881,7 +882,7 @@ static enum blk_eh_timer_return apple_nvme_timeout(struct request *req) unsigned long 
flags; u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS); - if (anv->ctrl.state != NVME_CTRL_LIVE) { + if (nvme_ctrl_state(&anv->ctrl) != NVME_CTRL_LIVE) { /* * From rdma.c: * If we are resetting, connecting or deleting we should @@ -985,10 +986,10 @@ static void apple_nvme_reset_work(struct work_struct *work) u32 boot_status, aqa; struct apple_nvme *anv = container_of(work, struct apple_nvme, ctrl.reset_work); + enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl); - if (anv->ctrl.state != NVME_CTRL_RESETTING) { - dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", - anv->ctrl.state); + if (state != NVME_CTRL_RESETTING) { + dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", state); ret = -ENODEV; goto out; } @@ -1515,7 +1516,7 @@ static int apple_nvme_probe(struct platform_device *pdev) goto put_dev; } - anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset); + anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL); if (IS_ERR(anv->ctrl.admin_q)) { ret = -ENOMEM; goto put_dev; diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c index 72c0525c75f5..a264b3ae078b 100644 --- a/drivers/nvme/host/auth.c +++ b/drivers/nvme/host/auth.c @@ -48,11 +48,6 @@ struct nvme_dhchap_queue_context { static struct workqueue_struct *nvme_auth_wq; -#define nvme_auth_flags_from_qid(qid) \ - (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED -#define nvme_auth_queue_from_qid(ctrl, qid) \ - (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q - static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl) { return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues + @@ -63,10 +58,15 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid, void *data, size_t data_len, bool auth_send) { struct nvme_command cmd = {}; - blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid); - struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid); + nvme_submit_flags_t flags = NVME_SUBMIT_RETRY; + struct request_queue *q = ctrl->fabrics_q; int ret; + if (qid != 0) { + flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED; + q = ctrl->connect_q; + } + cmd.auth_common.opcode = nvme_fabrics_command; cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER; cmd.auth_common.spsp0 = 0x01; @@ -80,8 +80,7 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid, } ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len, - qid == 0 ? NVME_QID_ANY : qid, - 0, flags); + qid == 0 ? NVME_QID_ANY : qid, flags); if (ret > 0) dev_warn(ctrl->device, "qid %d auth_send failed with status %d\n", qid, ret); @@ -897,7 +896,7 @@ static void nvme_ctrl_auth_work(struct work_struct *work) * If the ctrl is no connected, bail as reconnect will handle * authentication. 
*/ - if (ctrl->state != NVME_CTRL_LIVE) + if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) return; /* Authenticate admin queue first */ diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c index 20f46c230885..6f2ebb5fcdb0 100644 --- a/drivers/nvme/host/constants.c +++ b/drivers/nvme/host/constants.c @@ -171,15 +171,15 @@ static const char * const nvme_statuses[] = { [NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command", }; -const unsigned char *nvme_get_error_status_str(u16 status) +const char *nvme_get_error_status_str(u16 status) { status &= 0x7ff; if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status]) - return nvme_statuses[status & 0x7ff]; + return nvme_statuses[status]; return "Unknown"; } -const unsigned char *nvme_get_opcode_str(u8 opcode) +const char *nvme_get_opcode_str(u8 opcode) { if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode]) return nvme_ops[opcode]; @@ -187,7 +187,7 @@ const unsigned char *nvme_get_opcode_str(u8 opcode) } EXPORT_SYMBOL_GPL(nvme_get_opcode_str); -const unsigned char *nvme_get_admin_opcode_str(u8 opcode) +const char *nvme_get_admin_opcode_str(u8 opcode) { if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode]) return nvme_admin_ops[opcode]; @@ -195,7 +195,7 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode) } EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str); -const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) { +const char *nvme_get_fabrics_opcode_str(u8 opcode) { if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode]) return nvme_fabrics_ops[opcode]; return "Unknown"; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 85ab0fcf9e88..00864a634470 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -114,12 +114,21 @@ static DEFINE_MUTEX(nvme_subsystems_lock); static DEFINE_IDA(nvme_instance_ida); static dev_t nvme_ctrl_base_chr_devt; -static struct class *nvme_class; -static struct class *nvme_subsys_class; +static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env); +static const struct class nvme_class = { + .name = "nvme", + .dev_uevent = nvme_class_uevent, +}; + +static const struct class nvme_subsys_class = { + .name = "nvme-subsystem", +}; static DEFINE_IDA(nvme_ns_chr_minor_ida); static dev_t nvme_ns_chr_devt; -static struct class *nvme_ns_chr_class; +static const struct class nvme_ns_chr_class = { + .name = "nvme-generic", +}; static void nvme_put_subsystem(struct nvme_subsystem *subsys); static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, @@ -338,6 +347,30 @@ static void nvme_log_error(struct request *req) nr->status & NVME_SC_DNR ? "DNR " : ""); } +static void nvme_log_err_passthru(struct request *req) +{ + struct nvme_ns *ns = req->q->queuedata; + struct nvme_request *nr = nvme_req(req); + + pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s" + "cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n", + ns ? ns->disk->disk_name : dev_name(nr->ctrl->device), + ns ? nvme_get_opcode_str(nr->cmd->common.opcode) : + nvme_get_admin_opcode_str(nr->cmd->common.opcode), + nr->cmd->common.opcode, + nvme_get_error_status_str(nr->status), + nr->status >> 8 & 7, /* Status Code Type */ + nr->status & 0xff, /* Status Code */ + nr->status & NVME_SC_MORE ? "MORE " : "", + nr->status & NVME_SC_DNR ? 
"DNR " : "", + nr->cmd->common.cdw10, + nr->cmd->common.cdw11, + nr->cmd->common.cdw12, + nr->cmd->common.cdw13, + nr->cmd->common.cdw14, + nr->cmd->common.cdw14); +} + enum nvme_disposition { COMPLETE, RETRY, @@ -385,8 +418,12 @@ static inline void nvme_end_req(struct request *req) { blk_status_t status = nvme_error_status(nvme_req(req)->status); - if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) - nvme_log_error(req); + if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) { + if (blk_rq_is_passthrough(req)) + nvme_log_err_passthru(req); + else + nvme_log_error(req); + } nvme_end_req_zoned(req); nvme_trace_bio_complete(req); if (req->cmd_flags & REQ_NVME_MPATH) @@ -679,10 +716,21 @@ static inline void nvme_clear_nvme_request(struct request *req) /* initialize a passthrough request */ void nvme_init_request(struct request *req, struct nvme_command *cmd) { - if (req->q->queuedata) + struct nvme_request *nr = nvme_req(req); + bool logging_enabled; + + if (req->q->queuedata) { + struct nvme_ns *ns = req->q->disk->private_data; + + logging_enabled = ns->head->passthru_err_log_enabled; req->timeout = NVME_IO_TIMEOUT; - else /* no queuedata implies admin queue */ + } else { /* no queuedata implies admin queue */ + logging_enabled = nr->ctrl->passthru_err_log_enabled; req->timeout = NVME_ADMIN_TIMEOUT; + } + + if (!logging_enabled) + req->rq_flags |= RQF_QUIET; /* passthru commands should let the driver set the SGL flags */ cmd->common.flags &= ~NVME_CMD_SGL_ALL; @@ -691,8 +739,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd) if (req->mq_hctx->type == HCTX_TYPE_POLL) req->cmd_flags |= REQ_POLLED; nvme_clear_nvme_request(req); - req->rq_flags |= RQF_QUIET; - memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd)); + memcpy(nr->cmd, cmd, sizeof(*cmd)); } EXPORT_SYMBOL_GPL(nvme_init_request); @@ -721,7 +768,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, EXPORT_SYMBOL_GPL(nvme_fail_nonready_command); bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, - bool queue_live) + bool queue_live, enum nvme_ctrl_state state) { struct nvme_request *req = nvme_req(rq); @@ -742,7 +789,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, * command, which is require to set the queue live in the * appropinquate states. 
*/ - switch (nvme_ctrl_state(ctrl)) { + switch (state) { case NVME_CTRL_CONNECTING: if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) && (req->cmd->fabrics.fctype == nvme_fabrics_type_connect || @@ -1051,20 +1098,27 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU); */ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, union nvme_result *result, void *buffer, unsigned bufflen, - int qid, int at_head, blk_mq_req_flags_t flags) + int qid, nvme_submit_flags_t flags) { struct request *req; int ret; + blk_mq_req_flags_t blk_flags = 0; + if (flags & NVME_SUBMIT_NOWAIT) + blk_flags |= BLK_MQ_REQ_NOWAIT; + if (flags & NVME_SUBMIT_RESERVED) + blk_flags |= BLK_MQ_REQ_RESERVED; if (qid == NVME_QID_ANY) - req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags); + req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags); else - req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags, + req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags, qid - 1); if (IS_ERR(req)) return PTR_ERR(req); nvme_init_request(req, cmd); + if (flags & NVME_SUBMIT_RETRY) + req->cmd_flags &= ~REQ_FAILFAST_DRIVER; if (buffer && bufflen) { ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); @@ -1072,7 +1126,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, goto out; } - ret = nvme_execute_rq(req, at_head); + ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD); if (result && ret >= 0) *result = nvme_req(req)->result; out: @@ -1085,7 +1139,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, void *buffer, unsigned bufflen) { return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, - NVME_QID_ANY, 0, 0); + NVME_QID_ANY, 0); } EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd); @@ -1108,6 +1162,10 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) effects &= ~NVME_CMD_EFFECTS_CSE_MASK; } else { effects = le32_to_cpu(ctrl->effects->acs[opcode]); + + /* Ignore execution restrictions if any relaxation bits are set */ + if (effects & NVME_CMD_EFFECTS_CSER_MASK) + effects &= ~NVME_CMD_EFFECTS_CSE_MASK; } return effects; @@ -1349,8 +1407,10 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, sizeof(struct nvme_id_ctrl)); - if (error) + if (error) { kfree(*id); + *id = NULL; + } return error; } @@ -1479,6 +1539,7 @@ int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, if (error) { dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); kfree(*id); + *id = NULL; } return error; } @@ -1560,7 +1621,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, c.features.dword11 = cpu_to_le32(dword11); ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, - buffer, buflen, NVME_QID_ANY, 0, 0); + buffer, buflen, NVME_QID_ANY, 0); if (ret >= 0 && result) *result = le32_to_cpu(res.u32); return ret; @@ -1678,12 +1739,23 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -#ifdef CONFIG_BLK_DEV_INTEGRITY -static void nvme_init_integrity(struct gendisk *disk, - struct nvme_ns_head *head, u32 max_integrity_segments) +static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head) { struct blk_integrity integrity = { }; + blk_integrity_unregister(disk); + + if (!head->ms) + return true; + + /* + * PI can always be supported as we can ask the controller to simply + * insert/strip it, which is not possible for other kinds of metadata. 
+ */ + if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) || + !(head->features & NVME_NS_METADATA_SUPPORTED)) + return nvme_ns_has_pi(head); + switch (head->pi_type) { case NVME_NS_DPS_PI_TYPE3: switch (head->guard_type) { @@ -1726,53 +1798,32 @@ static void nvme_init_integrity(struct gendisk *disk, } integrity.tuple_size = head->ms; + integrity.pi_offset = head->pi_offset; blk_integrity_register(disk, &integrity); - blk_queue_max_integrity_segments(disk->queue, max_integrity_segments); -} -#else -static void nvme_init_integrity(struct gendisk *disk, - struct nvme_ns_head *head, u32 max_integrity_segments) -{ + return true; } -#endif /* CONFIG_BLK_DEV_INTEGRITY */ -static void nvme_config_discard(struct nvme_ctrl *ctrl, struct gendisk *disk, - struct nvme_ns_head *head) +static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim) { - struct request_queue *queue = disk->queue; - u32 max_discard_sectors; - - if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX)) { - max_discard_sectors = nvme_lba_to_sect(head, ctrl->dmrsl); - } else if (ctrl->oncs & NVME_CTRL_ONCS_DSM) { - max_discard_sectors = UINT_MAX; - } else { - blk_queue_max_discard_sectors(queue, 0); - return; - } + struct nvme_ctrl *ctrl = ns->ctrl; BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < NVME_DSM_MAX_RANGES); - /* - * If discard is already enabled, don't reset queue limits. - * - * This works around the fact that the block layer can't cope well with - * updating the hardware limits when overridden through sysfs. This is - * harmless because discard limits in NVMe are purely advisory. - */ - if (queue->limits.max_discard_sectors) - return; + if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX)) + lim->max_hw_discard_sectors = + nvme_lba_to_sect(ns->head, ctrl->dmrsl); + else if (ctrl->oncs & NVME_CTRL_ONCS_DSM) + lim->max_hw_discard_sectors = UINT_MAX; + else + lim->max_hw_discard_sectors = 0; + + lim->discard_granularity = lim->logical_block_size; - blk_queue_max_discard_sectors(queue, max_discard_sectors); if (ctrl->dmrl) - blk_queue_max_discard_segments(queue, ctrl->dmrl); + lim->max_discard_segments = ctrl->dmrl; else - blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES); - queue->limits.discard_granularity = queue_logical_block_size(queue); - - if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) - blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); + lim->max_discard_segments = NVME_DSM_MAX_RANGES; } static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) @@ -1783,42 +1834,38 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) a->csi == b->csi; } -static int nvme_init_ms(struct nvme_ctrl *ctrl, struct nvme_ns_head *head, - struct nvme_id_ns *id) +static int nvme_identify_ns_nvm(struct nvme_ctrl *ctrl, unsigned int nsid, + struct nvme_id_ns_nvm **nvmp) { - bool first = id->dps & NVME_NS_DPS_PI_FIRST; - unsigned lbaf = nvme_lbaf_index(id->flbas); - struct nvme_command c = { }; + struct nvme_command c = { + .identify.opcode = nvme_admin_identify, + .identify.nsid = cpu_to_le32(nsid), + .identify.cns = NVME_ID_CNS_CS_NS, + .identify.csi = NVME_CSI_NVM, + }; struct nvme_id_ns_nvm *nvm; - int ret = 0; - u32 elbaf; - - head->pi_size = 0; - head->ms = le16_to_cpu(id->lbaf[lbaf].ms); - if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) { - head->pi_size = sizeof(struct t10_pi_tuple); - head->guard_type = NVME_NVM_NS_16B_GUARD; - goto set_pi; - } + int ret; nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); if (!nvm) return -ENOMEM; - 
c.identify.opcode = nvme_admin_identify; - c.identify.nsid = cpu_to_le32(head->ns_id); - c.identify.cns = NVME_ID_CNS_CS_NS; - c.identify.csi = NVME_CSI_NVM; - ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm)); if (ret) - goto free_data; + kfree(nvm); + else + *nvmp = nvm; + return ret; +} - elbaf = le32_to_cpu(nvm->elbaf[lbaf]); +static void nvme_configure_pi_elbas(struct nvme_ns_head *head, + struct nvme_id_ns *id, struct nvme_id_ns_nvm *nvm) +{ + u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]); /* no support for storage tag formats right now */ if (nvme_elbaf_sts(elbaf)) - goto free_data; + return; head->guard_type = nvme_elbaf_guard_type(elbaf); switch (head->guard_type) { @@ -1831,30 +1878,31 @@ static int nvme_init_ms(struct nvme_ctrl *ctrl, struct nvme_ns_head *head, default: break; } - -free_data: - kfree(nvm); -set_pi: - if (head->pi_size && (first || head->ms == head->pi_size)) - head->pi_type = id->dps & NVME_NS_DPS_PI_MASK; - else - head->pi_type = 0; - - return ret; } -static int nvme_configure_metadata(struct nvme_ctrl *ctrl, - struct nvme_ns_head *head, struct nvme_id_ns *id) +static void nvme_configure_metadata(struct nvme_ctrl *ctrl, + struct nvme_ns_head *head, struct nvme_id_ns *id, + struct nvme_id_ns_nvm *nvm) { - int ret; - - ret = nvme_init_ms(ctrl, head, id); - if (ret) - return ret; - head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); + head->pi_type = 0; + head->pi_size = 0; + head->pi_offset = 0; + head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms); if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) - return 0; + return; + + if (nvm && (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) { + nvme_configure_pi_elbas(head, id, nvm); + } else { + head->pi_size = sizeof(struct t10_pi_tuple); + head->guard_type = NVME_NVM_NS_16B_GUARD; + } + + if (head->pi_size && head->ms >= head->pi_size) + head->pi_type = id->dps & NVME_NS_DPS_PI_MASK; + if (!(id->dps & NVME_NS_DPS_PI_FIRST)) + head->pi_offset = head->ms - head->pi_size; if (ctrl->ops->flags & NVME_F_FABRICS) { /* @@ -1863,7 +1911,7 @@ static int nvme_configure_metadata(struct nvme_ctrl *ctrl, * remap the separate metadata buffer from the block layer. 
*/ if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) - return 0; + return; head->features |= NVME_NS_EXT_LBAS; @@ -1890,33 +1938,32 @@ static int nvme_configure_metadata(struct nvme_ctrl *ctrl, else head->features |= NVME_NS_METADATA_SUPPORTED; } - return 0; } -static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, - struct request_queue *q) +static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl) { - bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT; - - if (ctrl->max_hw_sectors) { - u32 max_segments = - (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1; + return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1; +} - max_segments = min_not_zero(max_segments, ctrl->max_segments); - blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); - blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); - } - blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1); - blk_queue_dma_alignment(q, 3); - blk_queue_write_cache(q, vwc, vwc); +static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl, + struct queue_limits *lim) +{ + lim->max_hw_sectors = ctrl->max_hw_sectors; + lim->max_segments = min_t(u32, USHRT_MAX, + min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments)); + lim->max_integrity_segments = ctrl->max_integrity_segments; + lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1; + lim->max_segment_size = UINT_MAX; + lim->dma_alignment = 3; } -static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk, - struct nvme_ns_head *head, struct nvme_id_ns *id) +static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id, + struct queue_limits *lim) { - sector_t capacity = nvme_lba_to_sect(head, le64_to_cpu(id->nsze)); + struct nvme_ns_head *head = ns->head; u32 bs = 1U << head->lba_shift; u32 atomic_bs, phys_bs, io_opt = 0; + bool valid = true; /* * The block layer can't support LBA sizes larger than the page size @@ -1924,12 +1971,10 @@ static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk, * allow block I/O. */ if (head->lba_shift > PAGE_SHIFT || head->lba_shift < SECTOR_SHIFT) { - capacity = 0; bs = (1 << 9); + valid = false; } - blk_integrity_unregister(disk); - atomic_bs = phys_bs = bs; if (id->nabo == 0) { /* @@ -1940,7 +1985,7 @@ static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk, if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; else - atomic_bs = (1 + ctrl->subsys->awupf) * bs; + atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; } if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { @@ -1950,36 +1995,20 @@ static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk, io_opt = bs * (1 + le16_to_cpu(id->nows)); } - blk_queue_logical_block_size(disk->queue, bs); /* * Linux filesystems assume writing a single physical block is * an atomic operation. Hence limit the physical block size to the * value of the Atomic Write Unit Power Fail parameter. */ - blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs)); - blk_queue_io_min(disk->queue, phys_bs); - blk_queue_io_opt(disk->queue, io_opt); - - /* - * Register a metadata profile for PI, or the plain non-integrity NVMe - * metadata masquerading as Type 0 if supported, otherwise reject block - * I/O to namespaces with metadata except when the namespace supports - * PI, as it can strip/insert in that case. 
- */ - if (head->ms) { - if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && - (head->features & NVME_NS_METADATA_SUPPORTED)) - nvme_init_integrity(disk, head, - ctrl->max_integrity_segments); - else if (!nvme_ns_has_pi(head)) - capacity = 0; - } - - set_capacity_and_notify(disk, capacity); - - nvme_config_discard(ctrl, disk, head); - blk_queue_max_write_zeroes_sectors(disk->queue, - ctrl->max_zeroes_sectors); + lim->logical_block_size = bs; + lim->physical_block_size = min(phys_bs, atomic_bs); + lim->io_min = phys_bs; + lim->io_opt = io_opt; + if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) + lim->max_write_zeroes_sectors = UINT_MAX; + else + lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors; + return valid; } static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info) @@ -1993,7 +2022,8 @@ static inline bool nvme_first_scan(struct gendisk *disk) return !disk_live(disk); } -static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id) +static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id, + struct queue_limits *lim) { struct nvme_ctrl *ctrl = ns->ctrl; u32 iob; @@ -2021,38 +2051,36 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id) return; } - blk_queue_chunk_sectors(ns->queue, iob); + lim->chunk_sectors = iob; } static int nvme_update_ns_info_generic(struct nvme_ns *ns, struct nvme_ns_info *info) { + struct queue_limits lim; + int ret; + blk_mq_freeze_queue(ns->disk->queue); - nvme_set_queue_limits(ns->ctrl, ns->queue); + lim = queue_limits_start_update(ns->disk->queue); + nvme_set_ctrl_limits(ns->ctrl, &lim); + ret = queue_limits_commit_update(ns->disk->queue, &lim); set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); blk_mq_unfreeze_queue(ns->disk->queue); - if (nvme_ns_head_multipath(ns->head)) { - blk_mq_freeze_queue(ns->head->disk->queue); - set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); - nvme_mpath_revalidate_paths(ns); - blk_stack_limits(&ns->head->disk->queue->limits, - &ns->queue->limits, 0); - ns->head->disk->flags |= GENHD_FL_HIDDEN; - blk_mq_unfreeze_queue(ns->head->disk->queue); - } - /* Hide the block-interface for these devices */ - ns->disk->flags |= GENHD_FL_HIDDEN; - set_bit(NVME_NS_READY, &ns->flags); - - return 0; + if (!ret) + ret = -ENODEV; + return ret; } static int nvme_update_ns_info_block(struct nvme_ns *ns, struct nvme_ns_info *info) { + bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT; + struct queue_limits lim; + struct nvme_id_ns_nvm *nvm = NULL; struct nvme_id_ns *id; + sector_t capacity; unsigned lbaf; int ret; @@ -2064,30 +2092,52 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, /* namespace not allocated or attached */ info->is_removed = true; ret = -ENODEV; - goto error; + goto out; + } + + if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) { + ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm); + if (ret < 0) + goto out; } blk_mq_freeze_queue(ns->disk->queue); lbaf = nvme_lbaf_index(id->flbas); ns->head->lba_shift = id->lbaf[lbaf].ds; ns->head->nuse = le64_to_cpu(id->nuse); - nvme_set_queue_limits(ns->ctrl, ns->queue); + capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze)); - ret = nvme_configure_metadata(ns->ctrl, ns->head, id); - if (ret < 0) { - blk_mq_unfreeze_queue(ns->disk->queue); - goto out; - } - nvme_set_chunk_sectors(ns, id); - nvme_update_disk_info(ns->ctrl, ns->disk, ns->head, id); - - if (ns->head->ids.csi == NVME_CSI_ZNS) { - ret = nvme_update_zone_info(ns, lbaf); + lim = 
queue_limits_start_update(ns->disk->queue); + nvme_set_ctrl_limits(ns->ctrl, &lim); + nvme_configure_metadata(ns->ctrl, ns->head, id, nvm); + nvme_set_chunk_sectors(ns, id, &lim); + if (!nvme_update_disk_info(ns, id, &lim)) + capacity = 0; + nvme_config_discard(ns, &lim); + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && + ns->head->ids.csi == NVME_CSI_ZNS) { + ret = nvme_update_zone_info(ns, lbaf, &lim); if (ret) { blk_mq_unfreeze_queue(ns->disk->queue); goto out; } } + ret = queue_limits_commit_update(ns->disk->queue, &lim); + if (ret) { + blk_mq_unfreeze_queue(ns->disk->queue); + goto out; + } + + /* + * Register a metadata profile for PI, or the plain non-integrity NVMe + * metadata masquerading as Type 0 if supported, otherwise reject block + * I/O to namespaces with metadata except when the namespace supports + * PI, as it can strip/insert in that case. + */ + if (!nvme_init_integrity(ns->disk, ns->head)) + capacity = 0; + + set_capacity_and_notify(ns->disk, capacity); /* * Only set the DEAC bit if the device guarantees that reads from @@ -2098,62 +2148,81 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) ns->head->features |= NVME_NS_DEAC; set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); + blk_queue_write_cache(ns->disk->queue, vwc, vwc); set_bit(NVME_NS_READY, &ns->flags); blk_mq_unfreeze_queue(ns->disk->queue); if (blk_queue_is_zoned(ns->queue)) { - ret = nvme_revalidate_zones(ns); + ret = blk_revalidate_disk_zones(ns->disk, NULL); if (ret && !nvme_first_scan(ns->disk)) goto out; } - if (nvme_ns_head_multipath(ns->head)) { - blk_mq_freeze_queue(ns->head->disk->queue); - nvme_update_disk_info(ns->ctrl, ns->head->disk, ns->head, id); - set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); - nvme_mpath_revalidate_paths(ns); - blk_stack_limits(&ns->head->disk->queue->limits, - &ns->queue->limits, 0); - disk_update_readahead(ns->head->disk); - blk_mq_unfreeze_queue(ns->head->disk->queue); - } - ret = 0; out: - /* - * If probing fails due an unsupported feature, hide the block device, - * but still allow other access. - */ - if (ret == -ENODEV) { - ns->disk->flags |= GENHD_FL_HIDDEN; - set_bit(NVME_NS_READY, &ns->flags); - ret = 0; - } - -error: + kfree(nvm); kfree(id); return ret; } static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) { + bool unsupported = false; + int ret; + switch (info->ids.csi) { case NVME_CSI_ZNS: if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { dev_info(ns->ctrl->device, "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n", info->nsid); - return nvme_update_ns_info_generic(ns, info); + ret = nvme_update_ns_info_generic(ns, info); + break; } - return nvme_update_ns_info_block(ns, info); + ret = nvme_update_ns_info_block(ns, info); + break; case NVME_CSI_NVM: - return nvme_update_ns_info_block(ns, info); + ret = nvme_update_ns_info_block(ns, info); + break; default: dev_info(ns->ctrl->device, "block device for nsid %u not supported (csi %u)\n", info->nsid, info->ids.csi); - return nvme_update_ns_info_generic(ns, info); + ret = nvme_update_ns_info_generic(ns, info); + break; + } + + /* + * If probing fails due an unsupported feature, hide the block device, + * but still allow other access. 
+ */ + if (ret == -ENODEV) { + ns->disk->flags |= GENHD_FL_HIDDEN; + set_bit(NVME_NS_READY, &ns->flags); + unsupported = true; + ret = 0; + } + + if (!ret && nvme_ns_head_multipath(ns->head)) { + struct queue_limits lim; + + blk_mq_freeze_queue(ns->head->disk->queue); + if (unsupported) + ns->head->disk->flags |= GENHD_FL_HIDDEN; + else + nvme_init_integrity(ns->head->disk, ns->head); + set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); + set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); + nvme_mpath_revalidate_paths(ns); + + lim = queue_limits_start_update(ns->head->disk->queue); + queue_limits_stack_bdev(&lim, ns->disk->part0, 0, + ns->head->disk->disk_name); + ret = queue_limits_commit_update(ns->head->disk->queue, &lim); + blk_mq_unfreeze_queue(ns->head->disk->queue); } + + return ret; } #ifdef CONFIG_BLK_SED_OPAL @@ -2172,7 +2241,7 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l cmd.common.cdw11 = cpu_to_le32(len); return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, - NVME_QID_ANY, 1, 0); + NVME_QID_ANY, NVME_SUBMIT_AT_HEAD); } static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) @@ -2828,7 +2897,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) subsys->awupf = le16_to_cpu(id->awupf); nvme_mpath_default_iopolicy(subsys); - subsys->dev.class = nvme_subsys_class; + subsys->dev.class = &nvme_subsys_class; subsys->dev.release = nvme_release_subsystem; subsys->dev.groups = nvme_subsys_attrs_groups; dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); @@ -3068,11 +3137,17 @@ static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ct return -EINVAL; } + if (!ctrl->maxcmd) { + dev_err(ctrl->device, "Maximum outstanding commands is 0\n"); + return -EINVAL; + } + return 0; } static int nvme_init_identify(struct nvme_ctrl *ctrl) { + struct queue_limits lim; struct nvme_id_ctrl *id; u32 max_hw_sectors; bool prev_apst_enabled; @@ -3139,7 +3214,12 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl) ctrl->max_hw_sectors = min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); - nvme_set_queue_limits(ctrl, ctrl->admin_q); + lim = queue_limits_start_update(ctrl->admin_q); + nvme_set_ctrl_limits(ctrl, &lim); + ret = queue_limits_commit_update(ctrl->admin_q, &lim); + if (ret) + goto out_free; + ctrl->sgls = le32_to_cpu(id->sgls); ctrl->kas = le16_to_cpu(id->kas); ctrl->max_namespaces = le32_to_cpu(id->mnan); @@ -3371,7 +3451,7 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, if (minor < 0) return minor; cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor); - cdev_device->class = nvme_ns_chr_class; + cdev_device->class = &nvme_ns_chr_class; cdev_device->release = nvme_cdev_rel; device_initialize(cdev_device); cdev_init(cdev, fops); @@ -3643,7 +3723,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) if (!ns) return; - disk = blk_mq_alloc_disk(ctrl->tagset, ns); + disk = blk_mq_alloc_disk(ctrl->tagset, NULL, ns); if (IS_ERR(disk)) goto out_free_ns; disk->fops = &nvme_bdev_ops; @@ -3714,6 +3794,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) nvme_mpath_add_disk(ns, info->anagrpid); nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); + /* + * Set ns->disk->device->driver_data to ns so we can access + * ns->head->passthru_err_log_enabled in + * nvme_io_passthru_err_log_enabled_[store | show](). 
+ */ + dev_set_drvdata(disk_to_dev(ns->disk), ns); + return; out_cleanup_ns_from_list: @@ -4138,6 +4225,7 @@ static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) { struct nvme_fw_slot_info_log *log; + u8 next_fw_slot, cur_fw_slot; log = kmalloc(sizeof(*log), GFP_KERNEL); if (!log) @@ -4149,13 +4237,15 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) goto out_free_log; } - if (log->afi & 0x70 || !(log->afi & 0x7)) { + cur_fw_slot = log->afi & 0x7; + next_fw_slot = (log->afi & 0x70) >> 4; + if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) { dev_info(ctrl->device, "Firmware is activated after next Controller Level Reset\n"); goto out_free_log; } - memcpy(ctrl->subsys->firmware_rev, &log->frs[(log->afi & 0x7) - 1], + memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1], sizeof(ctrl->subsys->firmware_rev)); out_free_log: @@ -4294,6 +4384,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event); int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, const struct blk_mq_ops *ops, unsigned int cmd_size) { + struct queue_limits lim = {}; int ret; memset(set, 0, sizeof(*set)); @@ -4313,14 +4404,14 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, if (ret) return ret; - ctrl->admin_q = blk_mq_init_queue(set); + ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL); if (IS_ERR(ctrl->admin_q)) { ret = PTR_ERR(ctrl->admin_q); goto out_free_tagset; } if (ctrl->ops->flags & NVME_F_FABRICS) { - ctrl->fabrics_q = blk_mq_init_queue(set); + ctrl->fabrics_q = blk_mq_alloc_queue(set, NULL, NULL); if (IS_ERR(ctrl->fabrics_q)) { ret = PTR_ERR(ctrl->fabrics_q); goto out_cleanup_admin_q; @@ -4384,7 +4475,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, return ret; if (ctrl->ops->flags & NVME_F_FABRICS) { - ctrl->connect_q = blk_mq_init_queue(set); + ctrl->connect_q = blk_mq_alloc_queue(set, NULL, NULL); if (IS_ERR(ctrl->connect_q)) { ret = PTR_ERR(ctrl->connect_q); goto out_free_tag_set; @@ -4514,6 +4605,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, int ret; WRITE_ONCE(ctrl->state, NVME_CTRL_NEW); + ctrl->passthru_err_log_enabled = false; clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); spin_lock_init(&ctrl->lock); mutex_init(&ctrl->scan_lock); @@ -4553,7 +4645,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, ctrl->device = &ctrl->ctrl_device; ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), ctrl->instance); - ctrl->device->class = nvme_class; + ctrl->device->class = &nvme_class; ctrl->device->parent = ctrl->dev; if (ops->dev_attr_groups) ctrl->device->groups = ops->dev_attr_groups; @@ -4786,42 +4878,36 @@ static int __init nvme_core_init(void) if (result < 0) goto destroy_delete_wq; - nvme_class = class_create("nvme"); - if (IS_ERR(nvme_class)) { - result = PTR_ERR(nvme_class); + result = class_register(&nvme_class); + if (result) goto unregister_chrdev; - } - nvme_class->dev_uevent = nvme_class_uevent; - nvme_subsys_class = class_create("nvme-subsystem"); - if (IS_ERR(nvme_subsys_class)) { - result = PTR_ERR(nvme_subsys_class); + result = class_register(&nvme_subsys_class); + if (result) goto destroy_class; - } result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS, "nvme-generic"); if (result < 0) goto destroy_subsys_class; - nvme_ns_chr_class = class_create("nvme-generic"); - if (IS_ERR(nvme_ns_chr_class)) { - result = PTR_ERR(nvme_ns_chr_class); + result = 
class_register(&nvme_ns_chr_class); + if (result) goto unregister_generic_ns; - } + result = nvme_init_auth(); if (result) goto destroy_ns_chr; return 0; destroy_ns_chr: - class_destroy(nvme_ns_chr_class); + class_unregister(&nvme_ns_chr_class); unregister_generic_ns: unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); destroy_subsys_class: - class_destroy(nvme_subsys_class); + class_unregister(&nvme_subsys_class); destroy_class: - class_destroy(nvme_class); + class_unregister(&nvme_class); unregister_chrdev: unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); destroy_delete_wq: @@ -4837,9 +4923,9 @@ out: static void __exit nvme_core_exit(void) { nvme_exit_auth(); - class_destroy(nvme_ns_chr_class); - class_destroy(nvme_subsys_class); - class_destroy(nvme_class); + class_unregister(&nvme_ns_chr_class); + class_unregister(&nvme_subsys_class); + class_unregister(&nvme_class); unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS); unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS); destroy_workqueue(nvme_delete_wq); @@ -4851,5 +4937,6 @@ static void __exit nvme_core_exit(void) MODULE_LICENSE("GPL"); MODULE_VERSION("1.0"); +MODULE_DESCRIPTION("NVMe host core framework"); module_init(nvme_core_init); module_exit(nvme_core_exit); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index b5752a77ad98..1f0ea1f32d22 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -180,7 +180,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) cmd.prop_get.offset = cpu_to_le32(off); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, - NVME_QID_ANY, 0, 0); + NVME_QID_ANY, 0); if (ret >= 0) *val = le64_to_cpu(res.u64); @@ -226,7 +226,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) cmd.prop_get.offset = cpu_to_le32(off); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, - NVME_QID_ANY, 0, 0); + NVME_QID_ANY, 0); if (ret >= 0) *val = le64_to_cpu(res.u64); @@ -271,7 +271,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) cmd.prop_set.value = cpu_to_le64(val); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, - NVME_QID_ANY, 0, 0); + NVME_QID_ANY, 0); if (unlikely(ret)) dev_err(ctrl->device, "Property Set error: %d, offset %#x\n", @@ -450,8 +450,10 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl) return -ENOMEM; ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, - data, sizeof(*data), NVME_QID_ANY, 1, - BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); + data, sizeof(*data), NVME_QID_ANY, + NVME_SUBMIT_AT_HEAD | + NVME_SUBMIT_NOWAIT | + NVME_SUBMIT_RESERVED); if (ret) { nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32), &cmd, data); @@ -525,11 +527,14 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid) return -ENOMEM; ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res, - data, sizeof(*data), qid, 1, - BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); + data, sizeof(*data), qid, + NVME_SUBMIT_AT_HEAD | + NVME_SUBMIT_RESERVED | + NVME_SUBMIT_NOWAIT); if (ret) { nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32), &cmd, data); + goto out_free_data; } result = le32_to_cpu(res.u32); if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) { @@ -633,7 +638,7 @@ static struct key *nvmf_parse_key(int key_id) } key = key_lookup(key_id); - if (!IS_ERR(key)) + if (IS_ERR(key)) pr_err("key id %08x not found\n", key_id); else pr_debug("Using key id %08x\n", key_id); @@ -1314,7 +1319,10 @@ out_free_opts: return 
ERR_PTR(ret); } -static struct class *nvmf_class; +static const struct class nvmf_class = { + .name = "nvme-fabrics", +}; + static struct device *nvmf_device; static DEFINE_MUTEX(nvmf_dev_mutex); @@ -1434,15 +1442,14 @@ static int __init nvmf_init(void) if (!nvmf_default_host) return -ENOMEM; - nvmf_class = class_create("nvme-fabrics"); - if (IS_ERR(nvmf_class)) { + ret = class_register(&nvmf_class); + if (ret) { pr_err("couldn't register class nvme-fabrics\n"); - ret = PTR_ERR(nvmf_class); goto out_free_host; } nvmf_device = - device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl"); + device_create(&nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl"); if (IS_ERR(nvmf_device)) { pr_err("couldn't create nvme-fabrics device!\n"); ret = PTR_ERR(nvmf_device); @@ -1458,9 +1465,9 @@ static int __init nvmf_init(void) return 0; out_destroy_device: - device_destroy(nvmf_class, MKDEV(0, 0)); + device_destroy(&nvmf_class, MKDEV(0, 0)); out_destroy_class: - class_destroy(nvmf_class); + class_unregister(&nvmf_class); out_free_host: nvmf_host_put(nvmf_default_host); return ret; @@ -1469,8 +1476,8 @@ out_free_host: static void __exit nvmf_exit(void) { misc_deregister(&nvmf_misc); - device_destroy(nvmf_class, MKDEV(0, 0)); - class_destroy(nvmf_class); + device_destroy(&nvmf_class, MKDEV(0, 0)); + class_unregister(&nvmf_class); nvmf_host_put(nvmf_default_host); BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64); @@ -1488,6 +1495,7 @@ static void __exit nvmf_exit(void) } MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("NVMe host fabrics library"); module_init(nvmf_init); module_exit(nvmf_exit); diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index fbaee5a7be19..06cc54851b1b 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -185,9 +185,11 @@ static inline bool nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl, struct nvmf_ctrl_options *opts) { - if (ctrl->state == NVME_CTRL_DELETING || - ctrl->state == NVME_CTRL_DELETING_NOIO || - ctrl->state == NVME_CTRL_DEAD || + enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); + + if (state == NVME_CTRL_DELETING || + state == NVME_CTRL_DELETING_NOIO || + state == NVME_CTRL_DEAD || strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) || strcmp(opts->host->nqn, ctrl->opts->host->nqn) || !uuid_equal(&opts->host->id, &ctrl->opts->host->id)) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 16847a316421..68a5d971657b 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -221,11 +221,6 @@ static LIST_HEAD(nvme_fc_lport_list); static DEFINE_IDA(nvme_fc_local_port_cnt); static DEFINE_IDA(nvme_fc_ctrl_cnt); -static struct workqueue_struct *nvme_fc_wq; - -static bool nvme_fc_waiting_to_unload; -static DECLARE_COMPLETION(nvme_fc_unload_proceed); - /* * These items are short-term. They will eventually be moved into * a generic FC class. See comments in module init. 
@@ -255,8 +250,6 @@ nvme_fc_free_lport(struct kref *ref) /* remove from transport list */ spin_lock_irqsave(&nvme_fc_lock, flags); list_del(&lport->port_list); - if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list)) - complete(&nvme_fc_unload_proceed); spin_unlock_irqrestore(&nvme_fc_lock, flags); ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num); @@ -2574,6 +2567,7 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq) { struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); struct nvme_fc_ctrl *ctrl = op->ctrl; + u16 qnum = op->queue->qnum; struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; struct nvme_command *sqe = &cmdiu->sqe; @@ -2582,10 +2576,11 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq) * will detect the aborted io and will fail the connection. */ dev_info(ctrl->ctrl.device, - "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: " + "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d (%s) w10/11: " "x%08x/x%08x\n", - ctrl->cnum, op->queue->qnum, sqe->common.opcode, - sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11); + ctrl->cnum, qnum, sqe->common.opcode, sqe->fabrics.fctype, + nvme_fabrics_opcode_str(qnum, sqe), + sqe->common.cdw10, sqe->common.cdw11); if (__nvme_fc_abort_op(ctrl, op)) nvme_fc_error_recovery(ctrl, "io timeout abort failed"); @@ -3575,8 +3570,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, flush_delayed_work(&ctrl->connect_work); dev_info(ctrl->ctrl.device, - "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", - ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl)); + "NVME-FC{%d}: new ctrl: NQN \"%s\", hostnqn: %s\n", + ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl), opts->host->nqn); return &ctrl->ctrl; @@ -3894,10 +3889,6 @@ static int __init nvme_fc_init_module(void) { int ret; - nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0); - if (!nvme_fc_wq) - return -ENOMEM; - /* * NOTE: * It is expected that in the future the kernel will combine @@ -3915,7 +3906,7 @@ static int __init nvme_fc_init_module(void) ret = class_register(&fc_class); if (ret) { pr_err("couldn't register class fc\n"); - goto out_destroy_wq; + return ret; } /* @@ -3939,8 +3930,6 @@ out_destroy_device: device_destroy(&fc_class, MKDEV(0, 0)); out_destroy_class: class_unregister(&fc_class); -out_destroy_wq: - destroy_workqueue(nvme_fc_wq); return ret; } @@ -3960,48 +3949,27 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport) spin_unlock(&rport->lock); } -static void -nvme_fc_cleanup_for_unload(void) +static void __exit nvme_fc_exit_module(void) { struct nvme_fc_lport *lport; struct nvme_fc_rport *rport; - - list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { - list_for_each_entry(rport, &lport->endp_list, endp_list) { - nvme_fc_delete_controllers(rport); - } - } -} - -static void __exit nvme_fc_exit_module(void) -{ unsigned long flags; - bool need_cleanup = false; spin_lock_irqsave(&nvme_fc_lock, flags); - nvme_fc_waiting_to_unload = true; - if (!list_empty(&nvme_fc_lport_list)) { - need_cleanup = true; - nvme_fc_cleanup_for_unload(); - } + list_for_each_entry(lport, &nvme_fc_lport_list, port_list) + list_for_each_entry(rport, &lport->endp_list, endp_list) + nvme_fc_delete_controllers(rport); spin_unlock_irqrestore(&nvme_fc_lock, flags); - if (need_cleanup) { - pr_info("%s: waiting for ctlr deletes\n", __func__); - wait_for_completion(&nvme_fc_unload_proceed); - pr_info("%s: ctrl deletes complete\n", __func__); - } + flush_workqueue(nvme_delete_wq); nvmf_unregister_transport(&nvme_fc_transport); - 
ida_destroy(&nvme_fc_local_port_cnt); - ida_destroy(&nvme_fc_ctrl_cnt); - device_destroy(&fc_class, MKDEV(0, 0)); class_unregister(&fc_class); - destroy_workqueue(nvme_fc_wq); } module_init(nvme_fc_init_module); module_exit(nvme_fc_exit_module); +MODULE_DESCRIPTION("NVMe host FC transport driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 18f5c1be5d67..3dfd5ae99ae0 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -228,7 +228,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) length = (io.nblocks + 1) << ns->head->lba_shift; if ((io.control & NVME_RW_PRINFO_PRACT) && - ns->head->ms == sizeof(struct t10_pi_tuple)) { + (ns->head->ms == ns->head->pi_size)) { /* * Protection information is stripped/inserted by the * controller. diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 2dd4137a08b2..5397fb428b24 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -156,7 +156,7 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) if (!ns->head->disk) continue; kblockd_schedule_work(&ns->head->requeue_work); - if (ctrl->state == NVME_CTRL_LIVE) + if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE) disk_uevent(ns->head->disk, KOBJ_CHANGE); } up_read(&ctrl->namespaces_rwsem); @@ -223,13 +223,14 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns) static bool nvme_path_is_disabled(struct nvme_ns *ns) { + enum nvme_ctrl_state state = nvme_ctrl_state(ns->ctrl); + /* * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should * still be able to complete assuming that the controller is connected. * Otherwise it will fail immediately and return to the requeue list. */ - if (ns->ctrl->state != NVME_CTRL_LIVE && - ns->ctrl->state != NVME_CTRL_DELETING) + if (state != NVME_CTRL_LIVE && state != NVME_CTRL_DELETING) return true; if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) || !test_bit(NVME_NS_READY, &ns->flags)) @@ -331,7 +332,7 @@ out: static inline bool nvme_path_is_optimized(struct nvme_ns *ns) { - return ns->ctrl->state == NVME_CTRL_LIVE && + return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE && ns->ana_state == NVME_ANA_OPTIMIZED; } @@ -358,7 +359,7 @@ static bool nvme_available_path(struct nvme_ns_head *head) list_for_each_entry_rcu(ns, &head->list, siblings) { if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags)) continue; - switch (ns->ctrl->state) { + switch (nvme_ctrl_state(ns->ctrl)) { case NVME_CTRL_LIVE: case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: @@ -515,6 +516,7 @@ static void nvme_requeue_work(struct work_struct *work) int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) { + struct queue_limits lim; bool vwc = false; mutex_init(&head->lock); @@ -531,9 +533,14 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) !nvme_is_unique_nsid(ctrl, head) || !multipath) return 0; - head->disk = blk_alloc_disk(ctrl->numa_node); - if (!head->disk) - return -ENOMEM; + blk_set_stacking_limits(&lim); + lim.dma_alignment = 3; + if (head->ids.csi != NVME_CSI_ZNS) + lim.max_zone_append_sectors = 0; + + head->disk = blk_alloc_disk(&lim, ctrl->numa_node); + if (IS_ERR(head->disk)) + return PTR_ERR(head->disk); head->disk->fops = &nvme_ns_head_ops; head->disk->private_data = head; sprintf(head->disk->disk_name, "nvme%dn%d", @@ -552,11 +559,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues) 
blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue); - /* set to a default value of 512 until the disk is validated */ - blk_queue_logical_block_size(head->disk->queue, 512); - blk_set_stacking_limits(&head->disk->queue->limits); - blk_queue_dma_alignment(head->disk->queue, 3); - /* we need to propagate up the VMC settings */ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) vwc = true; @@ -667,7 +669,7 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, * controller is ready. */ if (nvme_state_is_live(ns->ana_state) && - ns->ctrl->state == NVME_CTRL_LIVE) + nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE) nvme_mpath_set_live(ns); } @@ -748,7 +750,7 @@ static void nvme_ana_work(struct work_struct *work) { struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work); - if (ctrl->state != NVME_CTRL_LIVE) + if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) return; nvme_read_ana_log(ctrl); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 030c80818240..24193fcb8bd5 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -263,6 +263,7 @@ enum nvme_ctrl_flags { struct nvme_ctrl { bool comp_seen; bool identified; + bool passthru_err_log_enabled; enum nvme_ctrl_state state; spinlock_t lock; struct mutex scan_lock; @@ -454,6 +455,7 @@ struct nvme_ns_head { struct list_head entry; struct kref ref; bool shared; + bool passthru_err_log_enabled; int instance; struct nvme_effects_log *effects; u64 nuse; @@ -462,6 +464,7 @@ struct nvme_ns_head { u16 ms; u16 pi_size; u8 pi_type; + u8 pi_offset; u8 guard_type; u16 sgs; u32 sws; @@ -522,7 +525,6 @@ struct nvme_ns { struct device cdev_device; struct nvme_fault_inject fault_inject; - }; /* NVMe ns supports metadata actions by the controller (generate/strip) */ @@ -805,17 +807,18 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req); blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, struct request *req); bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, - bool queue_live); + bool queue_live, enum nvme_ctrl_state state); static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, bool queue_live) { - if (likely(ctrl->state == NVME_CTRL_LIVE)) + enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); + + if (likely(state == NVME_CTRL_LIVE)) return true; - if (ctrl->ops->flags & NVME_F_FABRICS && - ctrl->state == NVME_CTRL_DELETING) + if (ctrl->ops->flags & NVME_F_FABRICS && state == NVME_CTRL_DELETING) return queue_live; - return __nvme_check_ready(ctrl, rq, queue_live); + return __nvme_check_ready(ctrl, rq, queue_live, state); } /* @@ -836,12 +839,27 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl, (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS); } +/* + * Flags for __nvme_submit_sync_cmd() + */ +typedef __u32 __bitwise nvme_submit_flags_t; + +enum { + /* Insert request at the head of the queue */ + NVME_SUBMIT_AT_HEAD = (__force nvme_submit_flags_t)(1 << 0), + /* Set BLK_MQ_REQ_NOWAIT when allocating request */ + NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1), + /* Set BLK_MQ_REQ_RESERVED when allocating request */ + NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2), + /* Retry command when NVME_SC_DNR is not set in the result */ + NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3), +}; + int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, void *buf, unsigned bufflen); int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, union nvme_result *result, void 
*buffer, unsigned bufflen, - int qid, int at_head, - blk_mq_req_flags_t flags); + int qid, nvme_submit_flags_t flags); int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, u32 *result); @@ -1018,11 +1036,11 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk) } #endif /* CONFIG_NVME_MULTIPATH */ -int nvme_revalidate_zones(struct nvme_ns *ns); int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); +int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf, + struct queue_limits *lim); #ifdef CONFIG_BLK_DEV_ZONED -int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf); blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd, enum nvme_zone_mgmt_action action); @@ -1033,13 +1051,6 @@ static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, { return BLK_STS_NOTSUPP; } - -static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf) -{ - dev_warn(ns->ctrl->device, - "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n"); - return -EPROTONOSUPPORT; -} #endif static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev) @@ -1124,35 +1135,42 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl) } #ifdef CONFIG_NVME_VERBOSE_ERRORS -const unsigned char *nvme_get_error_status_str(u16 status); -const unsigned char *nvme_get_opcode_str(u8 opcode); -const unsigned char *nvme_get_admin_opcode_str(u8 opcode); -const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode); +const char *nvme_get_error_status_str(u16 status); +const char *nvme_get_opcode_str(u8 opcode); +const char *nvme_get_admin_opcode_str(u8 opcode); +const char *nvme_get_fabrics_opcode_str(u8 opcode); #else /* CONFIG_NVME_VERBOSE_ERRORS */ -static inline const unsigned char *nvme_get_error_status_str(u16 status) +static inline const char *nvme_get_error_status_str(u16 status) { return "I/O Error"; } -static inline const unsigned char *nvme_get_opcode_str(u8 opcode) +static inline const char *nvme_get_opcode_str(u8 opcode) { return "I/O Cmd"; } -static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode) +static inline const char *nvme_get_admin_opcode_str(u8 opcode) { return "Admin Cmd"; } -static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) +static inline const char *nvme_get_fabrics_opcode_str(u8 opcode) { return "Fabrics Cmd"; } #endif /* CONFIG_NVME_VERBOSE_ERRORS */ -static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype) +static inline const char *nvme_opcode_str(int qid, u8 opcode) { - if (opcode == nvme_fabrics_command) - return nvme_get_fabrics_opcode_str(fctype); return qid ? 
nvme_get_opcode_str(opcode) : nvme_get_admin_opcode_str(opcode); } + +static inline const char *nvme_fabrics_opcode_str( + int qid, const struct nvme_command *cmd) +{ + if (nvme_is_fabrics(cmd)) + return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype); + + return nvme_opcode_str(qid, cmd->common.opcode); +} #endif /* _NVME_H */ diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index c1d6357ec98a..e6267a6aa380 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1349,7 +1349,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) dev_warn(dev->ctrl.device, "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n", req->tag, nvme_cid(req), opcode, - nvme_opcode_str(nvmeq->qid, opcode, 0), nvmeq->qid); + nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid); nvme_req(req)->flags |= NVME_REQ_CANCELLED; goto disable; } @@ -3543,5 +3543,6 @@ static void __exit nvme_exit(void) MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0"); +MODULE_DESCRIPTION("NVMe host PCIe transport driver"); module_init(nvme_init); module_exit(nvme_exit); diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 11dde0d83044..366f0bb4ebfc 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1006,6 +1006,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) { int ret; bool changed; + u16 max_queue_size; ret = nvme_rdma_configure_admin_queue(ctrl, new); if (ret) @@ -1030,11 +1031,16 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1); } - if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) { + if (ctrl->ctrl.max_integrity_segments) + max_queue_size = NVME_RDMA_MAX_METADATA_QUEUE_SIZE; + else + max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE; + + if (ctrl->ctrl.sqsize + 1 > max_queue_size) { dev_warn(ctrl->ctrl.device, - "ctrl sqsize %u > max queue size %u, clamping down\n", - ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE); - ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1; + "ctrl sqsize %u > max queue size %u, clamping down\n", + ctrl->ctrl.sqsize + 1, max_queue_size); + ctrl->ctrl.sqsize = max_queue_size - 1; } if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { @@ -1410,6 +1416,8 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, struct nvme_ns *ns = rq->q->queuedata; struct bio *bio = rq->bio; struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; + struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); + u32 xfer_len; int nr; req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs); @@ -1422,8 +1430,7 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, if (unlikely(nr)) goto mr_put; - nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c, - req->mr->sig_attrs, ns->head->pi_type); + nvme_rdma_set_sig_attrs(bi, c, req->mr->sig_attrs, ns->head->pi_type); nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask); ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); @@ -1441,7 +1448,11 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, IB_ACCESS_REMOTE_WRITE; sg->addr = cpu_to_le64(req->mr->iova); - put_unaligned_le24(req->mr->length, sg->length); + xfer_len = req->mr->length; + /* Check if PI is added by the HW */ + if (!pi_count) + xfer_len += (xfer_len >> bi->interval_exp) * ns->head->pi_size; + put_unaligned_le24(xfer_len, sg->length); put_unaligned_le32(req->mr->rkey, sg->key); sg->type = 
NVME_KEY_SGL_FMT_DATA_DESC << 4; @@ -1946,14 +1957,13 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq) struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_queue *queue = req->queue; struct nvme_rdma_ctrl *ctrl = queue->ctrl; - u8 opcode = req->req.cmd->common.opcode; - u8 fctype = req->req.cmd->fabrics.fctype; + struct nvme_command *cmd = req->req.cmd; int qid = nvme_rdma_queue_idx(queue); dev_warn(ctrl->ctrl.device, "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout\n", - rq->tag, nvme_cid(rq), opcode, - nvme_opcode_str(qid, opcode, fctype), qid); + rq->tag, nvme_cid(rq), cmd->common.opcode, + nvme_fabrics_opcode_str(qid, cmd), qid); if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) { /* @@ -2296,8 +2306,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, if (ret) goto out_uninit_ctrl; - dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n", - nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr); + dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs, hostnqn: %s\n", + nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn); mutex_lock(&nvme_rdma_ctrl_mutex); list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); @@ -2400,4 +2410,5 @@ static void __exit nvme_rdma_cleanup_module(void) module_init(nvme_rdma_init_module); module_exit(nvme_rdma_cleanup_module); +MODULE_DESCRIPTION("NVMe host RDMA transport driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c index 754e91111042..09fcaa519e5b 100644 --- a/drivers/nvme/host/sysfs.c +++ b/drivers/nvme/host/sysfs.c @@ -35,6 +35,31 @@ static ssize_t nvme_sysfs_rescan(struct device *dev, } static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); +static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + return sysfs_emit(buf, + ctrl->passthru_err_log_enabled ? "on\n" : "off\n"); +} + +static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + bool passthru_err_log_enabled; + int err; + + err = kstrtobool(buf, &passthru_err_log_enabled); + if (err) + return -EINVAL; + + ctrl->passthru_err_log_enabled = passthru_err_log_enabled; + + return count; +} + static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) { struct gendisk *disk = dev_to_disk(dev); @@ -44,6 +69,37 @@ static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) return nvme_get_ns_from_dev(dev)->head; } +static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ns_head *head = dev_to_ns_head(dev); + + return sysfs_emit(buf, head->passthru_err_log_enabled ? 
"on\n" : "off\n"); +} + +static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ns_head *head = dev_to_ns_head(dev); + bool passthru_err_log_enabled; + int err; + + err = kstrtobool(buf, &passthru_err_log_enabled); + if (err) + return -EINVAL; + head->passthru_err_log_enabled = passthru_err_log_enabled; + + return count; +} + +static struct device_attribute dev_attr_adm_passthru_err_log_enabled = \ + __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \ + nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store); + +static struct device_attribute dev_attr_io_passthru_err_log_enabled = \ + __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \ + nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store); + static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -165,14 +221,11 @@ static int ns_update_nuse(struct nvme_ns *ns) ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id); if (ret) - goto out_free_id; + return ret; ns->head->nuse = le64_to_cpu(id->nuse); - -out_free_id: kfree(id); - - return ret; + return 0; } static ssize_t nuse_show(struct device *dev, struct device_attribute *attr, @@ -208,6 +261,7 @@ static struct attribute *nvme_ns_attrs[] = { &dev_attr_ana_grpid.attr, &dev_attr_ana_state.attr, #endif + &dev_attr_io_passthru_err_log_enabled.attr, NULL, }; @@ -311,6 +365,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev, char *buf) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + unsigned state = (unsigned)nvme_ctrl_state(ctrl); static const char *const state_name[] = { [NVME_CTRL_NEW] = "new", [NVME_CTRL_LIVE] = "live", @@ -321,9 +376,8 @@ static ssize_t nvme_sysfs_show_state(struct device *dev, [NVME_CTRL_DEAD] = "dead", }; - if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && - state_name[ctrl->state]) - return sysfs_emit(buf, "%s\n", state_name[ctrl->state]); + if (state < ARRAY_SIZE(state_name) && state_name[state]) + return sysfs_emit(buf, "%s\n", state_name[state]); return sysfs_emit(buf, "unknown state\n"); } @@ -655,6 +709,7 @@ static struct attribute *nvme_dev_attrs[] = { #ifdef CONFIG_NVME_TCP_TLS &dev_attr_tls_key.attr, #endif + &dev_attr_adm_passthru_err_log_enabled.attr, NULL }; diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index d058d990532b..a6d596e05602 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -2428,13 +2428,13 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq) struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); - u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype; + struct nvme_command *cmd = &pdu->cmd; int qid = nvme_tcp_queue_id(req->queue); dev_warn(ctrl->device, "I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n", - rq->tag, nvme_cid(rq), pdu->hdr.type, opc, - nvme_opcode_str(qid, opc, fctype), qid); + rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode, + nvme_fabrics_opcode_str(qid, cmd), qid); if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) { /* @@ -2753,8 +2753,8 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, if (ret) goto out_uninit_ctrl; - dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n", - nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr); + dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n", + 
nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn); mutex_lock(&nvme_tcp_ctrl_mutex); list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list); @@ -2826,4 +2826,5 @@ static void __exit nvme_tcp_cleanup_module(void) module_init(nvme_tcp_init_module); module_exit(nvme_tcp_cleanup_module); +MODULE_DESCRIPTION("NVMe host TCP transport driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index 499bbb0eee8d..722384bcc765 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -7,16 +7,6 @@ #include <linux/vmalloc.h> #include "nvme.h" -int nvme_revalidate_zones(struct nvme_ns *ns) -{ - struct request_queue *q = ns->queue; - - blk_queue_chunk_sectors(q, ns->head->zsze); - blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append); - - return blk_revalidate_disk_zones(ns->disk, NULL); -} - static int nvme_set_max_append(struct nvme_ctrl *ctrl) { struct nvme_command c = { }; @@ -45,10 +35,10 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl) return 0; } -int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf) +int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf, + struct queue_limits *lim) { struct nvme_effects_log *log = ns->head->effects; - struct request_queue *q = ns->queue; struct nvme_command c = { }; struct nvme_id_ns_zns *id; int status; @@ -109,10 +99,12 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf) goto free_data; } - disk_set_zoned(ns->disk); - blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); - disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1); - disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1); + blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue); + lim->zoned = 1; + lim->max_open_zones = le32_to_cpu(id->mor) + 1; + lim->max_active_zones = le32_to_cpu(id->mar) + 1; + lim->chunk_sectors = ns->head->zsze; + lim->max_zone_append_sectors = ns->ctrl->max_zone_append; free_data: kfree(id); return status; diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 39cb570f833d..f5b7054a4a05 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -428,7 +428,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->cqes = (0x4 << 4) | 0x4; /* no enforcement soft-limit for maxcmd - pick arbitrary high value */ - id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); + id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl)); id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES); id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES); diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 2482a0db2504..77a6e817b315 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -273,6 +273,32 @@ static ssize_t nvmet_param_inline_data_size_store(struct config_item *item, CONFIGFS_ATTR(nvmet_, param_inline_data_size); +static ssize_t nvmet_param_max_queue_size_show(struct config_item *item, + char *page) +{ + struct nvmet_port *port = to_nvmet_port(item); + + return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size); +} + +static ssize_t nvmet_param_max_queue_size_store(struct config_item *item, + const char *page, size_t count) +{ + struct nvmet_port *port = to_nvmet_port(item); + int ret; + + if (nvmet_is_port_enabled(port, __func__)) + return -EACCES; + ret = kstrtoint(page, 0, &port->max_queue_size); + if (ret) { + pr_err("Invalid value '%s' for max_queue_size\n", page); + return -EINVAL; + } + return count; +} + +CONFIGFS_ATTR(nvmet_, param_max_queue_size); + #ifdef 
CONFIG_BLK_DEV_INTEGRITY static ssize_t nvmet_param_pi_enable_show(struct config_item *item, char *page) @@ -1859,6 +1885,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = { &nvmet_attr_addr_trtype, &nvmet_attr_addr_tsas, &nvmet_attr_param_inline_data_size, + &nvmet_attr_param_max_queue_size, #ifdef CONFIG_BLK_DEV_INTEGRITY &nvmet_attr_param_pi_enable, #endif @@ -1917,6 +1944,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group, INIT_LIST_HEAD(&port->subsystems); INIT_LIST_HEAD(&port->referrals); port->inline_data_size = -1; /* < 0 == let the transport choose */ + port->max_queue_size = -1; /* < 0 == let the transport choose */ port->disc_addr.portid = cpu_to_le16(portid); port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index d26aa30f8702..6bbe4df0166c 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -248,7 +248,7 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid) nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid)); if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR)) continue; - nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, + nvmet_add_async_event(ctrl, NVME_AER_NOTICE, NVME_AER_NOTICE_NS_CHANGED, NVME_LOG_CHANGED_NS); } @@ -265,7 +265,7 @@ void nvmet_send_ana_event(struct nvmet_subsys *subsys, continue; if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE)) continue; - nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, + nvmet_add_async_event(ctrl, NVME_AER_NOTICE, NVME_AER_NOTICE_ANA, NVME_LOG_ANA); } mutex_unlock(&subsys->lock); @@ -358,6 +358,18 @@ int nvmet_enable_port(struct nvmet_port *port) if (port->inline_data_size < 0) port->inline_data_size = 0; + /* + * If the transport didn't set the max_queue_size properly, then clamp + * it to the target limits. Also set default values in case the + * transport didn't set it at all. 
+ */ + if (port->max_queue_size < 0) + port->max_queue_size = NVMET_MAX_QUEUE_SIZE; + else + port->max_queue_size = clamp_t(int, port->max_queue_size, + NVMET_MIN_QUEUE_SIZE, + NVMET_MAX_QUEUE_SIZE); + port->enabled = true; port->tr_ops = ops; return 0; @@ -1223,9 +1235,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl) ctrl->cap |= (15ULL << 24); /* maximum queue entries supported: */ if (ctrl->ops->get_max_queue_size) - ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1; + ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl), + ctrl->port->max_queue_size) - 1; else - ctrl->cap |= NVMET_QUEUE_SIZE - 1; + ctrl->cap |= ctrl->port->max_queue_size - 1; if (nvmet_is_passthru_subsys(ctrl->subsys)) nvmet_passthrough_override_cap(ctrl); @@ -1411,6 +1424,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, kref_init(&ctrl->ref); ctrl->subsys = subsys; + ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support; nvmet_init_cap(ctrl); WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL); @@ -1705,4 +1719,5 @@ static void __exit nvmet_exit(void) module_init(nvmet_init); module_exit(nvmet_exit); +MODULE_DESCRIPTION("NVMe target core framework"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index 668d257fa986..ce54da8c6b36 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -21,7 +21,7 @@ static void __nvmet_disc_changed(struct nvmet_port *port, if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE)) return; - nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, + nvmet_add_async_event(ctrl, NVME_AER_NOTICE, NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC); } @@ -282,7 +282,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req) id->lpa = (1 << 2); /* no enforcement soft-limit for maxcmd - pick arbitrary high value */ - id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); + id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl)); id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */ if (ctrl->ops->flags & NVMF_KEYED_SGLS) diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index d8da840a1c0e..b23f4cf840bd 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -157,7 +157,8 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; } - if (sqsize > mqes) { + /* for fabrics, this value applies to only the I/O Submission Queues */ + if (qid && sqsize > mqes) { pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n", sqsize, mqes, ctrl->cntlid); req->error_loc = offsetof(struct nvmf_connect_command, sqsize); @@ -209,7 +210,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) struct nvmf_connect_command *c = &req->cmd->connect; struct nvmf_connect_data *d; struct nvmet_ctrl *ctrl = NULL; - u16 status = 0; + u16 status; int ret; if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data))) @@ -251,8 +252,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) if (status) goto out; - ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support; - uuid_copy(&ctrl->hostid, &d->hostid); ret = nvmet_setup_auth(ctrl); @@ -290,7 +289,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) struct nvmf_connect_data *d; struct nvmet_ctrl *ctrl; u16 qid = le16_to_cpu(c->qid); - u16 status = 0; + u16 status; if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data))) return; diff --git 
a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index bda7a3009e85..fd229f310c93 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -111,6 +111,8 @@ struct nvmet_fc_tgtport { struct nvmet_fc_port_entry *pe; struct kref ref; u32 max_sg_cnt; + + struct work_struct put_work; }; struct nvmet_fc_port_entry { @@ -145,7 +147,6 @@ struct nvmet_fc_tgt_queue { struct list_head avail_defer_list; struct workqueue_struct *work_q; struct kref ref; - struct rcu_head rcu; /* array of fcp_iods */ struct nvmet_fc_fcp_iod fod[] __counted_by(sqsize); } __aligned(sizeof(unsigned long long)); @@ -166,10 +167,9 @@ struct nvmet_fc_tgt_assoc { struct nvmet_fc_hostport *hostport; struct nvmet_fc_ls_iod *rcv_disconn; struct list_head a_list; - struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1]; + struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; struct kref ref; struct work_struct del_work; - struct rcu_head rcu; }; @@ -249,6 +249,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); +static void nvmet_fc_put_tgtport_work(struct work_struct *work) +{ + struct nvmet_fc_tgtport *tgtport = + container_of(work, struct nvmet_fc_tgtport, put_work); + + nvmet_fc_tgtport_put(tgtport); +} static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod); @@ -360,7 +367,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) if (!lsop->req_queued) { spin_unlock_irqrestore(&tgtport->lock, flags); - return; + goto out_putwork; } list_del(&lsop->lsreq_list); @@ -373,7 +380,8 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) (lsreq->rqstlen + lsreq->rsplen), DMA_BIDIRECTIONAL); - nvmet_fc_tgtport_put(tgtport); +out_putwork: + queue_work(nvmet_wq, &tgtport->put_work); } static int @@ -489,8 +497,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) * message is normal. Otherwise, send unless the hostport has * already been invalidated by the lldd. 
*/ - if (!tgtport->ops->ls_req || !assoc->hostport || - assoc->hostport->invalid) + if (!tgtport->ops->ls_req || assoc->hostport->invalid) return; lsop = kzalloc((sizeof(*lsop) + @@ -802,14 +809,11 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, if (!queue) return NULL; - if (!nvmet_fc_tgt_a_get(assoc)) - goto out_free_queue; - queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, assoc->tgtport->fc_target_port.port_num, assoc->a_id, qid); if (!queue->work_q) - goto out_a_put; + goto out_free_queue; queue->qid = qid; queue->sqsize = sqsize; @@ -831,15 +835,13 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, goto out_fail_iodlist; WARN_ON(assoc->queues[qid]); - rcu_assign_pointer(assoc->queues[qid], queue); + assoc->queues[qid] = queue; return queue; out_fail_iodlist: nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); destroy_workqueue(queue->work_q); -out_a_put: - nvmet_fc_tgt_a_put(assoc); out_free_queue: kfree(queue); return NULL; @@ -852,15 +854,11 @@ nvmet_fc_tgt_queue_free(struct kref *ref) struct nvmet_fc_tgt_queue *queue = container_of(ref, struct nvmet_fc_tgt_queue, ref); - rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL); - nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); - nvmet_fc_tgt_a_put(queue->assoc); - destroy_workqueue(queue->work_q); - kfree_rcu(queue, rcu); + kfree(queue); } static void @@ -969,7 +967,7 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, rcu_read_lock(); list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { if (association_id == assoc->association_id) { - queue = rcu_dereference(assoc->queues[qid]); + queue = assoc->queues[qid]; if (queue && (!atomic_read(&queue->connected) || !nvmet_fc_tgt_q_get(queue))) @@ -1078,8 +1076,6 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) /* new allocation not needed */ kfree(newhost); newhost = match; - /* no new allocation - release reference */ - nvmet_fc_tgtport_put(tgtport); } else { newhost->tgtport = tgtport; newhost->hosthandle = hosthandle; @@ -1094,23 +1090,54 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) } static void -nvmet_fc_delete_assoc(struct work_struct *work) +nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) +{ + nvmet_fc_delete_target_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); +} + +static void +nvmet_fc_delete_assoc_work(struct work_struct *work) { struct nvmet_fc_tgt_assoc *assoc = container_of(work, struct nvmet_fc_tgt_assoc, del_work); + struct nvmet_fc_tgtport *tgtport = assoc->tgtport; - nvmet_fc_delete_target_assoc(assoc); - nvmet_fc_tgt_a_put(assoc); + nvmet_fc_delete_assoc(assoc); + nvmet_fc_tgtport_put(tgtport); +} + +static void +nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) +{ + nvmet_fc_tgtport_get(assoc->tgtport); + queue_work(nvmet_wq, &assoc->del_work); +} + +static bool +nvmet_fc_assoc_exits(struct nvmet_fc_tgtport *tgtport, u64 association_id) +{ + struct nvmet_fc_tgt_assoc *a; + + list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) { + if (association_id == a->association_id) + return true; + } + + return false; } static struct nvmet_fc_tgt_assoc * nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) { - struct nvmet_fc_tgt_assoc *assoc, *tmpassoc; + struct nvmet_fc_tgt_assoc *assoc; unsigned long flags; + bool done; u64 ran; int idx; - bool needrandom = true; + + if (!tgtport->pe) + return NULL; assoc = kzalloc(sizeof(*assoc), GFP_KERNEL); if (!assoc) @@ -1120,43 +1147,35 @@ 
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) if (idx < 0) goto out_free_assoc; - if (!nvmet_fc_tgtport_get(tgtport)) - goto out_ida; - assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); if (IS_ERR(assoc->hostport)) - goto out_put; + goto out_ida; assoc->tgtport = tgtport; assoc->a_id = idx; INIT_LIST_HEAD(&assoc->a_list); kref_init(&assoc->ref); - INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc); + INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work); atomic_set(&assoc->terminating, 0); - while (needrandom) { + done = false; + do { get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); ran = ran << BYTES_FOR_QID_SHIFT; spin_lock_irqsave(&tgtport->lock, flags); - needrandom = false; - list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { - if (ran == tmpassoc->association_id) { - needrandom = true; - break; - } - } - if (!needrandom) { + rcu_read_lock(); + if (!nvmet_fc_assoc_exits(tgtport, ran)) { assoc->association_id = ran; list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); + done = true; } + rcu_read_unlock(); spin_unlock_irqrestore(&tgtport->lock, flags); - } + } while (!done); return assoc; -out_put: - nvmet_fc_tgtport_put(tgtport); out_ida: ida_free(&tgtport->assoc_cnt, idx); out_free_assoc: @@ -1172,13 +1191,18 @@ nvmet_fc_target_assoc_free(struct kref *ref) struct nvmet_fc_tgtport *tgtport = assoc->tgtport; struct nvmet_fc_ls_iod *oldls; unsigned long flags; + int i; + + for (i = NVMET_NR_QUEUES; i >= 0; i--) { + if (assoc->queues[i]) + nvmet_fc_delete_target_queue(assoc->queues[i]); + } /* Send Disconnect now that all i/o has completed */ nvmet_fc_xmt_disconnect_assoc(assoc); nvmet_fc_free_hostport(assoc->hostport); spin_lock_irqsave(&tgtport->lock, flags); - list_del_rcu(&assoc->a_list); oldls = assoc->rcv_disconn; spin_unlock_irqrestore(&tgtport->lock, flags); /* if pending Rcv Disconnect Association LS, send rsp now */ @@ -1188,8 +1212,7 @@ nvmet_fc_target_assoc_free(struct kref *ref) dev_info(tgtport->dev, "{%d:%d} Association freed\n", tgtport->fc_target_port.port_num, assoc->a_id); - kfree_rcu(assoc, rcu); - nvmet_fc_tgtport_put(tgtport); + kfree(assoc); } static void @@ -1208,7 +1231,7 @@ static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) { struct nvmet_fc_tgtport *tgtport = assoc->tgtport; - struct nvmet_fc_tgt_queue *queue; + unsigned long flags; int i, terminating; terminating = atomic_xchg(&assoc->terminating, 1); @@ -1217,29 +1240,21 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) if (terminating) return; + spin_lock_irqsave(&tgtport->lock, flags); + list_del_rcu(&assoc->a_list); + spin_unlock_irqrestore(&tgtport->lock, flags); - for (i = NVMET_NR_QUEUES; i >= 0; i--) { - rcu_read_lock(); - queue = rcu_dereference(assoc->queues[i]); - if (!queue) { - rcu_read_unlock(); - continue; - } + synchronize_rcu(); - if (!nvmet_fc_tgt_q_get(queue)) { - rcu_read_unlock(); - continue; - } - rcu_read_unlock(); - nvmet_fc_delete_target_queue(queue); - nvmet_fc_tgt_q_put(queue); + /* ensure all in-flight I/Os have been processed */ + for (i = NVMET_NR_QUEUES; i >= 0; i--) { + if (assoc->queues[i]) + flush_workqueue(assoc->queues[i]->work_q); } dev_info(tgtport->dev, "{%d:%d} Association deleted\n", tgtport->fc_target_port.port_num, assoc->a_id); - - nvmet_fc_tgt_a_put(assoc); } static struct nvmet_fc_tgt_assoc * @@ -1415,6 +1430,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, kref_init(&newrec->ref); ida_init(&newrec->assoc_cnt); newrec->max_sg_cnt = 
template->max_sgl_segments; + INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work); ret = nvmet_fc_alloc_ls_iodlist(newrec); if (ret) { @@ -1492,9 +1508,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { if (!nvmet_fc_tgt_a_get(assoc)) continue; - if (!queue_work(nvmet_wq, &assoc->del_work)) - /* already deleting - release local reference */ - nvmet_fc_tgt_a_put(assoc); + nvmet_fc_schedule_delete_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); } rcu_read_unlock(); } @@ -1540,16 +1555,14 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, spin_lock_irqsave(&tgtport->lock, flags); list_for_each_entry_safe(assoc, next, &tgtport->assoc_list, a_list) { - if (!assoc->hostport || - assoc->hostport->hosthandle != hosthandle) + if (assoc->hostport->hosthandle != hosthandle) continue; if (!nvmet_fc_tgt_a_get(assoc)) continue; assoc->hostport->invalid = 1; noassoc = false; - if (!queue_work(nvmet_wq, &assoc->del_work)) - /* already deleting - release local reference */ - nvmet_fc_tgt_a_put(assoc); + nvmet_fc_schedule_delete_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); } spin_unlock_irqrestore(&tgtport->lock, flags); @@ -1581,7 +1594,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) rcu_read_lock(); list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { - queue = rcu_dereference(assoc->queues[0]); + queue = assoc->queues[0]; if (queue && queue->nvme_sq.ctrl == ctrl) { if (nvmet_fc_tgt_a_get(assoc)) found_ctrl = true; @@ -1593,9 +1606,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) nvmet_fc_tgtport_put(tgtport); if (found_ctrl) { - if (!queue_work(nvmet_wq, &assoc->del_work)) - /* already deleting - release local reference */ - nvmet_fc_tgt_a_put(assoc); + nvmet_fc_schedule_delete_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); return; } @@ -1625,6 +1637,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) /* terminate any outstanding associations */ __nvmet_fc_free_assocs(tgtport); + flush_workqueue(nvmet_wq); + /* * should terminate LS's as well. 
However, LS's will be generated * at the tail end of association termination, so they likely don't @@ -1870,9 +1884,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, sizeof(struct fcnvme_ls_disconnect_assoc_acc)), FCNVME_LS_DISCONNECT_ASSOC); - /* release get taken in nvmet_fc_find_target_assoc */ - nvmet_fc_tgt_a_put(assoc); - /* * The rules for LS response says the response cannot * go back until ABTS's have been sent for all outstanding @@ -1887,8 +1898,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, assoc->rcv_disconn = iod; spin_unlock_irqrestore(&tgtport->lock, flags); - nvmet_fc_delete_target_assoc(assoc); - if (oldls) { dev_info(tgtport->dev, "{%d:%d} Multiple Disconnect Association LS's " @@ -1904,6 +1913,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, nvmet_fc_xmt_ls_rsp(tgtport, oldls); } + nvmet_fc_schedule_delete_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); + return false; } @@ -2540,8 +2552,9 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, fod->req.cmd = &fod->cmdiubuf.sqe; fod->req.cqe = &fod->rspiubuf.cqe; - if (tgtport->pe) - fod->req.port = tgtport->pe->port; + if (!tgtport->pe) + goto transport_error; + fod->req.port = tgtport->pe->port; /* clear any response payload */ memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); @@ -2902,6 +2915,9 @@ nvmet_fc_remove_port(struct nvmet_port *port) nvmet_fc_portentry_unbind(pe); + /* terminate any outstanding associations */ + __nvmet_fc_free_assocs(pe->tgtport); + kfree(pe); } @@ -2933,6 +2949,9 @@ static int __init nvmet_fc_init_module(void) static void __exit nvmet_fc_exit_module(void) { + /* ensure any shutdown operation, e.g. delete ctrls have finished */ + flush_workqueue(nvmet_wq); + /* sanity check - all lports should be removed */ if (!list_empty(&nvmet_fc_target_list)) pr_warn("%s: targetport list not empty\n", __func__); @@ -2945,4 +2964,5 @@ static void __exit nvmet_fc_exit_module(void) module_init(nvmet_fc_init_module); module_exit(nvmet_fc_exit_module); +MODULE_DESCRIPTION("NVMe target FC transport driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index ead349af30f1..913cd2ec7a6f 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -358,7 +358,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport, if (!rport->targetport) { tls_req->status = -ECONNREFUSED; spin_lock(&rport->lock); - list_add_tail(&rport->ls_list, &tls_req->ls_list); + list_add_tail(&tls_req->ls_list, &rport->ls_list); spin_unlock(&rport->lock); queue_work(nvmet_wq, &rport->ls_work); return ret; @@ -391,7 +391,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport, if (remoteport) { rport = remoteport->private; spin_lock(&rport->lock); - list_add_tail(&rport->ls_list, &tls_req->ls_list); + list_add_tail(&tls_req->ls_list, &rport->ls_list); spin_unlock(&rport->lock); queue_work(nvmet_wq, &rport->ls_work); } @@ -446,7 +446,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle, if (!tport->remoteport) { tls_req->status = -ECONNREFUSED; spin_lock(&tport->lock); - list_add_tail(&tport->ls_list, &tls_req->ls_list); + list_add_tail(&tls_req->ls_list, &tport->ls_list); spin_unlock(&tport->lock); queue_work(nvmet_wq, &tport->ls_work); return ret; @@ -1556,7 +1556,9 @@ static const struct attribute_group *fcloop_dev_attr_groups[] = { NULL, }; -static struct class *fcloop_class; +static const struct class fcloop_class = { + .name = "fcloop", +}; static struct device *fcloop_device; @@ 
-1564,15 +1566,14 @@ static int __init fcloop_init(void) { int ret; - fcloop_class = class_create("fcloop"); - if (IS_ERR(fcloop_class)) { + ret = class_register(&fcloop_class); + if (ret) { pr_err("couldn't register class fcloop\n"); - ret = PTR_ERR(fcloop_class); return ret; } fcloop_device = device_create_with_groups( - fcloop_class, NULL, MKDEV(0, 0), NULL, + &fcloop_class, NULL, MKDEV(0, 0), NULL, fcloop_dev_attr_groups, "ctl"); if (IS_ERR(fcloop_device)) { pr_err("couldn't create ctl device!\n"); @@ -1585,7 +1586,7 @@ static int __init fcloop_init(void) return 0; out_destroy_class: - class_destroy(fcloop_class); + class_unregister(&fcloop_class); return ret; } @@ -1643,11 +1644,12 @@ static void __exit fcloop_exit(void) put_device(fcloop_device); - device_destroy(fcloop_class, MKDEV(0, 0)); - class_destroy(fcloop_class); + device_destroy(&fcloop_class, MKDEV(0, 0)); + class_unregister(&fcloop_class); } module_init(fcloop_init); module_exit(fcloop_exit); +MODULE_DESCRIPTION("NVMe target FC loop transport driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index f11400a908f2..6426aac2634a 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -50,10 +50,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) void nvmet_bdev_ns_disable(struct nvmet_ns *ns) { - if (ns->bdev_handle) { - bdev_release(ns->bdev_handle); + if (ns->bdev_file) { + fput(ns->bdev_file); ns->bdev = NULL; - ns->bdev_handle = NULL; + ns->bdev_file = NULL; } } @@ -85,18 +85,18 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns) if (ns->buffered_io) return -ENOTBLK; - ns->bdev_handle = bdev_open_by_path(ns->device_path, + ns->bdev_file = bdev_file_open_by_path(ns->device_path, BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL); - if (IS_ERR(ns->bdev_handle)) { - ret = PTR_ERR(ns->bdev_handle); + if (IS_ERR(ns->bdev_file)) { + ret = PTR_ERR(ns->bdev_file); if (ret != -ENOTBLK) { pr_err("failed to open block device %s: (%d)\n", ns->device_path, ret); } - ns->bdev_handle = NULL; + ns->bdev_file = NULL; return ret; } - ns->bdev = ns->bdev_handle->bdev; + ns->bdev = file_bdev(ns->bdev_file); ns->size = bdev_nr_bytes(ns->bdev); ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev)); diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 9cb434c58075..e589915ddef8 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -400,7 +400,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) } nvme_quiesce_admin_queue(&ctrl->ctrl); - if (ctrl->ctrl.state == NVME_CTRL_LIVE) + if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE) nvme_disable_ctrl(&ctrl->ctrl, true); nvme_cancel_admin_tagset(&ctrl->ctrl); @@ -434,8 +434,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) nvme_loop_shutdown_ctrl(ctrl); if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { - if (ctrl->ctrl.state != NVME_CTRL_DELETING && - ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO) + enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl); + + if (state != NVME_CTRL_DELETING && + state != NVME_CTRL_DELETING_NOIO) /* state change failure for non-deleted ctrl? 
*/ WARN_ON_ONCE(1); return; @@ -688,5 +690,6 @@ static void __exit nvme_loop_cleanup_module(void) module_init(nvme_loop_init_module); module_exit(nvme_loop_cleanup_module); +MODULE_DESCRIPTION("NVMe target loop transport driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */ diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 6c8acebe1a1a..f460728e1df1 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -58,7 +58,7 @@ struct nvmet_ns { struct percpu_ref ref; - struct bdev_handle *bdev_handle; + struct file *bdev_file; struct block_device *bdev; struct file *file; bool readonly; @@ -163,6 +163,7 @@ struct nvmet_port { void *priv; bool enabled; int inline_data_size; + int max_queue_size; const struct nvmet_fabrics_ops *tr_ops; bool pi_enable; }; @@ -543,9 +544,10 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys, void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, u8 event_info, u8 log_page); -#define NVMET_QUEUE_SIZE 1024 +#define NVMET_MIN_QUEUE_SIZE 16 +#define NVMET_MAX_QUEUE_SIZE 1024 #define NVMET_NR_QUEUES 128 -#define NVMET_MAX_CMD NVMET_QUEUE_SIZE +#define NVMET_MAX_CMD(ctrl) (NVME_CAP_MQES(ctrl->cap) + 1) /* * Nice round number that makes a list of nsids fit into a page. diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index f2d963e1fe94..bb4a69d538fd 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -132,7 +132,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req) id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes); id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes); - id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); + id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl)); /* don't support fuse commands */ id->fuses = 0; diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 667f9c04f35d..f2bb9d95ecf4 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -1956,6 +1956,14 @@ static int nvmet_rdma_add_port(struct nvmet_port *nport) nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE; } + if (nport->max_queue_size < 0) { + nport->max_queue_size = NVME_RDMA_DEFAULT_QUEUE_SIZE; + } else if (nport->max_queue_size > NVME_RDMA_MAX_QUEUE_SIZE) { + pr_warn("max_queue_size %u is too large, reducing to %u\n", + nport->max_queue_size, NVME_RDMA_MAX_QUEUE_SIZE); + nport->max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE; + } + ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr, nport->disc_addr.trsvcid, &port->addr); if (ret) { @@ -2015,6 +2023,8 @@ static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl) static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl) { + if (ctrl->pi_support) + return NVME_RDMA_MAX_METADATA_QUEUE_SIZE; return NVME_RDMA_MAX_QUEUE_SIZE; } @@ -2104,5 +2114,6 @@ static void __exit nvmet_rdma_exit(void) module_init(nvmet_rdma_init); module_exit(nvmet_rdma_exit); +MODULE_DESCRIPTION("NVMe target RDMA transport driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */ diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 6a1e6bb80062..c8655fc5aa5b 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -2216,10 +2216,12 @@ static void __exit nvmet_tcp_exit(void) flush_workqueue(nvmet_wq); destroy_workqueue(nvmet_tcp_wq); + ida_destroy(&nvmet_tcp_queue_ida); } module_init(nvmet_tcp_init); module_exit(nvmet_tcp_exit); 
+MODULE_DESCRIPTION("NVMe target TCP transport driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */ diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index 5b5c1e481722..3148d9f1bde6 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -456,8 +456,7 @@ static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req) switch (zsa_req_op(req->cmd->zms.zsa)) { case REQ_OP_ZONE_RESET: ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0, - get_capacity(req->ns->bdev->bd_disk), - GFP_KERNEL); + get_capacity(req->ns->bdev->bd_disk)); if (ret < 0) return blkdev_zone_mgmt_errno_to_nvme_status(ret); break; @@ -508,7 +507,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w) goto out; } - ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL); + ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors); if (ret < 0) status = blkdev_zone_mgmt_errno_to_nvme_status(ret); diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 980123fb4dde..eb357ac2e54a 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -460,8 +460,9 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem) list_for_each_entry(entry, &nvmem->cells, node) { sysfs_bin_attr_init(&attrs[i]); attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL, - "%s@%x", entry->name, - entry->offset); + "%s@%x,%x", entry->name, + entry->offset, + entry->bit_offset); attrs[i].attr.mode = 0444; attrs[i].size = entry->bytes; attrs[i].read = &nvmem_cell_attr_read; diff --git a/drivers/of/property.c b/drivers/of/property.c index 641a40cf5cf3..fa8cd33be131 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -763,7 +763,9 @@ struct device_node *of_graph_get_port_parent(struct device_node *node) /* Walk 3 levels up only if there is 'ports' node. */ for (depth = 3; depth && node; depth--) { node = of_get_next_parent(node); - if (depth == 2 && !of_node_name_eq(node, "ports")) + if (depth == 2 && !of_node_name_eq(node, "ports") && + !of_node_name_eq(node, "in-ports") && + !of_node_name_eq(node, "out-ports")) break; } return node; @@ -1063,36 +1065,6 @@ of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode, return of_device_get_match_data(dev); } -static struct device_node *of_get_compat_node(struct device_node *np) -{ - of_node_get(np); - - while (np) { - if (!of_device_is_available(np)) { - of_node_put(np); - np = NULL; - } - - if (of_property_present(np, "compatible")) - break; - - np = of_get_next_parent(np); - } - - return np; -} - -static struct device_node *of_get_compat_node_parent(struct device_node *np) -{ - struct device_node *parent, *node; - - parent = of_get_parent(np); - node = of_get_compat_node(parent); - of_node_put(parent); - - return node; -} - static void of_link_to_phandle(struct device_node *con_np, struct device_node *sup_np) { @@ -1222,10 +1194,10 @@ static struct device_node *parse_##fname(struct device_node *np, \ * parse_prop.prop_name: Name of property holding a phandle value * parse_prop.index: For properties holding a list of phandles, this is the * index into the list + * @get_con_dev: If the consumer node containing the property is never converted + * to a struct device, implement this ops so fw_devlink can use it + * to find the true consumer. * @optional: Describes whether a supplier is mandatory or not - * @node_not_dev: The consumer node containing the property is never converted - * to a struct device. 
Instead, parse ancestor nodes for the - * compatible property to find a node corresponding to a device. * * Returns: * parse_prop() return values are @@ -1236,15 +1208,15 @@ static struct device_node *parse_##fname(struct device_node *np, \ struct supplier_bindings { struct device_node *(*parse_prop)(struct device_node *np, const char *prop_name, int index); + struct device_node *(*get_con_dev)(struct device_node *np); bool optional; - bool node_not_dev; }; DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells") DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells") DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells") DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells") -DEFINE_SIMPLE_PROP(io_channels, "io-channel", "#io-channel-cells") +DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells") DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL) DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells") DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells") @@ -1262,7 +1234,6 @@ DEFINE_SIMPLE_PROP(pinctrl5, "pinctrl-5", NULL) DEFINE_SIMPLE_PROP(pinctrl6, "pinctrl-6", NULL) DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL) DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL) -DEFINE_SIMPLE_PROP(remote_endpoint, "remote-endpoint", NULL) DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells") DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells") DEFINE_SIMPLE_PROP(leds, "leds", NULL) @@ -1328,6 +1299,17 @@ static struct device_node *parse_interrupts(struct device_node *np, return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np; } +static struct device_node *parse_remote_endpoint(struct device_node *np, + const char *prop_name, + int index) +{ + /* Return NULL for index > 0 to signify end of remote-endpoints. */ + if (index > 0 || strcmp(prop_name, "remote-endpoint")) + return NULL; + + return of_graph_get_remote_port_parent(np); +} + static const struct supplier_bindings of_supplier_bindings[] = { { .parse_prop = parse_clocks, }, { .parse_prop = parse_interconnects, }, @@ -1352,7 +1334,10 @@ static const struct supplier_bindings of_supplier_bindings[] = { { .parse_prop = parse_pinctrl6, }, { .parse_prop = parse_pinctrl7, }, { .parse_prop = parse_pinctrl8, }, - { .parse_prop = parse_remote_endpoint, .node_not_dev = true, }, + { + .parse_prop = parse_remote_endpoint, + .get_con_dev = of_graph_get_port_parent, + }, { .parse_prop = parse_pwms, }, { .parse_prop = parse_resets, }, { .parse_prop = parse_leds, }, @@ -1403,8 +1388,8 @@ static int of_link_property(struct device_node *con_np, const char *prop_name) while ((phandle = s->parse_prop(con_np, prop_name, i))) { struct device_node *con_dev_np; - con_dev_np = s->node_not_dev - ? of_get_compat_node_parent(con_np) + con_dev_np = s->get_con_dev + ? s->get_con_dev(con_np) : of_node_get(con_np); matched = true; i++; diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index cfd60e35a899..d7593bde2d02 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -50,6 +50,12 @@ static struct unittest_results { failed; \ }) +#ifdef CONFIG_OF_KOBJ +#define OF_KREF_READ(NODE) kref_read(&(NODE)->kobj.kref) +#else +#define OF_KREF_READ(NODE) 1 +#endif + /* * Expected message may have a message level other than KERN_INFO. 
* Print the expected message only if the current loglevel will allow @@ -570,7 +576,7 @@ static void __init of_unittest_parse_phandle_with_args_map(void) pr_err("missing testcase data\n"); return; } - prefs[i] = kref_read(&p[i]->kobj.kref); + prefs[i] = OF_KREF_READ(p[i]); } rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells"); @@ -693,9 +699,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void) unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); for (i = 0; i < ARRAY_SIZE(p); ++i) { - unittest(prefs[i] == kref_read(&p[i]->kobj.kref), + unittest(prefs[i] == OF_KREF_READ(p[i]), "provider%d: expected:%d got:%d\n", - i, prefs[i], kref_read(&p[i]->kobj.kref)); + i, prefs[i], OF_KREF_READ(p[i])); of_node_put(p[i]); } } diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 9c2137dae429..826b5016a101 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -386,21 +386,8 @@ void pci_bus_add_devices(const struct pci_bus *bus) } EXPORT_SYMBOL(pci_bus_add_devices); -/** pci_walk_bus - walk devices on/under bus, calling callback. - * @top bus whose devices should be walked - * @cb callback to be called for each device found - * @userdata arbitrary pointer to be passed to callback. - * - * Walk the given bus, including any bridged devices - * on buses under this bus. Call the provided callback - * on each device found. - * - * We check the return of @cb each time. If it returns anything - * other than 0, we break out. - * - */ -void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), - void *userdata) +static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), + void *userdata, bool locked) { struct pci_dev *dev; struct pci_bus *bus; @@ -408,7 +395,8 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), int retval; bus = top; - down_read(&pci_bus_sem); + if (!locked) + down_read(&pci_bus_sem); next = top->devices.next; for (;;) { if (next == &bus->devices) { @@ -431,10 +419,37 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), if (retval) break; } - up_read(&pci_bus_sem); + if (!locked) + up_read(&pci_bus_sem); +} + +/** + * pci_walk_bus - walk devices on/under bus, calling callback. + * @top: bus whose devices should be walked + * @cb: callback to be called for each device found + * @userdata: arbitrary pointer to be passed to callback + * + * Walk the given bus, including any bridged devices + * on buses under this bus. Call the provided callback + * on each device found. + * + * We check the return of @cb each time. If it returns anything + * other than 0, we break out. 
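/*
 * Illustrative aside, not part of the patch: a minimal pci_walk_bus()
 * callback matching the kernel-doc above.  Returning non-zero from the
 * callback aborts the walk early; returning 0 keeps walking.
 * count_devices_cb()/count_devices_under() are illustrative names.
 */
static int count_devices_cb(struct pci_dev *pdev, void *data)
{
        unsigned int *nr = data;

        (*nr)++;
        return 0;               /* non-zero would stop the walk here */
}

static unsigned int count_devices_under(struct pci_bus *bus)
{
        unsigned int nr = 0;

        pci_walk_bus(bus, count_devices_cb, &nr);
        return nr;
}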
+ */ +void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata) +{ + __pci_walk_bus(top, cb, userdata, false); } EXPORT_SYMBOL_GPL(pci_walk_bus); +void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata) +{ + lockdep_assert_held(&pci_bus_sem); + + __pci_walk_bus(top, cb, userdata, true); +} +EXPORT_SYMBOL_GPL(pci_walk_bus_locked); + struct pci_bus *pci_bus_get(struct pci_bus *bus) { if (bus) diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 5befed2dc02b..9a437cfce073 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -6,6 +6,7 @@ * Author: Kishon Vijay Abraham I <kishon@ti.com> */ +#include <linux/align.h> #include <linux/bitfield.h> #include <linux/of.h> #include <linux/platform_device.h> @@ -482,9 +483,10 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, reg = ep_func->msi_cap + PCI_MSI_DATA_32; msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg); } - aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1); - msg_addr = ((u64)msg_addr_upper) << 32 | - (msg_addr_lower & ~aligned_offset); + msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower; + + aligned_offset = msg_addr & (epc->mem->window.page_size - 1); + msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, epc->mem->window.page_size); if (ret) @@ -551,7 +553,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, } aligned_offset = msg_addr & (epc->mem->window.page_size - 1); - msg_addr &= ~aligned_offset; + msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, epc->mem->window.page_size); if (ret) diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 10f2d0bb86be..2ce2a3bd932b 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -972,7 +972,7 @@ static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata) * Downstream devices need to be in D0 state before enabling PCI PM * substates. 
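/*
 * Illustrative aside, not part of the patch: the address split done in
 * the dw_pcie_ep_raise_msi_irq()/dw_pcie_ep_raise_msix_irq() hunks
 * above, worked through for an assumed 4 KiB window page size:
 *
 *   msg_addr                                  = 0xfee01a44
 *   aligned_offset = msg_addr & (0x1000 - 1)  = 0xa44
 *   ALIGN_DOWN(msg_addr, 0x1000)              = 0xfee01000
 *
 * The outbound window is mapped at the aligned address and the MSI
 * write lands at ep->msi_mem + aligned_offset.
 */
static u64 example_msi_window_split(u64 msg_addr, u64 page_size)
{
        u64 aligned_offset = msg_addr & (page_size - 1);
        u64 window_base = ALIGN_DOWN(msg_addr, page_size);

        return window_base + aligned_offset;    /* == msg_addr */
}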
*/ - pci_set_power_state(pdev, PCI_D0); + pci_set_power_state_locked(pdev, PCI_D0); pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL); return 0; diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index a89b7de72dcf..7333b305f2a5 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -26,58 +26,79 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) hotplug_slot); int rc; - if (zdev->state != ZPCI_FN_STATE_STANDBY) - return -EIO; + mutex_lock(&zdev->state_lock); + if (zdev->state != ZPCI_FN_STATE_STANDBY) { + rc = -EIO; + goto out; + } rc = sclp_pci_configure(zdev->fid); zpci_dbg(3, "conf fid:%x, rc:%d\n", zdev->fid, rc); if (rc) - return rc; + goto out; zdev->state = ZPCI_FN_STATE_CONFIGURED; - return zpci_scan_configured_device(zdev, zdev->fh); + rc = zpci_scan_configured_device(zdev, zdev->fh); +out: + mutex_unlock(&zdev->state_lock); + return rc; } static int disable_slot(struct hotplug_slot *hotplug_slot) { struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, hotplug_slot); - struct pci_dev *pdev; + struct pci_dev *pdev = NULL; + int rc; - if (zdev->state != ZPCI_FN_STATE_CONFIGURED) - return -EIO; + mutex_lock(&zdev->state_lock); + if (zdev->state != ZPCI_FN_STATE_CONFIGURED) { + rc = -EIO; + goto out; + } pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn); if (pdev && pci_num_vf(pdev)) { pci_dev_put(pdev); - return -EBUSY; + rc = -EBUSY; + goto out; } - pci_dev_put(pdev); - return zpci_deconfigure_device(zdev); + rc = zpci_deconfigure_device(zdev); +out: + mutex_unlock(&zdev->state_lock); + if (pdev) + pci_dev_put(pdev); + return rc; } static int reset_slot(struct hotplug_slot *hotplug_slot, bool probe) { struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, hotplug_slot); + int rc = -EIO; - if (zdev->state != ZPCI_FN_STATE_CONFIGURED) - return -EIO; /* - * We can't take the zdev->lock as reset_slot may be called during - * probing and/or device removal which already happens under the - * zdev->lock. Instead the user should use the higher level - * pci_reset_function() or pci_bus_reset() which hold the PCI device - * lock preventing concurrent removal. If not using these functions - * holding the PCI device lock is required. + * If we can't get the zdev->state_lock the device state is + * currently undergoing a transition and we bail out - just + * the same as if the device's state is not configured at all. 
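/*
 * Illustrative aside, not part of the patch: pci_get_slot() returns a
 * counted reference (or NULL), which is why disable_slot() above holds
 * on to the pointer and drops it with pci_dev_put() only once it is
 * done with it.  has_active_vfs() is an illustrative name.
 */
static bool has_active_vfs(struct pci_bus *bus, unsigned int devfn)
{
        struct pci_dev *pdev = pci_get_slot(bus, devfn);
        bool busy = pdev && pci_num_vf(pdev) > 0;

        pci_dev_put(pdev);      /* pci_dev_put(NULL) is a no-op */
        return busy;
}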
*/ + if (!mutex_trylock(&zdev->state_lock)) + return rc; - /* As long as the function is configured we can reset */ - if (probe) - return 0; + /* We can reset only if the function is configured */ + if (zdev->state != ZPCI_FN_STATE_CONFIGURED) + goto out; + + if (probe) { + rc = 0; + goto out; + } - return zpci_hot_reset_device(zdev); + rc = zpci_hot_reset_device(zdev); +out: + mutex_unlock(&zdev->state_lock); + return rc; } static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c index c8be056c248d..cfd84a899c82 100644 --- a/drivers/pci/msi/irqdomain.c +++ b/drivers/pci/msi/irqdomain.c @@ -61,7 +61,7 @@ static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc) return (irq_hw_number_t)desc->msi_index | pci_dev_id(dev) << 11 | - (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27; + ((irq_hw_number_t)(pci_domain_nr(dev->bus) & 0xFFFFFFFF)) << 27; } static void pci_msi_domain_set_desc(msi_alloc_info_t *arg, diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index d8f11a078924..c3585229c12a 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1354,6 +1354,7 @@ end: /** * pci_set_full_power_state - Put a PCI device into D0 and update its state * @dev: PCI device to power up + * @locked: whether pci_bus_sem is held * * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register * to confirm the state change, restore its BARs if they might be lost and @@ -1363,7 +1364,7 @@ end: * to D0, it is more efficient to use pci_power_up() directly instead of this * function. */ -static int pci_set_full_power_state(struct pci_dev *dev) +static int pci_set_full_power_state(struct pci_dev *dev, bool locked) { u16 pmcsr; int ret; @@ -1399,7 +1400,7 @@ static int pci_set_full_power_state(struct pci_dev *dev) } if (dev->bus->self) - pcie_aspm_pm_state_change(dev->bus->self); + pcie_aspm_pm_state_change(dev->bus->self, locked); return 0; } @@ -1428,10 +1429,22 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state) pci_walk_bus(bus, __pci_dev_set_current_state, &state); } +static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked) +{ + if (!bus) + return; + + if (locked) + pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state); + else + pci_walk_bus(bus, __pci_dev_set_current_state, &state); +} + /** * pci_set_low_power_state - Put a PCI device into a low-power state. * @dev: PCI device to handle. * @state: PCI power state (D1, D2, D3hot) to put the device into. + * @locked: whether pci_bus_sem is held * * Use the device's PCI_PM_CTRL register to put it into a low-power state. * @@ -1442,7 +1455,7 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state) * 0 if device already is in the requested state. * 0 if device's power state has been successfully changed. */ -static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state) +static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked) { u16 pmcsr; @@ -1496,29 +1509,12 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state) pci_power_name(state)); if (dev->bus->self) - pcie_aspm_pm_state_change(dev->bus->self); + pcie_aspm_pm_state_change(dev->bus->self, locked); return 0; } -/** - * pci_set_power_state - Set the power state of a PCI device - * @dev: PCI device to handle. - * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. 
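/*
 * Illustrative aside, not part of the patch: why the 'locked' flag is
 * threaded through these helpers.  pci_walk_bus() (see the
 * drivers/pci/bus.c hunks above) read-holds pci_bus_sem around each
 * callback, and the power-state helpers end up in
 * pcie_aspm_pm_state_change(), which also takes pci_bus_sem:
 *
 *   pci_walk_bus()                              down_read(&pci_bus_sem)
 *     -> callback, e.g. qcom_pcie_enable_aspm()
 *        -> pci_set_power_state()               old code
 *           -> pci_set_full_power_state()
 *              -> pcie_aspm_pm_state_change()
 *                 -> down_read(&pci_bus_sem)    nested acquisition
 *
 * A second down_read() can deadlock once a writer gets queued in
 * between, so callers that already hold the lock use
 * pci_set_power_state_locked() and the flag makes
 * pcie_aspm_pm_state_change() skip the extra down_read().
 */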
- * - * Transition a device to a new power state, using the platform firmware and/or - * the device's PCI PM registers. - * - * RETURN VALUE: - * -EINVAL if the requested state is invalid. - * -EIO if device does not support PCI PM or its PM capabilities register has a - * wrong version, or device doesn't support the requested state. - * 0 if the transition is to D1 or D2 but D1 and D2 are not supported. - * 0 if device already is in the requested state. - * 0 if the transition is to D3 but D3 is not supported. - * 0 if device's power state has been successfully changed. - */ -int pci_set_power_state(struct pci_dev *dev, pci_power_t state) +static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked) { int error; @@ -1542,7 +1538,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) return 0; if (state == PCI_D0) - return pci_set_full_power_state(dev); + return pci_set_full_power_state(dev, locked); /* * This device is quirked not to be put into D3, so don't put it in @@ -1556,16 +1552,16 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) * To put the device in D3cold, put it into D3hot in the native * way, then put it into D3cold using platform ops. */ - error = pci_set_low_power_state(dev, PCI_D3hot); + error = pci_set_low_power_state(dev, PCI_D3hot, locked); if (pci_platform_power_transition(dev, PCI_D3cold)) return error; /* Powering off a bridge may power off the whole hierarchy */ if (dev->current_state == PCI_D3cold) - pci_bus_set_current_state(dev->subordinate, PCI_D3cold); + __pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked); } else { - error = pci_set_low_power_state(dev, state); + error = pci_set_low_power_state(dev, state, locked); if (pci_platform_power_transition(dev, state)) return error; @@ -1573,8 +1569,38 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) return 0; } + +/** + * pci_set_power_state - Set the power state of a PCI device + * @dev: PCI device to handle. + * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. + * + * Transition a device to a new power state, using the platform firmware and/or + * the device's PCI PM registers. + * + * RETURN VALUE: + * -EINVAL if the requested state is invalid. + * -EIO if device does not support PCI PM or its PM capabilities register has a + * wrong version, or device doesn't support the requested state. + * 0 if the transition is to D1 or D2 but D1 and D2 are not supported. + * 0 if device already is in the requested state. + * 0 if the transition is to D3 but D3 is not supported. + * 0 if device's power state has been successfully changed. + */ +int pci_set_power_state(struct pci_dev *dev, pci_power_t state) +{ + return __pci_set_power_state(dev, state, false); +} EXPORT_SYMBOL(pci_set_power_state); +int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state) +{ + lockdep_assert_held(&pci_bus_sem); + + return __pci_set_power_state(dev, state, true); +} +EXPORT_SYMBOL(pci_set_power_state_locked); + #define PCI_EXP_SAVE_REGS 7 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev, @@ -2496,29 +2522,36 @@ static void pci_pme_list_scan(struct work_struct *work) if (pdev->pme_poll) { struct pci_dev *bridge = pdev->bus->self; struct device *dev = &pdev->dev; - int pm_status; + struct device *bdev = bridge ? 
&bridge->dev : NULL; + int bref = 0; /* - * If bridge is in low power state, the - * configuration space of subordinate devices - * may be not accessible + * If we have a bridge, it should be in an active/D0 + * state or the configuration space of subordinate + * devices may not be accessible or stable over the + * course of the call. */ - if (bridge && bridge->current_state != PCI_D0) - continue; + if (bdev) { + bref = pm_runtime_get_if_active(bdev, true); + if (!bref) + continue; + + if (bridge->current_state != PCI_D0) + goto put_bridge; + } /* - * If the device is in a low power state it - * should not be polled either. + * The device itself should be suspended but config + * space must be accessible, therefore it cannot be in + * D3cold. */ - pm_status = pm_runtime_get_if_active(dev, true); - if (!pm_status) - continue; - - if (pdev->current_state != PCI_D3cold) + if (pm_runtime_suspended(dev) && + pdev->current_state != PCI_D3cold) pci_pme_wakeup(pdev, NULL); - if (pm_status > 0) - pm_runtime_put(dev); +put_bridge: + if (bref > 0) + pm_runtime_put(bdev); } else { list_del(&pme_dev->list); kfree(pme_dev); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 2336a8d1edab..e9750b1b19ba 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -571,12 +571,12 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt); #ifdef CONFIG_PCIEASPM void pcie_aspm_init_link_state(struct pci_dev *pdev); void pcie_aspm_exit_link_state(struct pci_dev *pdev); -void pcie_aspm_pm_state_change(struct pci_dev *pdev); +void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked); void pcie_aspm_powersave_config_link(struct pci_dev *pdev); #else static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { } static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { } -static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { } +static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { } static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { } #endif diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 5a0066ecc3c5..bc0bd86695ec 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -1003,8 +1003,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) up_read(&pci_bus_sem); } -/* @pdev: the root port or switch downstream port */ -void pcie_aspm_pm_state_change(struct pci_dev *pdev) +/* + * @pdev: the root port or switch downstream port + * @locked: whether pci_bus_sem is held + */ +void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { struct pcie_link_state *link = pdev->link_state; @@ -1014,12 +1017,14 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev) * Devices changed PM state, we should recheck if latency * meets all functions' requirement */ - down_read(&pci_bus_sem); + if (!locked) + down_read(&pci_bus_sem); mutex_lock(&aspm_lock); pcie_update_aspm_capable(link->root); pcie_config_aspm_path(link); mutex_unlock(&aspm_lock); - up_read(&pci_bus_sem); + if (!locked) + up_read(&pci_bus_sem); } void pcie_aspm_powersave_config_link(struct pci_dev *pdev) diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index c584165b13ba..7e3aa7e2345f 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -2305,6 +2305,17 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) dev_dbg(cmn->dev, "ignoring external node %llx\n", reg); continue; } + /* + * AmpereOneX erratum AC04_MESH_1 makes some XPs report a bogus + * child count larger than the 
number of valid child pointers. + * A child offset of 0 can only occur on CMN-600; otherwise it + * would imply the root node being its own grandchild, which + * we can safely dismiss in general. + */ + if (reg == 0 && cmn->part != PART_CMN600) { + dev_dbg(cmn->dev, "bogus child pointer?\n"); + continue; + } arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn); diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 6303b82566f9..9e5d7fa647b6 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -716,7 +716,7 @@ static void smmu_pmu_free_msis(void *data) { struct device *dev = data; - platform_msi_domain_free_irqs(dev); + platform_device_msi_free_irqs_all(dev); } static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) @@ -746,7 +746,7 @@ static void smmu_pmu_setup_msi(struct smmu_pmu *pmu) if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI)) return; - ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg); + ret = platform_device_msi_init_and_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg); if (ret) { dev_warn(dev, "failed to allocate MSIs\n"); return; diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c index 365d964b0f6a..308c9969642e 100644 --- a/drivers/perf/cxl_pmu.c +++ b/drivers/perf/cxl_pmu.c @@ -59,7 +59,7 @@ #define CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK GENMASK_ULL(63, 59) #define CXL_PMU_FILTER_CFG_REG(n, f) (0x400 + 4 * ((f) + (n) * 8)) -#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(15, 0) +#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(31, 0) #define CXL_PMU_COUNTER_REG(n) (0xc00 + 8 * (n)) @@ -314,9 +314,9 @@ static bool cxl_pmu_config1_get_edge(struct perf_event *event) } /* - * CPMU specification allows for 8 filters, each with a 16 bit value... - * So we need to find 8x16bits to store it in. - * As the value used for disable is 0xffff, a separate enable switch + * CPMU specification allows for 8 filters, each with a 32 bit value... + * So we need to find 8x32bits to store it in. + * As the value used for disable is 0xffff_ffff, a separate enable switch * is needed. 
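/*
 * Illustrative aside, not part of the patch: with the filter value
 * widened to 32 bits, "no filtering" becomes all-ones rather than
 * 0xffff, and the filter register offset for, say, filter 0 of
 * counter 2 works out as
 *
 *   CXL_PMU_FILTER_CFG_REG(2, 0) = 0x400 + 4 * (0 + 2 * 8) = 0x440
 *
 * example_disable_filter() is an illustrative name.
 */
static void example_disable_filter(void __iomem *base, int idx)
{
        /* 0xffff_ffff == filtering disabled, per the comment above */
        writeq(GENMASK(31, 0), base + CXL_PMU_FILTER_CFG_REG(idx, 0));
}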
*/ @@ -419,7 +419,7 @@ static struct attribute *cxl_pmu_event_attrs[] = { CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmp, CXL_PMU_GID_S2M_NDR, BIT(0)), CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmps, CXL_PMU_GID_S2M_NDR, BIT(1)), CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmpe, CXL_PMU_GID_S2M_NDR, BIT(2)), - CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(3)), + CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(4)), /* CXL rev 3.0 Table 3-46 S2M DRS opcodes */ CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdata, CXL_PMU_GID_S2M_DRS, BIT(0)), CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdatanxm, CXL_PMU_GID_S2M_DRS, BIT(1)), @@ -642,7 +642,7 @@ static void cxl_pmu_event_start(struct perf_event *event, int flags) if (cxl_pmu_config1_hdm_filter_en(event)) cfg = cxl_pmu_config2_get_hdm_decoder(event); else - cfg = GENMASK(15, 0); /* No filtering if 0xFFFF_FFFF */ + cfg = GENMASK(31, 0); /* No filtering if 0xFFFF_FFFF */ writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0)); } diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c index 0dda70e1ef90..c78a6fd6c57f 100644 --- a/drivers/perf/riscv_pmu.c +++ b/drivers/perf/riscv_pmu.c @@ -150,19 +150,11 @@ u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event) struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; - if (!rvpmu->ctr_get_width) - /** - * If the pmu driver doesn't support counter width, set it to default - * maximum allowed by the specification. - */ - cwidth = 63; - else { - if (hwc->idx == -1) - /* Handle init case where idx is not initialized yet */ - cwidth = rvpmu->ctr_get_width(0); - else - cwidth = rvpmu->ctr_get_width(hwc->idx); - } + if (hwc->idx == -1) + /* Handle init case where idx is not initialized yet */ + cwidth = rvpmu->ctr_get_width(0); + else + cwidth = rvpmu->ctr_get_width(hwc->idx); return GENMASK_ULL(cwidth, 0); } diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c index 79fdd667922e..fa0bccf4edf2 100644 --- a/drivers/perf/riscv_pmu_legacy.c +++ b/drivers/perf/riscv_pmu_legacy.c @@ -37,6 +37,12 @@ static int pmu_legacy_event_map(struct perf_event *event, u64 *config) return pmu_legacy_ctr_get_idx(event); } +/* cycle & instret are always 64 bit, one bit less according to SBI spec */ +static int pmu_legacy_ctr_get_width(int idx) +{ + return 63; +} + static u64 pmu_legacy_read_ctr(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -111,12 +117,14 @@ static void pmu_legacy_init(struct riscv_pmu *pmu) pmu->ctr_stop = NULL; pmu->event_map = pmu_legacy_event_map; pmu->ctr_get_idx = pmu_legacy_ctr_get_idx; - pmu->ctr_get_width = NULL; + pmu->ctr_get_width = pmu_legacy_ctr_get_width; pmu->ctr_clear_idx = NULL; pmu->ctr_read = pmu_legacy_read_ctr; pmu->event_mapped = pmu_legacy_event_mapped; pmu->event_unmapped = pmu_legacy_event_unmapped; pmu->csr_index = pmu_legacy_csr_index; + pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE; perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW); } diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 16acd4dcdb96..452aab49db1e 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -512,7 +512,7 @@ static void pmu_sbi_set_scounteren(void *arg) if (event->hw.idx != -1) csr_write(CSR_SCOUNTEREN, - csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event))); + csr_read(CSR_SCOUNTEREN) | BIT(pmu_sbi_csr_index(event))); } static void pmu_sbi_reset_scounteren(void *arg) @@ -521,7 +521,7 @@ static void 
pmu_sbi_reset_scounteren(void *arg) if (event->hw.idx != -1) csr_write(CSR_SCOUNTEREN, - csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event))); + csr_read(CSR_SCOUNTEREN) & ~BIT(pmu_sbi_csr_index(event))); } static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival) @@ -731,14 +731,14 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) /* compute hardware counter index */ hidx = info->csr - CSR_CYCLE; /* check if the corresponding bit is set in sscountovf */ - if (!(overflow & (1 << hidx))) + if (!(overflow & BIT(hidx))) continue; /* * Keep a track of overflowed counters so that they can be started * with updated initial value. */ - overflowed_ctrs |= 1 << lidx; + overflowed_ctrs |= BIT(lidx); hw_evt = &event->hw; riscv_pmu_event_update(event); perf_sample_data_init(&data, 0, hw_evt->last_period); diff --git a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c index e625b32889bf..0928a526e2ab 100644 --- a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c +++ b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c @@ -706,7 +706,7 @@ static int mixel_dphy_probe(struct platform_device *pdev) return ret; } - priv->id = of_alias_get_id(np, "mipi_dphy"); + priv->id = of_alias_get_id(np, "mipi-dphy"); if (priv->id < 0) { dev_err(dev, "Failed to get phy node alias id: %d\n", priv->id); diff --git a/drivers/phy/microchip/lan966x_serdes.c b/drivers/phy/microchip/lan966x_serdes.c index c1a41b6cd29b..b5ac2b7995e7 100644 --- a/drivers/phy/microchip/lan966x_serdes.c +++ b/drivers/phy/microchip/lan966x_serdes.c @@ -96,6 +96,8 @@ static const struct serdes_mux lan966x_serdes_muxes[] = { SERDES_MUX_SGMII(SERDES6G(1), 3, HSIO_HW_CFG_SD6G_1_CFG, HSIO_HW_CFG_SD6G_1_CFG_SET(1)), + SERDES_MUX_SGMII(SERDES6G(2), 4, 0, 0), + SERDES_MUX_RGMII(RGMII(0), 2, HSIO_HW_CFG_RGMII_0_CFG | HSIO_HW_CFG_RGMII_ENA | HSIO_HW_CFG_GMII_ENA, diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c index a623f092b11f..a43e20abb10d 100644 --- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c +++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c @@ -37,56 +37,28 @@ #define EUSB2_TUNE_EUSB_EQU 0x5A #define EUSB2_TUNE_EUSB_HS_COMP_CUR 0x5B -#define QCOM_EUSB2_REPEATER_INIT_CFG(r, v) \ - { \ - .reg = r, \ - .val = v, \ - } - -enum reg_fields { - F_TUNE_EUSB_HS_COMP_CUR, - F_TUNE_EUSB_EQU, - F_TUNE_EUSB_SLEW, - F_TUNE_USB2_HS_COMP_CUR, - F_TUNE_USB2_PREEM, - F_TUNE_USB2_EQU, - F_TUNE_USB2_SLEW, - F_TUNE_SQUELCH_U, - F_TUNE_HSDISC, - F_TUNE_RES_FSDIF, - F_TUNE_IUSB2, - F_TUNE_USB2_CROSSOVER, - F_NUM_TUNE_FIELDS, - - F_FORCE_VAL_5 = F_NUM_TUNE_FIELDS, - F_FORCE_EN_5, - - F_EN_CTL1, - - F_RPTR_STATUS, - F_NUM_FIELDS, -}; - -static struct reg_field eusb2_repeater_tune_reg_fields[F_NUM_FIELDS] = { - [F_TUNE_EUSB_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_EUSB_HS_COMP_CUR, 0, 1), - [F_TUNE_EUSB_EQU] = REG_FIELD(EUSB2_TUNE_EUSB_EQU, 0, 1), - [F_TUNE_EUSB_SLEW] = REG_FIELD(EUSB2_TUNE_EUSB_SLEW, 0, 1), - [F_TUNE_USB2_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_USB2_HS_COMP_CUR, 0, 1), - [F_TUNE_USB2_PREEM] = REG_FIELD(EUSB2_TUNE_USB2_PREEM, 0, 2), - [F_TUNE_USB2_EQU] = REG_FIELD(EUSB2_TUNE_USB2_EQU, 0, 1), - [F_TUNE_USB2_SLEW] = REG_FIELD(EUSB2_TUNE_USB2_SLEW, 0, 1), - [F_TUNE_SQUELCH_U] = REG_FIELD(EUSB2_TUNE_SQUELCH_U, 0, 2), - [F_TUNE_HSDISC] = REG_FIELD(EUSB2_TUNE_HSDISC, 0, 2), - [F_TUNE_RES_FSDIF] = REG_FIELD(EUSB2_TUNE_RES_FSDIF, 0, 2), - [F_TUNE_IUSB2] = REG_FIELD(EUSB2_TUNE_IUSB2, 0, 3), - [F_TUNE_USB2_CROSSOVER] = 
REG_FIELD(EUSB2_TUNE_USB2_CROSSOVER, 0, 2), - - [F_FORCE_VAL_5] = REG_FIELD(EUSB2_FORCE_VAL_5, 0, 7), - [F_FORCE_EN_5] = REG_FIELD(EUSB2_FORCE_EN_5, 0, 7), - - [F_EN_CTL1] = REG_FIELD(EUSB2_EN_CTL1, 0, 7), - - [F_RPTR_STATUS] = REG_FIELD(EUSB2_RPTR_STATUS, 0, 7), +enum eusb2_reg_layout { + TUNE_EUSB_HS_COMP_CUR, + TUNE_EUSB_EQU, + TUNE_EUSB_SLEW, + TUNE_USB2_HS_COMP_CUR, + TUNE_USB2_PREEM, + TUNE_USB2_EQU, + TUNE_USB2_SLEW, + TUNE_SQUELCH_U, + TUNE_HSDISC, + TUNE_RES_FSDIF, + TUNE_IUSB2, + TUNE_USB2_CROSSOVER, + NUM_TUNE_FIELDS, + + FORCE_VAL_5 = NUM_TUNE_FIELDS, + FORCE_EN_5, + + EN_CTL1, + + RPTR_STATUS, + LAYOUT_SIZE, }; struct eusb2_repeater_cfg { @@ -98,10 +70,11 @@ struct eusb2_repeater_cfg { struct eusb2_repeater { struct device *dev; - struct regmap_field *regs[F_NUM_FIELDS]; + struct regmap *regmap; struct phy *phy; struct regulator_bulk_data *vregs; const struct eusb2_repeater_cfg *cfg; + u32 base; enum phy_mode mode; }; @@ -109,10 +82,10 @@ static const char * const pm8550b_vreg_l[] = { "vdd18", "vdd3", }; -static const u32 pm8550b_init_tbl[F_NUM_TUNE_FIELDS] = { - [F_TUNE_IUSB2] = 0x8, - [F_TUNE_SQUELCH_U] = 0x3, - [F_TUNE_USB2_PREEM] = 0x5, +static const u32 pm8550b_init_tbl[NUM_TUNE_FIELDS] = { + [TUNE_IUSB2] = 0x8, + [TUNE_SQUELCH_U] = 0x3, + [TUNE_USB2_PREEM] = 0x5, }; static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = { @@ -140,47 +113,42 @@ static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr) static int eusb2_repeater_init(struct phy *phy) { - struct reg_field *regfields = eusb2_repeater_tune_reg_fields; struct eusb2_repeater *rptr = phy_get_drvdata(phy); struct device_node *np = rptr->dev->of_node; - u32 init_tbl[F_NUM_TUNE_FIELDS] = { 0 }; - u8 override; + struct regmap *regmap = rptr->regmap; + const u32 *init_tbl = rptr->cfg->init_tbl; + u8 tune_usb2_preem = init_tbl[TUNE_USB2_PREEM]; + u8 tune_hsdisc = init_tbl[TUNE_HSDISC]; + u8 tune_iusb2 = init_tbl[TUNE_IUSB2]; + u32 base = rptr->base; u32 val; int ret; - int i; + + of_property_read_u8(np, "qcom,tune-usb2-amplitude", &tune_iusb2); + of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &tune_hsdisc); + of_property_read_u8(np, "qcom,tune-usb2-preem", &tune_usb2_preem); ret = regulator_bulk_enable(rptr->cfg->num_vregs, rptr->vregs); if (ret) return ret; - regmap_field_update_bits(rptr->regs[F_EN_CTL1], EUSB2_RPTR_EN, EUSB2_RPTR_EN); + regmap_write(regmap, base + EUSB2_EN_CTL1, EUSB2_RPTR_EN); - for (i = 0; i < F_NUM_TUNE_FIELDS; i++) { - if (init_tbl[i]) { - regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]); - } else { - /* Write 0 if there's no value set */ - u32 mask = GENMASK(regfields[i].msb, regfields[i].lsb); - - regmap_field_update_bits(rptr->regs[i], mask, 0); - } - } - memcpy(init_tbl, rptr->cfg->init_tbl, sizeof(init_tbl)); + regmap_write(regmap, base + EUSB2_TUNE_EUSB_HS_COMP_CUR, init_tbl[TUNE_EUSB_HS_COMP_CUR]); + regmap_write(regmap, base + EUSB2_TUNE_EUSB_EQU, init_tbl[TUNE_EUSB_EQU]); + regmap_write(regmap, base + EUSB2_TUNE_EUSB_SLEW, init_tbl[TUNE_EUSB_SLEW]); + regmap_write(regmap, base + EUSB2_TUNE_USB2_HS_COMP_CUR, init_tbl[TUNE_USB2_HS_COMP_CUR]); + regmap_write(regmap, base + EUSB2_TUNE_USB2_EQU, init_tbl[TUNE_USB2_EQU]); + regmap_write(regmap, base + EUSB2_TUNE_USB2_SLEW, init_tbl[TUNE_USB2_SLEW]); + regmap_write(regmap, base + EUSB2_TUNE_SQUELCH_U, init_tbl[TUNE_SQUELCH_U]); + regmap_write(regmap, base + EUSB2_TUNE_RES_FSDIF, init_tbl[TUNE_RES_FSDIF]); + regmap_write(regmap, base + EUSB2_TUNE_USB2_CROSSOVER, init_tbl[TUNE_USB2_CROSSOVER]); - if 
(!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &override)) - init_tbl[F_TUNE_IUSB2] = override; + regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, tune_usb2_preem); + regmap_write(regmap, base + EUSB2_TUNE_HSDISC, tune_hsdisc); + regmap_write(regmap, base + EUSB2_TUNE_IUSB2, tune_iusb2); - if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &override)) - init_tbl[F_TUNE_HSDISC] = override; - - if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &override)) - init_tbl[F_TUNE_USB2_PREEM] = override; - - for (i = 0; i < F_NUM_TUNE_FIELDS; i++) - regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]); - - ret = regmap_field_read_poll_timeout(rptr->regs[F_RPTR_STATUS], - val, val & RPTR_OK, 10, 5); + ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, val, val & RPTR_OK, 10, 5); if (ret) dev_err(rptr->dev, "initialization timed-out\n"); @@ -191,6 +159,8 @@ static int eusb2_repeater_set_mode(struct phy *phy, enum phy_mode mode, int submode) { struct eusb2_repeater *rptr = phy_get_drvdata(phy); + struct regmap *regmap = rptr->regmap; + u32 base = rptr->base; switch (mode) { case PHY_MODE_USB_HOST: @@ -199,10 +169,8 @@ static int eusb2_repeater_set_mode(struct phy *phy, * per eUSB 1.2 Spec. Below implement software workaround until * PHY and controller is fixing seen observation. */ - regmap_field_update_bits(rptr->regs[F_FORCE_EN_5], - F_CLK_19P2M_EN, F_CLK_19P2M_EN); - regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5], - V_CLK_19P2M_EN, V_CLK_19P2M_EN); + regmap_write(regmap, base + EUSB2_FORCE_EN_5, F_CLK_19P2M_EN); + regmap_write(regmap, base + EUSB2_FORCE_VAL_5, V_CLK_19P2M_EN); break; case PHY_MODE_USB_DEVICE: /* @@ -211,10 +179,8 @@ static int eusb2_repeater_set_mode(struct phy *phy, * repeater doesn't clear previous value due to shared * regulators (say host <-> device mode switch). 
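/*
 * Illustrative aside, not part of the patch: the eusb2_repeater_init()
 * hunk above relies on of_property_read_u8() leaving its output
 * untouched on error, so the init-table default survives whenever the
 * DT override property is absent:
 */
        u8 tune_iusb2 = init_tbl[TUNE_IUSB2];   /* default, e.g. 0x8 on PM8550B */

        /* only overwritten if "qcom,tune-usb2-amplitude" exists and is valid */
        of_property_read_u8(np, "qcom,tune-usb2-amplitude", &tune_iusb2);
        regmap_write(regmap, base + EUSB2_TUNE_IUSB2, tune_iusb2);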
*/ - regmap_field_update_bits(rptr->regs[F_FORCE_EN_5], - F_CLK_19P2M_EN, 0); - regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5], - V_CLK_19P2M_EN, 0); + regmap_write(regmap, base + EUSB2_FORCE_EN_5, 0); + regmap_write(regmap, base + EUSB2_FORCE_VAL_5, 0); break; default: return -EINVAL; @@ -243,9 +209,8 @@ static int eusb2_repeater_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct phy_provider *phy_provider; struct device_node *np = dev->of_node; - struct regmap *regmap; - int i, ret; u32 res; + int ret; rptr = devm_kzalloc(dev, sizeof(*rptr), GFP_KERNEL); if (!rptr) @@ -258,22 +223,15 @@ static int eusb2_repeater_probe(struct platform_device *pdev) if (!rptr->cfg) return -EINVAL; - regmap = dev_get_regmap(dev->parent, NULL); - if (!regmap) + rptr->regmap = dev_get_regmap(dev->parent, NULL); + if (!rptr->regmap) return -ENODEV; ret = of_property_read_u32(np, "reg", &res); if (ret < 0) return ret; - for (i = 0; i < F_NUM_FIELDS; i++) - eusb2_repeater_tune_reg_fields[i].reg += res; - - ret = devm_regmap_field_bulk_alloc(dev, regmap, rptr->regs, - eusb2_repeater_tune_reg_fields, - F_NUM_FIELDS); - if (ret) - return ret; + rptr->base = res; ret = eusb2_repeater_init_vregs(rptr); if (ret < 0) { diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c index c2590579190a..03fb0d4b75d7 100644 --- a/drivers/phy/qualcomm/phy-qcom-m31.c +++ b/drivers/phy/qualcomm/phy-qcom-m31.c @@ -299,7 +299,7 @@ static int m31usb_phy_probe(struct platform_device *pdev) qphy->vreg = devm_regulator_get(dev, "vdda-phy"); if (IS_ERR(qphy->vreg)) - return dev_err_probe(dev, PTR_ERR(qphy->phy), + return dev_err_probe(dev, PTR_ERR(qphy->vreg), "failed to get vreg\n"); phy_set_drvdata(qphy->phy, qphy); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c index 1ad10110dd25..17c4ad7553a5 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ -3562,14 +3562,6 @@ static int qmp_combo_probe(struct platform_device *pdev) if (ret) return ret; - ret = qmp_combo_typec_switch_register(qmp); - if (ret) - return ret; - - ret = drm_aux_bridge_register(dev); - if (ret) - return ret; - /* Check for legacy binding with child nodes. 
*/ usb_np = of_get_child_by_name(dev->of_node, "usb3-phy"); if (usb_np) { @@ -3589,6 +3581,14 @@ static int qmp_combo_probe(struct platform_device *pdev) if (ret) goto err_node_put; + ret = qmp_combo_typec_switch_register(qmp); + if (ret) + goto err_node_put; + + ret = drm_aux_bridge_register(dev); + if (ret) + goto err_node_put; + pm_runtime_set_active(dev); ret = devm_pm_runtime_enable(dev); if (ret) diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c index 243cc2b9a0fb..5c003988c35d 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c @@ -1556,6 +1556,14 @@ static const char * const qmp_phy_vreg_l[] = { "vdda-phy", "vdda-pll", }; +static const struct qmp_usb_offsets qmp_usb_offsets_v3 = { + .serdes = 0, + .pcs = 0x800, + .pcs_misc = 0x600, + .tx = 0x200, + .rx = 0x400, +}; + static const struct qmp_usb_offsets qmp_usb_offsets_ipq9574 = { .serdes = 0, .pcs = 0x800, @@ -1564,7 +1572,7 @@ static const struct qmp_usb_offsets qmp_usb_offsets_ipq9574 = { .rx = 0x400, }; -static const struct qmp_usb_offsets qmp_usb_offsets_v3 = { +static const struct qmp_usb_offsets qmp_usb_offsets_v3_msm8996 = { .serdes = 0, .pcs = 0x600, .tx = 0x200, @@ -1613,6 +1621,24 @@ static const struct qmp_usb_offsets qmp_usb_offsets_v7 = { .rx = 0x1000, }; +static const struct qmp_phy_cfg ipq6018_usb3phy_cfg = { + .lanes = 1, + + .offsets = &qmp_usb_offsets_v3, + + .serdes_tbl = ipq9574_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(ipq9574_usb3_serdes_tbl), + .tx_tbl = msm8996_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl), + .rx_tbl = ipq8074_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(ipq8074_usb3_rx_tbl), + .pcs_tbl = ipq8074_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(ipq8074_usb3_pcs_tbl), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v3_usb3phy_regs_layout, +}; + static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = { .lanes = 1, @@ -1652,7 +1678,7 @@ static const struct qmp_phy_cfg ipq9574_usb3phy_cfg = { static const struct qmp_phy_cfg msm8996_usb3phy_cfg = { .lanes = 1, - .offsets = &qmp_usb_offsets_v3, + .offsets = &qmp_usb_offsets_v3_msm8996, .serdes_tbl = msm8996_usb3_serdes_tbl, .serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl), @@ -2563,7 +2589,7 @@ err_node_put: static const struct of_device_id qmp_usb_of_match_table[] = { { .compatible = "qcom,ipq6018-qmp-usb3-phy", - .data = &ipq8074_usb3phy_cfg, + .data = &ipq6018_usb3phy_cfg, }, { .compatible = "qcom,ipq8074-qmp-usb3-phy", .data = &ipq8074_usb3phy_cfg, diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index e53eace7c91e..6387c0d34c55 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -673,8 +673,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) channel->irq = platform_get_irq_optional(pdev, 0); channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node); if (channel->dr_mode != USB_DR_MODE_UNKNOWN) { - int ret; - channel->is_otg_channel = true; channel->uses_otg_pins = !of_property_read_bool(dev->of_node, "renesas,no-otg-pins"); @@ -738,8 +736,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) ret = PTR_ERR(provider); goto error; } else if (channel->is_otg_channel) { - int ret; - ret = device_create_file(dev, &dev_attr_role); if (ret < 0) goto error; diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c index dd2913ac0fa2..78e19b128962 100644 --- 
a/drivers/phy/ti/phy-omap-usb2.c +++ b/drivers/phy/ti/phy-omap-usb2.c @@ -117,7 +117,7 @@ static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled) { struct omap_usb *phy = phy_to_omapusb(otg->usb_phy); - if (!phy->comparator) + if (!phy->comparator || !phy->comparator->set_vbus) return -ENODEV; return phy->comparator->set_vbus(phy->comparator, enabled); @@ -127,7 +127,7 @@ static int omap_usb_start_srp(struct usb_otg *otg) { struct omap_usb *phy = phy_to_omapusb(otg->usb_phy); - if (!phy->comparator) + if (!phy->comparator || !phy->comparator->start_srp) return -ENODEV; return phy->comparator->start_srp(phy->comparator); diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index ee56856cb80c..bbcdece83bf4 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1644,7 +1644,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what) const struct pinctrl_ops *ops = pctldev->desc->pctlops; unsigned int i, pin; #ifdef CONFIG_GPIOLIB - struct gpio_device *gdev __free(gpio_device_put) = NULL; + struct gpio_device *gdev = NULL; struct pinctrl_gpio_range *range; int gpio_num; #endif diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 03ecb3d1aaf6..49f89b70dcec 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -1159,7 +1159,7 @@ static int amd_gpio_probe(struct platform_device *pdev) } ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler, - IRQF_SHARED, KBUILD_MODNAME, gpio_dev); + IRQF_SHARED | IRQF_ONESHOT, KBUILD_MODNAME, gpio_dev); if (ret) goto out2; diff --git a/drivers/pinctrl/stm32/pinctrl-stm32mp257.c b/drivers/pinctrl/stm32/pinctrl-stm32mp257.c index 73f091cd827e..23aebd4695e9 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32mp257.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32mp257.c @@ -2562,7 +2562,7 @@ static const struct of_device_id stm32mp257_pctrl_match[] = { }; static const struct dev_pm_ops stm32_pinctrl_dev_pm_ops = { - SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, stm32_pinctrl_resume) + SET_LATE_SYSTEM_SLEEP_PM_OPS(stm32_pinctrl_suspend, stm32_pinctrl_resume) }; static struct platform_driver stm32mp257_pinctrl_driver = { diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c index feaa09f5b35a..4f734e049f4a 100644 --- a/drivers/platform/x86/amd/pmf/core.c +++ b/drivers/platform/x86/amd/pmf/core.c @@ -296,7 +296,8 @@ static int amd_pmf_suspend_handler(struct device *dev) { struct amd_pmf_dev *pdev = dev_get_drvdata(dev); - kfree(pdev->buf); + if (pdev->smart_pc_enabled) + cancel_delayed_work_sync(&pdev->pb_work); return 0; } @@ -312,6 +313,9 @@ static int amd_pmf_resume_handler(struct device *dev) return ret; } + if (pdev->smart_pc_enabled) + schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000)); + return 0; } @@ -330,9 +334,14 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev) dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n"); } - if (!amd_pmf_init_smart_pc(dev)) { + amd_pmf_init_smart_pc(dev); + if (dev->smart_pc_enabled) { dev_dbg(dev->dev, "Smart PC Solution Enabled\n"); - } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) { + /* If Smart PC is enabled, no need to check for other features */ + return; + } + + if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) { amd_pmf_init_auto_mode(dev); dev_dbg(dev->dev, "Auto Mode Init done\n"); } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) || @@ -351,7 +360,7 @@ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev) 
amd_pmf_deinit_sps(dev); } - if (!dev->smart_pc_enabled) { + if (dev->smart_pc_enabled) { amd_pmf_deinit_smart_pc(dev); } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) { amd_pmf_deinit_auto_mode(dev); diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h index 16999c5b334f..66cae1cca73c 100644 --- a/drivers/platform/x86/amd/pmf/pmf.h +++ b/drivers/platform/x86/amd/pmf/pmf.h @@ -441,11 +441,6 @@ struct apmf_dyn_slider_output { struct apmf_cnqf_power_set ps[APMF_CNQF_MAX]; } __packed; -enum smart_pc_status { - PMF_SMART_PC_ENABLED, - PMF_SMART_PC_DISABLED, -}; - /* Smart PC - TA internals */ enum system_state { SYSTEM_STATE_S0i3, diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c index f8c0177afb0d..dcbe8f85e122 100644 --- a/drivers/platform/x86/amd/pmf/tee-if.c +++ b/drivers/platform/x86/amd/pmf/tee-if.c @@ -252,15 +252,17 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev) cookie = readl(dev->policy_buf + POLICY_COOKIE_OFFSET); length = readl(dev->policy_buf + POLICY_COOKIE_LEN); - if (cookie != POLICY_SIGN_COOKIE || !length) + if (cookie != POLICY_SIGN_COOKIE || !length) { + dev_dbg(dev->dev, "cookie doesn't match\n"); return -EINVAL; + } /* Update the actual length */ dev->policy_sz = length + 512; res = amd_pmf_invoke_cmd_init(dev); if (res == TA_PMF_TYPE_SUCCESS) { /* Now its safe to announce that smart pc is enabled */ - dev->smart_pc_enabled = PMF_SMART_PC_ENABLED; + dev->smart_pc_enabled = true; /* * Start collecting the data from TA FW after a small delay * or else, we might end up getting stale values. @@ -268,7 +270,7 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev) schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3)); } else { dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res); - dev->smart_pc_enabled = PMF_SMART_PC_DISABLED; + dev->smart_pc_enabled = false; return res; } @@ -336,25 +338,6 @@ static void amd_pmf_remove_pb(struct amd_pmf_dev *dev) {} static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev) {} #endif -static int amd_pmf_get_bios_buffer(struct amd_pmf_dev *dev) -{ - dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL); - if (!dev->policy_buf) - return -ENOMEM; - - dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz); - if (!dev->policy_base) - return -ENOMEM; - - memcpy(dev->policy_buf, dev->policy_base, dev->policy_sz); - - amd_pmf_hex_dump_pb(dev); - if (pb_side_load) - amd_pmf_open_pb(dev, dev->dbgfs_dir); - - return amd_pmf_start_policy_engine(dev); -} - static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const void *data) { return ver->impl_id == TEE_IMPL_ID_AMDTEE; @@ -453,22 +436,59 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev) return ret; INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd); - amd_pmf_set_dram_addr(dev, true); - amd_pmf_get_bios_buffer(dev); + + ret = amd_pmf_set_dram_addr(dev, true); + if (ret) + goto error; + + dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz); + if (!dev->policy_base) { + ret = -ENOMEM; + goto error; + } + + dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL); + if (!dev->policy_buf) { + ret = -ENOMEM; + goto error; + } + + memcpy(dev->policy_buf, dev->policy_base, dev->policy_sz); + + amd_pmf_hex_dump_pb(dev); + dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL); - if (!dev->prev_data) - return -ENOMEM; + if (!dev->prev_data) { + ret = -ENOMEM; + goto error; + } + + ret = 
amd_pmf_start_policy_engine(dev); + if (ret) + goto error; + + if (pb_side_load) + amd_pmf_open_pb(dev, dev->dbgfs_dir); + + return 0; - return dev->smart_pc_enabled; +error: + amd_pmf_deinit_smart_pc(dev); + + return ret; } void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev) { - if (pb_side_load) + if (pb_side_load && dev->esbin) amd_pmf_remove_pb(dev); + cancel_delayed_work_sync(&dev->pb_work); kfree(dev->prev_data); + dev->prev_data = NULL; kfree(dev->policy_buf); - cancel_delayed_work_sync(&dev->pb_work); + dev->policy_buf = NULL; + kfree(dev->buf); + dev->buf = NULL; amd_pmf_tee_deinit(dev); } diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c index b6708bab7c53..527d8fbc7cc1 100644 --- a/drivers/platform/x86/intel/int0002_vgpio.c +++ b/drivers/platform/x86/intel/int0002_vgpio.c @@ -196,7 +196,7 @@ static int int0002_probe(struct platform_device *pdev) * IRQs into gpiolib. */ ret = devm_request_irq(dev, irq, int0002_irq, - IRQF_SHARED, "INT0002", chip); + IRQF_ONESHOT | IRQF_SHARED, "INT0002", chip); if (ret) { dev_err(dev, "Error requesting IRQ %d: %d\n", irq, ret); return ret; diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c index a5e0f5c22179..b89c0dda9e5d 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c @@ -242,7 +242,7 @@ static int __init intel_uncore_init(void) return -ENODEV; uncore_max_entries = topology_max_packages() * - topology_max_die_per_package(); + topology_max_dies_per_package(); uncore_instances = kcalloc(uncore_max_entries, sizeof(*uncore_instances), GFP_KERNEL); if (!uncore_instances) diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c index 210b0a81b7ec..084c355c86f5 100644 --- a/drivers/platform/x86/intel/vbtn.c +++ b/drivers/platform/x86/intel/vbtn.c @@ -200,9 +200,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE); sparse_keymap_report_event(input_dev, event, val, autorelease); - - /* Some devices need this to report further events */ - acpi_evaluate_object(handle, "VBDL", NULL, NULL); } /* diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c index 6bd14d0132db..3d66e1d4eb1f 100644 --- a/drivers/platform/x86/p2sb.c +++ b/drivers/platform/x86/p2sb.c @@ -20,9 +20,11 @@ #define P2SBC_HIDE BIT(8) #define P2SB_DEVFN_DEFAULT PCI_DEVFN(31, 1) +#define P2SB_DEVFN_GOLDMONT PCI_DEVFN(13, 0) +#define SPI_DEVFN_GOLDMONT PCI_DEVFN(13, 2) static const struct x86_cpu_id p2sb_cpu_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, P2SB_DEVFN_GOLDMONT), {} }; @@ -98,21 +100,12 @@ static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn) static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn) { - unsigned int slot, fn; - - if (PCI_FUNC(devfn) == 0) { - /* - * When function number of the P2SB device is zero, scan it and - * other function numbers, and if devices are available, cache - * their BAR0s. 
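/*
 * Illustrative aside, not part of the patch: because the reworked
 * amd_pmf_init_smart_pc() above can jump to its error label from any
 * point, amd_pmf_deinit_smart_pc() is written to be safe on a
 * half-initialized device -- kfree(NULL) is a no-op, and clearing each
 * pointer after freeing it keeps a later deinit from double-freeing:
 */
        kfree(dev->policy_buf);
        dev->policy_buf = NULL;         /* safe if deinit runs again */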
- */ - slot = PCI_SLOT(devfn); - for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++) - p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn)); - } else { - /* Scan the P2SB device and cache its BAR0 */ - p2sb_scan_and_cache_devfn(bus, devfn); - } + /* Scan the P2SB device and cache its BAR0 */ + p2sb_scan_and_cache_devfn(bus, devfn); + + /* On Goldmont p2sb_bar() also gets called for the SPI controller */ + if (devfn == P2SB_DEVFN_GOLDMONT) + p2sb_scan_and_cache_devfn(bus, SPI_DEVFN_GOLDMONT); if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res)) return -ENOENT; diff --git a/drivers/platform/x86/serdev_helpers.h b/drivers/platform/x86/serdev_helpers.h new file mode 100644 index 000000000000..bcf3a0c356ea --- /dev/null +++ b/drivers/platform/x86/serdev_helpers.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * In some cases UART attached devices which require an in kernel driver, + * e.g. UART attached Bluetooth HCIs are described in the ACPI tables + * by an ACPI device with a broken or missing UartSerialBusV2() resource. + * + * This causes the kernel to create a /dev/ttyS# char-device for the UART + * instead of creating an in kernel serdev-controller + serdev-device pair + * for the in kernel driver. + * + * The quirk handling in acpi_quirk_skip_serdev_enumeration() makes the kernel + * create a serdev-controller device for these UARTs instead of a /dev/ttyS#. + * + * Instantiating the actual serdev-device to bind to is up to pdx86 code, + * this header provides a helper for getting the serdev-controller device. + */ +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/printk.h> +#include <linux/sprintf.h> +#include <linux/string.h> + +static inline struct device * +get_serdev_controller(const char *serial_ctrl_hid, + const char *serial_ctrl_uid, + int serial_ctrl_port, + const char *serdev_ctrl_name) +{ + struct device *ctrl_dev, *child; + struct acpi_device *ctrl_adev; + char name[32]; + int i; + + ctrl_adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1); + if (!ctrl_adev) { + pr_err("error could not get %s/%s serial-ctrl adev\n", + serial_ctrl_hid, serial_ctrl_uid); + return ERR_PTR(-ENODEV); + } + + /* get_first_physical_node() returns a weak ref */ + ctrl_dev = get_device(acpi_get_first_physical_node(ctrl_adev)); + if (!ctrl_dev) { + pr_err("error could not get %s/%s serial-ctrl physical node\n", + serial_ctrl_hid, serial_ctrl_uid); + ctrl_dev = ERR_PTR(-ENODEV); + goto put_ctrl_adev; + } + + /* Walk host -> uart-ctrl -> port -> serdev-ctrl */ + for (i = 0; i < 3; i++) { + switch (i) { + case 0: + snprintf(name, sizeof(name), "%s:0", dev_name(ctrl_dev)); + break; + case 1: + snprintf(name, sizeof(name), "%s.%d", + dev_name(ctrl_dev), serial_ctrl_port); + break; + case 2: + strscpy(name, serdev_ctrl_name, sizeof(name)); + break; + } + + child = device_find_child_by_name(ctrl_dev, name); + put_device(ctrl_dev); + if (!child) { + pr_err("error could not find '%s' device\n", name); + ctrl_dev = ERR_PTR(-ENODEV); + goto put_ctrl_adev; + } + + ctrl_dev = child; + } + +put_ctrl_adev: + acpi_dev_put(ctrl_adev); + return ctrl_dev; +} diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c index 3a396b763c49..ce3e08815a8e 100644 --- a/drivers/platform/x86/think-lmi.c +++ b/drivers/platform/x86/think-lmi.c @@ -1009,7 +1009,16 @@ static ssize_t current_value_store(struct kobject *kobj, * Note - this sets the variable and then the password as separate * WMI calls. 
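/*
 * Illustrative aside, not part of the patch: typical use of
 * get_serdev_controller() from the new serdev_helpers.h above.  The
 * returned reference is owned by the caller and must be dropped with
 * put_device(); the HID/UID/port/name values here are made up for the
 * example, as is example_get_serdev_ctrl().
 */
static struct device *example_get_serdev_ctrl(void)
{
        struct device *ctrl_dev;

        ctrl_dev = get_serdev_controller("8086228A", "1", 0, "serial0");
        if (IS_ERR(ctrl_dev))
                return NULL;

        /*
         * ctrl_dev is a counted reference to the serdev controller
         * device; pass to_serdev_controller(ctrl_dev) to
         * serdev_device_alloc() and drop the reference with
         * put_device() when done.
         */
        return ctrl_dev;
}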
Function tlmi_save_bios_settings will error if the * password is incorrect. + * Workstation's require the opcode to be set before changing the + * attribute. */ + if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) { + ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin", + tlmi_priv.pwd_admin->password); + if (ret) + goto out; + } + set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name, new_setting); if (!set_str) { @@ -1021,17 +1030,10 @@ static ssize_t current_value_store(struct kobject *kobj, if (ret) goto out; - if (tlmi_priv.save_mode == TLMI_SAVE_BULK) { + if (tlmi_priv.save_mode == TLMI_SAVE_BULK) tlmi_priv.save_required = true; - } else { - if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) { - ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin", - tlmi_priv.pwd_admin->password); - if (ret) - goto out; - } + else ret = tlmi_save_bios_settings(""); - } } else { /* old non-opcode based authentication method (deprecated) */ if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) { auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s;", diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index c4895e9bc714..5ecd9d33250d 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -10308,6 +10308,7 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode, return 0; default: /* Unknown function */ + pr_debug("unknown function 0x%x\n", funcmode); return -EOPNOTSUPP; } return 0; @@ -10493,8 +10494,8 @@ static void dytc_profile_refresh(void) return; perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF; - convert_dytc_to_profile(funcmode, perfmode, &profile); - if (profile != dytc_current_profile) { + err = convert_dytc_to_profile(funcmode, perfmode, &profile); + if (!err && profile != dytc_current_profile) { dytc_current_profile = profile; platform_profile_notify(); } diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 7aee5e9ff2b8..975cf24ae359 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -81,7 +81,7 @@ static const struct property_entry chuwi_hi8_air_props[] = { }; static const struct ts_dmi_data chuwi_hi8_air_data = { - .acpi_name = "MSSL1680:00", + .acpi_name = "MSSL1680", .properties = chuwi_hi8_air_props, }; @@ -415,18 +415,13 @@ static const struct property_entry gdix1001_upside_down_props[] = { { } }; -static const struct ts_dmi_data gdix1001_00_upside_down_data = { - .acpi_name = "GDIX1001:00", - .properties = gdix1001_upside_down_props, -}; - -static const struct ts_dmi_data gdix1001_01_upside_down_data = { - .acpi_name = "GDIX1001:01", +static const struct ts_dmi_data gdix1001_upside_down_data = { + .acpi_name = "GDIX1001", .properties = gdix1001_upside_down_props, }; -static const struct ts_dmi_data gdix1002_00_upside_down_data = { - .acpi_name = "GDIX1002:00", +static const struct ts_dmi_data gdix1002_upside_down_data = { + .acpi_name = "GDIX1002", .properties = gdix1001_upside_down_props, }; @@ -1412,7 +1407,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, { /* Juno Tablet */ - .driver_data = (void *)&gdix1002_00_upside_down_data, + .driver_data = (void *)&gdix1002_upside_down_data, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Default string"), /* Both product- and board-name being "Default string" is somewhat rare */ @@ -1658,7 +1653,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, { /* Teclast X89 (Android version / BIOS) */ - 
.driver_data = (void *)&gdix1001_00_upside_down_data, + .driver_data = (void *)&gdix1001_upside_down_data, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "WISKY"), DMI_MATCH(DMI_BOARD_NAME, "3G062i"), @@ -1666,7 +1661,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, { /* Teclast X89 (Windows version / BIOS) */ - .driver_data = (void *)&gdix1001_01_upside_down_data, + .driver_data = (void *)&gdix1001_upside_down_data, .matches = { /* tPAD is too generic, also match on bios date */ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"), @@ -1684,7 +1679,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, { /* Teclast X98 Pro */ - .driver_data = (void *)&gdix1001_00_upside_down_data, + .driver_data = (void *)&gdix1001_upside_down_data, .matches = { /* * Only match BIOS date, because the manufacturers @@ -1788,7 +1783,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, { /* "WinBook TW100" */ - .driver_data = (void *)&gdix1001_00_upside_down_data, + .driver_data = (void *)&gdix1001_upside_down_data, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "WinBook"), DMI_MATCH(DMI_PRODUCT_NAME, "TW100") @@ -1796,7 +1791,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, { /* WinBook TW700 */ - .driver_data = (void *)&gdix1001_00_upside_down_data, + .driver_data = (void *)&gdix1001_upside_down_data, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "WinBook"), DMI_MATCH(DMI_PRODUCT_NAME, "TW700") @@ -1821,7 +1816,7 @@ static void ts_dmi_add_props(struct i2c_client *client) int error; if (has_acpi_companion(dev) && - !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) { + strstarts(client->name, ts_data->acpi_name)) { error = device_create_managed_software_node(dev, ts_data->properties, NULL); if (error) dev_err(dev, "failed to add properties: %d\n", error); diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c index f8221a15575b..a3415f1c0b5f 100644 --- a/drivers/platform/x86/x86-android-tablets/core.c +++ b/drivers/platform/x86/x86-android-tablets/core.c @@ -21,6 +21,7 @@ #include <linux/string.h> #include "x86-android-tablets.h" +#include "../serdev_helpers.h" static struct platform_device *x86_android_tablet_device; @@ -113,6 +114,9 @@ int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data) if (irq_type != IRQ_TYPE_NONE && irq_type != irq_get_trigger_type(irq)) irq_set_irq_type(irq, irq_type); + if (data->free_gpio) + devm_gpiod_put(&x86_android_tablet_device->dev, gpiod); + return irq; case X86_ACPI_IRQ_TYPE_PMIC: status = acpi_get_handle(NULL, data->chip, &handle); @@ -229,38 +233,20 @@ static __init int x86_instantiate_spi_dev(const struct x86_dev_info *dev_info, i static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int idx) { - struct acpi_device *ctrl_adev, *serdev_adev; + struct acpi_device *serdev_adev; struct serdev_device *serdev; struct device *ctrl_dev; int ret = -ENODEV; - ctrl_adev = acpi_dev_get_first_match_dev(info->ctrl_hid, info->ctrl_uid, -1); - if (!ctrl_adev) { - pr_err("error could not get %s/%s ctrl adev\n", - info->ctrl_hid, info->ctrl_uid); - return -ENODEV; - } + ctrl_dev = get_serdev_controller(info->ctrl_hid, info->ctrl_uid, 0, + info->ctrl_devname); + if (IS_ERR(ctrl_dev)) + return PTR_ERR(ctrl_dev); serdev_adev = acpi_dev_get_first_match_dev(info->serdev_hid, NULL, -1); if (!serdev_adev) { pr_err("error could not get %s serdev adev\n", info->serdev_hid); - goto put_ctrl_adev; - } - - /* get_first_physical_node() returns a weak ref, no need to put() it */ - ctrl_dev = 
acpi_get_first_physical_node(ctrl_adev); - if (!ctrl_dev) { - pr_err("error could not get %s/%s ctrl physical dev\n", - info->ctrl_hid, info->ctrl_uid); - goto put_serdev_adev; - } - - /* ctrl_dev now points to the controller's parent, get the controller */ - ctrl_dev = device_find_child_by_name(ctrl_dev, info->ctrl_devname); - if (!ctrl_dev) { - pr_err("error could not get %s/%s %s ctrl dev\n", - info->ctrl_hid, info->ctrl_uid, info->ctrl_devname); - goto put_serdev_adev; + goto put_ctrl_dev; } serdev = serdev_device_alloc(to_serdev_controller(ctrl_dev)); @@ -283,8 +269,8 @@ static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int put_serdev_adev: acpi_dev_put(serdev_adev); -put_ctrl_adev: - acpi_dev_put(ctrl_adev); +put_ctrl_dev: + put_device(ctrl_dev); return ret; } diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c index f1c66a61bfc5..c297391955ad 100644 --- a/drivers/platform/x86/x86-android-tablets/lenovo.c +++ b/drivers/platform/x86/x86-android-tablets/lenovo.c @@ -116,6 +116,7 @@ static const struct x86_i2c_client_info lenovo_yb1_x90_i2c_clients[] __initconst .trigger = ACPI_EDGE_SENSITIVE, .polarity = ACPI_ACTIVE_LOW, .con_id = "goodix_ts_irq", + .free_gpio = true, }, }, { /* Wacom Digitizer in keyboard half */ diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c index bc6bbf7ec6ea..278402dcb808 100644 --- a/drivers/platform/x86/x86-android-tablets/other.c +++ b/drivers/platform/x86/x86-android-tablets/other.c @@ -68,7 +68,7 @@ static const struct x86_i2c_client_info acer_b1_750_i2c_clients[] __initconst = }, }; -static struct gpiod_lookup_table acer_b1_750_goodix_gpios = { +static struct gpiod_lookup_table acer_b1_750_nvt_ts_gpios = { .dev_id = "i2c-NVT-ts", .table = { GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW), @@ -77,7 +77,7 @@ static struct gpiod_lookup_table acer_b1_750_goodix_gpios = { }; static struct gpiod_lookup_table * const acer_b1_750_gpios[] = { - &acer_b1_750_goodix_gpios, + &acer_b1_750_nvt_ts_gpios, &int3496_reference_gpios, NULL }; diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h index 49fed9410adb..468993edfeee 100644 --- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h +++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h @@ -39,6 +39,7 @@ struct x86_acpi_irq_data { int index; int trigger; /* ACPI_EDGE_SENSITIVE / ACPI_LEVEL_SENSITIVE */ int polarity; /* ACPI_ACTIVE_HIGH / ACPI_ACTIVE_LOW / ACPI_ACTIVE_BOTH */ + bool free_gpio; /* Release GPIO after getting IRQ (for TYPE_GPIOINT) */ const char *con_id; }; diff --git a/drivers/pmdomain/arm/scmi_perf_domain.c b/drivers/pmdomain/arm/scmi_perf_domain.c index 709bbc448fad..d7ef46ccd9b8 100644 --- a/drivers/pmdomain/arm/scmi_perf_domain.c +++ b/drivers/pmdomain/arm/scmi_perf_domain.c @@ -159,6 +159,9 @@ static void scmi_perf_domain_remove(struct scmi_device *sdev) struct genpd_onecell_data *scmi_pd_data = dev_get_drvdata(dev); int i; + if (!scmi_pd_data) + return; + of_genpd_del_provider(dev->of_node); for (i = 0; i < scmi_pd_data->num_domains; i++) diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c index a1f6cba3ae6c..18e232b5ed53 100644 --- a/drivers/pmdomain/core.c +++ b/drivers/pmdomain/core.c @@ -1109,7 +1109,7 @@ static int __init genpd_power_off_unused(void) return 0; } -late_initcall(genpd_power_off_unused); 
+late_initcall_sync(genpd_power_off_unused); #ifdef CONFIG_PM_SLEEP diff --git a/drivers/pmdomain/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c index e26dc17d07ad..e274e3315fe7 100644 --- a/drivers/pmdomain/mediatek/mtk-pm-domains.c +++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c @@ -561,6 +561,11 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren goto err_put_node; } + /* recursive call to add all subdomains */ + ret = scpsys_add_subdomain(scpsys, child); + if (ret) + goto err_put_node; + ret = pm_genpd_add_subdomain(parent_pd, child_pd); if (ret) { dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n", @@ -570,11 +575,6 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name, child_pd->name); } - - /* recursive call to add all subdomains */ - ret = scpsys_add_subdomain(scpsys, child); - if (ret) - goto err_put_node; } return 0; @@ -588,9 +588,6 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd) { int ret; - if (scpsys_domain_is_on(pd)) - scpsys_power_off(&pd->genpd); - /* * We're in the error cleanup already, so we only complain, * but won't emit another error on top of the original one. @@ -600,6 +597,8 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd) dev_err(pd->scpsys->dev, "failed to remove domain '%s' : %d - state may be inconsistent\n", pd->genpd.name, ret); + if (scpsys_domain_is_on(pd)) + scpsys_power_off(&pd->genpd); clk_bulk_put(pd->num_clks, pd->clks); clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks); diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c index 3078896b1300..de9121ef4216 100644 --- a/drivers/pmdomain/qcom/rpmhpd.c +++ b/drivers/pmdomain/qcom/rpmhpd.c @@ -217,7 +217,6 @@ static struct rpmhpd *sa8540p_rpmhpds[] = { [SC8280XP_CX] = &cx, [SC8280XP_CX_AO] = &cx_ao, [SC8280XP_EBI] = &ebi, - [SC8280XP_GFX] = &gfx, [SC8280XP_LCX] = &lcx, [SC8280XP_LMX] = &lmx, [SC8280XP_MMCX] = &mmcx, @@ -692,6 +691,7 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner) unsigned int active_corner, sleep_corner; unsigned int this_active_corner = 0, this_sleep_corner = 0; unsigned int peer_active_corner = 0, peer_sleep_corner = 0; + unsigned int peer_enabled_corner; if (pd->state_synced) { to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner); @@ -701,9 +701,11 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner) this_sleep_corner = pd->level_count - 1; } - if (peer && peer->enabled) - to_active_sleep(peer, peer->corner, &peer_active_corner, + if (peer && peer->enabled) { + peer_enabled_corner = max(peer->corner, peer->enable_corner); + to_active_sleep(peer, peer_enabled_corner, &peer_active_corner, &peer_sleep_corner); + } active_corner = max(this_active_corner, peer_active_corner); diff --git a/drivers/pmdomain/renesas/r8a77980-sysc.c b/drivers/pmdomain/renesas/r8a77980-sysc.c index 39ca84a67daa..621e411fc999 100644 --- a/drivers/pmdomain/renesas/r8a77980-sysc.c +++ b/drivers/pmdomain/renesas/r8a77980-sysc.c @@ -25,7 +25,8 @@ static const struct rcar_sysc_area r8a77980_areas[] __initconst = { PD_CPU_NOCR }, { "ca53-cpu3", 0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU, PD_CPU_NOCR }, - { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON }, + { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON, + PD_CPU_NOCR }, { "a3ir", 0x180, 0, R8A77980_PD_A3IR, R8A77980_PD_ALWAYS_ON }, { "a2ir0", 0x400, 
0, R8A77980_PD_A2IR0, R8A77980_PD_A3IR }, { "a2ir1", 0x400, 1, R8A77980_PD_A2IR1, R8A77980_PD_A3IR }, diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index f21cb05815ec..3e31375491d5 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -978,6 +978,7 @@ config CHARGER_QCOM_SMB2 config FUEL_GAUGE_MM8013 tristate "Mitsumi MM8013 fuel gauge driver" depends on I2C + select REGMAP_I2C help Say Y here to enable the Mitsumi MM8013 fuel gauge driver. It enables the monitoring of many battery parameters, including diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c index 3a1798b0c1a7..9910c600743e 100644 --- a/drivers/power/supply/bq27xxx_battery_i2c.c +++ b/drivers/power/supply/bq27xxx_battery_i2c.c @@ -209,7 +209,9 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client) { struct bq27xxx_device_info *di = i2c_get_clientdata(client); - free_irq(client->irq, di); + if (client->irq) + free_irq(client->irq, di); + bq27xxx_battery_teardown(di); mutex_lock(&battery_mutex); diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c index a12e2a66d516..ec163d1bcd18 100644 --- a/drivers/power/supply/qcom_battmgr.c +++ b/drivers/power/supply/qcom_battmgr.c @@ -282,7 +282,6 @@ struct qcom_battmgr_wireless { struct qcom_battmgr { struct device *dev; - struct auxiliary_device *adev; struct pmic_glink_client *client; enum qcom_battmgr_variant variant; @@ -1294,69 +1293,11 @@ static void qcom_battmgr_enable_worker(struct work_struct *work) dev_err(battmgr->dev, "failed to request power notifications\n"); } -static char *qcom_battmgr_battery[] = { "battery" }; - -static void qcom_battmgr_register_psy(struct qcom_battmgr *battmgr) -{ - struct power_supply_config psy_cfg_supply = {}; - struct auxiliary_device *adev = battmgr->adev; - struct power_supply_config psy_cfg = {}; - struct device *dev = &adev->dev; - - psy_cfg.drv_data = battmgr; - psy_cfg.of_node = adev->dev.of_node; - - psy_cfg_supply.drv_data = battmgr; - psy_cfg_supply.of_node = adev->dev.of_node; - psy_cfg_supply.supplied_to = qcom_battmgr_battery; - psy_cfg_supply.num_supplicants = 1; - - if (battmgr->variant == QCOM_BATTMGR_SC8280XP) { - battmgr->bat_psy = devm_power_supply_register(dev, &sc8280xp_bat_psy_desc, &psy_cfg); - if (IS_ERR(battmgr->bat_psy)) - dev_err(dev, "failed to register battery power supply (%ld)\n", - PTR_ERR(battmgr->bat_psy)); - - battmgr->ac_psy = devm_power_supply_register(dev, &sc8280xp_ac_psy_desc, &psy_cfg_supply); - if (IS_ERR(battmgr->ac_psy)) - dev_err(dev, "failed to register AC power supply (%ld)\n", - PTR_ERR(battmgr->ac_psy)); - - battmgr->usb_psy = devm_power_supply_register(dev, &sc8280xp_usb_psy_desc, &psy_cfg_supply); - if (IS_ERR(battmgr->usb_psy)) - dev_err(dev, "failed to register USB power supply (%ld)\n", - PTR_ERR(battmgr->usb_psy)); - - battmgr->wls_psy = devm_power_supply_register(dev, &sc8280xp_wls_psy_desc, &psy_cfg_supply); - if (IS_ERR(battmgr->wls_psy)) - dev_err(dev, "failed to register wireless charing power supply (%ld)\n", - PTR_ERR(battmgr->wls_psy)); - } else { - battmgr->bat_psy = devm_power_supply_register(dev, &sm8350_bat_psy_desc, &psy_cfg); - if (IS_ERR(battmgr->bat_psy)) - dev_err(dev, "failed to register battery power supply (%ld)\n", - PTR_ERR(battmgr->bat_psy)); - - battmgr->usb_psy = devm_power_supply_register(dev, &sm8350_usb_psy_desc, &psy_cfg_supply); - if (IS_ERR(battmgr->usb_psy)) - dev_err(dev, "failed to register USB power supply 
(%ld)\n", - PTR_ERR(battmgr->usb_psy)); - - battmgr->wls_psy = devm_power_supply_register(dev, &sm8350_wls_psy_desc, &psy_cfg_supply); - if (IS_ERR(battmgr->wls_psy)) - dev_err(dev, "failed to register wireless charing power supply (%ld)\n", - PTR_ERR(battmgr->wls_psy)); - } -} - static void qcom_battmgr_pdr_notify(void *priv, int state) { struct qcom_battmgr *battmgr = priv; if (state == SERVREG_SERVICE_STATE_UP) { - if (!battmgr->bat_psy) - qcom_battmgr_register_psy(battmgr); - battmgr->service_up = true; schedule_work(&battmgr->enable_work); } else { @@ -1371,9 +1312,13 @@ static const struct of_device_id qcom_battmgr_of_variants[] = { {} }; +static char *qcom_battmgr_battery[] = { "battery" }; + static int qcom_battmgr_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { + struct power_supply_config psy_cfg_supply = {}; + struct power_supply_config psy_cfg = {}; const struct of_device_id *match; struct qcom_battmgr *battmgr; struct device *dev = &adev->dev; @@ -1383,7 +1328,14 @@ static int qcom_battmgr_probe(struct auxiliary_device *adev, return -ENOMEM; battmgr->dev = dev; - battmgr->adev = adev; + + psy_cfg.drv_data = battmgr; + psy_cfg.of_node = adev->dev.of_node; + + psy_cfg_supply.drv_data = battmgr; + psy_cfg_supply.of_node = adev->dev.of_node; + psy_cfg_supply.supplied_to = qcom_battmgr_battery; + psy_cfg_supply.num_supplicants = 1; INIT_WORK(&battmgr->enable_work, qcom_battmgr_enable_worker); mutex_init(&battmgr->lock); @@ -1395,6 +1347,43 @@ static int qcom_battmgr_probe(struct auxiliary_device *adev, else battmgr->variant = QCOM_BATTMGR_SM8350; + if (battmgr->variant == QCOM_BATTMGR_SC8280XP) { + battmgr->bat_psy = devm_power_supply_register(dev, &sc8280xp_bat_psy_desc, &psy_cfg); + if (IS_ERR(battmgr->bat_psy)) + return dev_err_probe(dev, PTR_ERR(battmgr->bat_psy), + "failed to register battery power supply\n"); + + battmgr->ac_psy = devm_power_supply_register(dev, &sc8280xp_ac_psy_desc, &psy_cfg_supply); + if (IS_ERR(battmgr->ac_psy)) + return dev_err_probe(dev, PTR_ERR(battmgr->ac_psy), + "failed to register AC power supply\n"); + + battmgr->usb_psy = devm_power_supply_register(dev, &sc8280xp_usb_psy_desc, &psy_cfg_supply); + if (IS_ERR(battmgr->usb_psy)) + return dev_err_probe(dev, PTR_ERR(battmgr->usb_psy), + "failed to register USB power supply\n"); + + battmgr->wls_psy = devm_power_supply_register(dev, &sc8280xp_wls_psy_desc, &psy_cfg_supply); + if (IS_ERR(battmgr->wls_psy)) + return dev_err_probe(dev, PTR_ERR(battmgr->wls_psy), + "failed to register wireless charing power supply\n"); + } else { + battmgr->bat_psy = devm_power_supply_register(dev, &sm8350_bat_psy_desc, &psy_cfg); + if (IS_ERR(battmgr->bat_psy)) + return dev_err_probe(dev, PTR_ERR(battmgr->bat_psy), + "failed to register battery power supply\n"); + + battmgr->usb_psy = devm_power_supply_register(dev, &sm8350_usb_psy_desc, &psy_cfg_supply); + if (IS_ERR(battmgr->usb_psy)) + return dev_err_probe(dev, PTR_ERR(battmgr->usb_psy), + "failed to register USB power supply\n"); + + battmgr->wls_psy = devm_power_supply_register(dev, &sm8350_wls_psy_desc, &psy_cfg_supply); + if (IS_ERR(battmgr->wls_psy)) + return dev_err_probe(dev, PTR_ERR(battmgr->wls_psy), + "failed to register wireless charing power supply\n"); + } + battmgr->client = devm_pmic_glink_register_client(dev, PMIC_GLINK_OWNER_BATTMGR, qcom_battmgr_callback, diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 2feed036c1cd..00c861899a47 100644 --- a/drivers/powercap/intel_rapl_common.c 
+++ b/drivers/powercap/intel_rapl_common.c @@ -1564,7 +1564,7 @@ struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id if (id_is_cpu) { rp->id = topology_logical_die_id(id); rp->lead_cpu = id; - if (topology_max_die_per_package() > 1) + if (topology_max_dies_per_package() > 1) snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d-die-%d", topology_physical_package_id(id), topology_die_id(id)); else diff --git a/drivers/ptp/ptp_kvm_common.c b/drivers/ptp/ptp_kvm_common.c index 2418977989be..15ccb7dd2ed0 100644 --- a/drivers/ptp/ptp_kvm_common.c +++ b/drivers/ptp/ptp_kvm_common.c @@ -28,15 +28,15 @@ static int ptp_kvm_get_time_fn(ktime_t *device_time, struct system_counterval_t *system_counter, void *ctx) { - long ret; - u64 cycle; + enum clocksource_ids cs_id; struct timespec64 tspec; - struct clocksource *cs; + u64 cycle; + int ret; spin_lock(&kvm_ptp_lock); preempt_disable_notrace(); - ret = kvm_arch_ptp_get_crosststamp(&cycle, &tspec, &cs); + ret = kvm_arch_ptp_get_crosststamp(&cycle, &tspec, &cs_id); if (ret) { spin_unlock(&kvm_ptp_lock); preempt_enable_notrace(); @@ -46,7 +46,7 @@ static int ptp_kvm_get_time_fn(ktime_t *device_time, preempt_enable_notrace(); system_counter->cycles = cycle; - system_counter->cs = cs; + system_counter->cs_id = cs_id; *device_time = timespec64_to_ktime(tspec); diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c index 902844cc1a17..617c8d6706d3 100644 --- a/drivers/ptp/ptp_kvm_x86.c +++ b/drivers/ptp/ptp_kvm_x86.c @@ -93,7 +93,7 @@ int kvm_arch_ptp_get_clock(struct timespec64 *ts) } int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec, - struct clocksource **cs) + enum clocksource_ids *cs_id) { struct pvclock_vcpu_time_info *src; unsigned int version; @@ -123,7 +123,7 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec, *cycle = __pvclock_read_cycles(src, clock_pair->tsc); } while (pvclock_read_retry(src, version)); - *cs = &kvm_clock; + *cs_id = CSID_X86_KVM_CLK; return 0; } diff --git a/drivers/ras/Kconfig b/drivers/ras/Kconfig index c2a236f2e846..fc4f4bb94a4c 100644 --- a/drivers/ras/Kconfig +++ b/drivers/ras/Kconfig @@ -32,5 +32,18 @@ menuconfig RAS if RAS source "arch/x86/ras/Kconfig" +source "drivers/ras/amd/atl/Kconfig" + +config RAS_FMPM + tristate "FRU Memory Poison Manager" + default m + depends on AMD_ATL && ACPI_APEI + help + Support saving and restoring memory error information across reboot + using ACPI ERST as persistent storage. Error information is saved with + the UEFI CPER "FRU Memory Poison" section format. + + Memory will be retired during boot time and run time depending on + platform-specific policies. endif diff --git a/drivers/ras/Makefile b/drivers/ras/Makefile index 6f0404f50107..11f95d59d397 100644 --- a/drivers/ras/Makefile +++ b/drivers/ras/Makefile @@ -2,3 +2,6 @@ obj-$(CONFIG_RAS) += ras.o obj-$(CONFIG_DEBUG_FS) += debugfs.o obj-$(CONFIG_RAS_CEC) += cec.o + +obj-$(CONFIG_RAS_FMPM) += amd/fmpm.o +obj-y += amd/atl/ diff --git a/drivers/ras/amd/atl/Kconfig b/drivers/ras/amd/atl/Kconfig new file mode 100644 index 000000000000..df49c23e7f62 --- /dev/null +++ b/drivers/ras/amd/atl/Kconfig @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# AMD Address Translation Library Kconfig +# +# Copyright (c) 2023, Advanced Micro Devices, Inc. +# All Rights Reserved. 
+# +# Author: Yazen Ghannam <Yazen.Ghannam@amd.com> + +config AMD_ATL + tristate "AMD Address Translation Library" + depends on AMD_NB && X86_64 && RAS + depends on MEMORY_FAILURE + default N + help + This library includes support for implementation-specific + address translation procedures needed for various error + handling cases. + + Enable this option if using DRAM ECC on Zen-based systems + and OS-based error handling. diff --git a/drivers/ras/amd/atl/Makefile b/drivers/ras/amd/atl/Makefile new file mode 100644 index 000000000000..4acd5f05bd9c --- /dev/null +++ b/drivers/ras/amd/atl/Makefile @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# AMD Address Translation Library Makefile +# +# Copyright (c) 2023, Advanced Micro Devices, Inc. +# All Rights Reserved. +# +# Author: Yazen Ghannam <Yazen.Ghannam@amd.com> + +amd_atl-y := access.o +amd_atl-y += core.o +amd_atl-y += dehash.o +amd_atl-y += denormalize.o +amd_atl-y += map.o +amd_atl-y += system.o +amd_atl-y += umc.o + +obj-$(CONFIG_AMD_ATL) += amd_atl.o diff --git a/drivers/ras/amd/atl/access.c b/drivers/ras/amd/atl/access.c new file mode 100644 index 000000000000..ee4661ed28ba --- /dev/null +++ b/drivers/ras/amd/atl/access.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD Address Translation Library + * + * access.c : DF Indirect Access functions + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Yazen Ghannam <Yazen.Ghannam@amd.com> + */ + +#include "internal.h" + +/* Protect the PCI config register pairs used for DF indirect access. */ +static DEFINE_MUTEX(df_indirect_mutex); + +/* + * Data Fabric Indirect Access uses FICAA/FICAD. + * + * Fabric Indirect Configuration Access Address (FICAA): constructed based + * on the device's Instance Id and the PCI function and register offset of + * the desired register. + * + * Fabric Indirect Configuration Access Data (FICAD): there are FICAD + * low and high registers but so far only the low register is needed. + * + * Use Instance Id 0xFF to indicate a broadcast read. + */ +#define DF_BROADCAST 0xFF + +#define DF_FICAA_INST_EN BIT(0) +#define DF_FICAA_REG_NUM GENMASK(10, 1) +#define DF_FICAA_FUNC_NUM GENMASK(13, 11) +#define DF_FICAA_INST_ID GENMASK(23, 16) + +#define DF_FICAA_REG_NUM_LEGACY GENMASK(10, 2) + +static u16 get_accessible_node(u16 node) +{ + /* + * On heterogeneous systems, not all AMD Nodes are accessible + * through software-visible registers. The Node ID needs to be + * adjusted for register accesses. But its value should not be + * changed for the translation methods. + */ + if (df_cfg.flags.heterogeneous) { + /* Only Node 0 is accessible on DF3.5 systems. */ + if (df_cfg.rev == DF3p5) + node = 0; + + /* + * Only the first Node in each Socket is accessible on + * DF4.5 systems, and this is visible to software as one + * Fabric per Socket. The Socket ID can be derived from + * the Node ID and global shift values. + */ + if (df_cfg.rev == DF4p5) + node >>= df_cfg.socket_id_shift - df_cfg.node_id_shift; + } + + return node; +} + +static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo) +{ + u32 ficaa_addr = 0x8C, ficad_addr = 0xB8; + struct pci_dev *F4; + int err = -ENODEV; + u32 ficaa = 0; + + node = get_accessible_node(node); + if (node >= amd_nb_num()) + goto out; + + F4 = node_to_amd_nb(node)->link; + if (!F4) + goto out; + + /* Enable instance-specific access. 
*/ + if (instance_id != DF_BROADCAST) { + ficaa |= FIELD_PREP(DF_FICAA_INST_EN, 1); + ficaa |= FIELD_PREP(DF_FICAA_INST_ID, instance_id); + } + + /* + * The two least-significant bits are masked when inputing the + * register offset to FICAA. + */ + reg >>= 2; + + if (df_cfg.flags.legacy_ficaa) { + ficaa_addr = 0x5C; + ficad_addr = 0x98; + + ficaa |= FIELD_PREP(DF_FICAA_REG_NUM_LEGACY, reg); + } else { + ficaa |= FIELD_PREP(DF_FICAA_REG_NUM, reg); + } + + ficaa |= FIELD_PREP(DF_FICAA_FUNC_NUM, func); + + mutex_lock(&df_indirect_mutex); + + err = pci_write_config_dword(F4, ficaa_addr, ficaa); + if (err) { + pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa); + goto out_unlock; + } + + err = pci_read_config_dword(F4, ficad_addr, lo); + if (err) + pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa); + + pr_debug("node=%u inst=0x%x func=0x%x reg=0x%x val=0x%x", + node, instance_id, func, reg << 2, *lo); + +out_unlock: + mutex_unlock(&df_indirect_mutex); + +out: + return err; +} + +int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo) +{ + return __df_indirect_read(node, func, reg, instance_id, lo); +} + +int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo) +{ + return __df_indirect_read(node, func, reg, DF_BROADCAST, lo); +} diff --git a/drivers/ras/amd/atl/core.c b/drivers/ras/amd/atl/core.c new file mode 100644 index 000000000000..6dc4e06305f7 --- /dev/null +++ b/drivers/ras/amd/atl/core.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD Address Translation Library + * + * core.c : Module init and base translation functions + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Yazen Ghannam <Yazen.Ghannam@amd.com> + */ + +#include <linux/module.h> +#include <asm/cpu_device_id.h> + +#include "internal.h" + +struct df_config df_cfg __read_mostly; + +static int addr_over_limit(struct addr_ctx *ctx) +{ + u64 dram_limit_addr; + + if (df_cfg.rev >= DF4) + dram_limit_addr = FIELD_GET(DF4_DRAM_LIMIT_ADDR, ctx->map.limit); + else + dram_limit_addr = FIELD_GET(DF2_DRAM_LIMIT_ADDR, ctx->map.limit); + + dram_limit_addr <<= DF_DRAM_BASE_LIMIT_LSB; + dram_limit_addr |= GENMASK(DF_DRAM_BASE_LIMIT_LSB - 1, 0); + + /* Is calculated system address above DRAM limit address? 
*/ + if (ctx->ret_addr > dram_limit_addr) { + atl_debug(ctx, "Calculated address (0x%016llx) > DRAM limit (0x%016llx)", + ctx->ret_addr, dram_limit_addr); + return -EINVAL; + } + + return 0; +} + +static bool legacy_hole_en(struct addr_ctx *ctx) +{ + u32 reg = ctx->map.base; + + if (df_cfg.rev >= DF4) + reg = ctx->map.ctl; + + return FIELD_GET(DF_LEGACY_MMIO_HOLE_EN, reg); +} + +static int add_legacy_hole(struct addr_ctx *ctx) +{ + u32 dram_hole_base; + u8 func = 0; + + if (!legacy_hole_en(ctx)) + return 0; + + if (df_cfg.rev >= DF4) + func = 7; + + if (df_indirect_read_broadcast(ctx->node_id, func, 0x104, &dram_hole_base)) + return -EINVAL; + + dram_hole_base &= DF_DRAM_HOLE_BASE_MASK; + + if (ctx->ret_addr >= dram_hole_base) + ctx->ret_addr += (BIT_ULL(32) - dram_hole_base); + + return 0; +} + +static u64 get_base_addr(struct addr_ctx *ctx) +{ + u64 base_addr; + + if (df_cfg.rev >= DF4) + base_addr = FIELD_GET(DF4_BASE_ADDR, ctx->map.base); + else + base_addr = FIELD_GET(DF2_BASE_ADDR, ctx->map.base); + + return base_addr << DF_DRAM_BASE_LIMIT_LSB; +} + +static int add_base_and_hole(struct addr_ctx *ctx) +{ + ctx->ret_addr += get_base_addr(ctx); + + if (add_legacy_hole(ctx)) + return -EINVAL; + + return 0; +} + +static bool late_hole_remove(struct addr_ctx *ctx) +{ + if (df_cfg.rev == DF3p5) + return true; + + if (df_cfg.rev == DF4) + return true; + + if (ctx->map.intlv_mode == DF3_6CHAN) + return true; + + return false; +} + +unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsigned long addr) +{ + struct addr_ctx ctx; + + if (df_cfg.rev == UNKNOWN) + return -EINVAL; + + memset(&ctx, 0, sizeof(ctx)); + + /* Start from the normalized address */ + ctx.ret_addr = addr; + ctx.inst_id = coh_st_inst_id; + + ctx.inputs.norm_addr = addr; + ctx.inputs.socket_id = socket_id; + ctx.inputs.die_id = die_id; + ctx.inputs.coh_st_inst_id = coh_st_inst_id; + + if (determine_node_id(&ctx, socket_id, die_id)) + return -EINVAL; + + if (get_address_map(&ctx)) + return -EINVAL; + + if (denormalize_address(&ctx)) + return -EINVAL; + + if (!late_hole_remove(&ctx) && add_base_and_hole(&ctx)) + return -EINVAL; + + if (dehash_address(&ctx)) + return -EINVAL; + + if (late_hole_remove(&ctx) && add_base_and_hole(&ctx)) + return -EINVAL; + + if (addr_over_limit(&ctx)) + return -EINVAL; + + return ctx.ret_addr; +} + +static void check_for_legacy_df_access(void) +{ + /* + * All Zen-based systems before Family 19h use the legacy + * DF Indirect Access (FICAA/FICAD) offsets. + */ + if (boot_cpu_data.x86 < 0x19) { + df_cfg.flags.legacy_ficaa = true; + return; + } + + /* All systems after Family 19h use the current offsets. */ + if (boot_cpu_data.x86 > 0x19) + return; + + /* Some Family 19h systems use the legacy offsets. */ + switch (boot_cpu_data.x86_model) { + case 0x00 ... 0x0f: + case 0x20 ... 0x5f: + df_cfg.flags.legacy_ficaa = true; + } +} + +/* + * This library provides functionality for AMD-based systems with a Data Fabric. + * The set of systems with a Data Fabric is equivalent to the set of Zen-based systems + * and the set of systems with the Scalable MCA feature at this time. However, these + * are technically independent things. + * + * It's possible to match on the PCI IDs of the Data Fabric devices, but this will be + * an ever expanding list. Instead, match on the SMCA and Zen features to cover all + * relevant systems. 
+ */ +static const struct x86_cpu_id amd_atl_cpuids[] = { + X86_MATCH_FEATURE(X86_FEATURE_SMCA, NULL), + X86_MATCH_FEATURE(X86_FEATURE_ZEN, NULL), + { } +}; +MODULE_DEVICE_TABLE(x86cpu, amd_atl_cpuids); + +static int __init amd_atl_init(void) +{ + if (!x86_match_cpu(amd_atl_cpuids)) + return -ENODEV; + + if (!amd_nb_num()) + return -ENODEV; + + check_for_legacy_df_access(); + + if (get_df_system_info()) + return -ENODEV; + + /* Increment this module's recount so that it can't be easily unloaded. */ + __module_get(THIS_MODULE); + amd_atl_register_decoder(convert_umc_mca_addr_to_sys_addr); + + pr_info("AMD Address Translation Library initialized"); + return 0; +} + +/* + * Exit function is only needed for testing and debug. Module unload must be + * forced to override refcount check. + */ +static void __exit amd_atl_exit(void) +{ + amd_atl_unregister_decoder(); +} + +module_init(amd_atl_init); +module_exit(amd_atl_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/ras/amd/atl/dehash.c b/drivers/ras/amd/atl/dehash.c new file mode 100644 index 000000000000..4ea46262c4f5 --- /dev/null +++ b/drivers/ras/amd/atl/dehash.c @@ -0,0 +1,500 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD Address Translation Library + * + * dehash.c : Functions to account for hashing bits + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Yazen Ghannam <Yazen.Ghannam@amd.com> + */ + +#include "internal.h" + +/* + * Verify the interleave bits are correct in the different interleaving + * settings. + * + * If @num_intlv_dies and/or @num_intlv_sockets are 1, it means the + * respective interleaving is disabled. + */ +static inline bool map_bits_valid(struct addr_ctx *ctx, u8 bit1, u8 bit2, + u8 num_intlv_dies, u8 num_intlv_sockets) +{ + if (!(ctx->map.intlv_bit_pos == bit1 || ctx->map.intlv_bit_pos == bit2)) { + pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos); + return false; + } + + if (ctx->map.num_intlv_dies > num_intlv_dies) { + pr_debug("Invalid number of interleave dies: %u", ctx->map.num_intlv_dies); + return false; + } + + if (ctx->map.num_intlv_sockets > num_intlv_sockets) { + pr_debug("Invalid number of interleave sockets: %u", ctx->map.num_intlv_sockets); + return false; + } + + return true; +} + +static int df2_dehash_addr(struct addr_ctx *ctx) +{ + u8 hashed_bit, intlv_bit, intlv_bit_pos; + + if (!map_bits_valid(ctx, 8, 9, 1, 1)) + return -EINVAL; + + intlv_bit_pos = ctx->map.intlv_bit_pos; + intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(12), ctx->ret_addr); + hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr); + hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr); + hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr); + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(intlv_bit_pos); + + return 0; +} + +static int df3_dehash_addr(struct addr_ctx *ctx) +{ + bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G; + u8 hashed_bit, intlv_bit, intlv_bit_pos; + + if (!map_bits_valid(ctx, 8, 9, 1, 1)) + return -EINVAL; + + hash_ctl_64k = FIELD_GET(DF3_HASH_CTL_64K, ctx->map.ctl); + hash_ctl_2M = FIELD_GET(DF3_HASH_CTL_2M, ctx->map.ctl); + hash_ctl_1G = FIELD_GET(DF3_HASH_CTL_1G, ctx->map.ctl); + + intlv_bit_pos = ctx->map.intlv_bit_pos; + intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(14), ctx->ret_addr); + hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= 
FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(intlv_bit_pos); + + /* Calculation complete for 2 channels. Continue for 4 and 8 channels. */ + if (ctx->map.intlv_mode == DF3_COD4_2CHAN_HASH) + return 0; + + intlv_bit = FIELD_GET(BIT_ULL(12), ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(16), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(12); + + /* Calculation complete for 4 channels. Continue for 8 channels. */ + if (ctx->map.intlv_mode == DF3_COD2_4CHAN_HASH) + return 0; + + intlv_bit = FIELD_GET(BIT_ULL(13), ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(17), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(13); + + return 0; +} + +static int df3_6chan_dehash_addr(struct addr_ctx *ctx) +{ + u8 intlv_bit_pos = ctx->map.intlv_bit_pos; + u8 hashed_bit, intlv_bit, num_intlv_bits; + bool hash_ctl_2M, hash_ctl_1G; + + if (ctx->map.intlv_mode != DF3_6CHAN) { + atl_debug_on_bad_intlv_mode(ctx); + return -EINVAL; + } + + num_intlv_bits = ilog2(ctx->map.num_intlv_chan) + 1; + + hash_ctl_2M = FIELD_GET(DF3_HASH_CTL_2M, ctx->map.ctl); + hash_ctl_1G = FIELD_GET(DF3_HASH_CTL_1G, ctx->map.ctl); + + intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= !!(BIT_ULL(intlv_bit_pos + num_intlv_bits) & ctx->ret_addr); + hashed_bit ^= FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(intlv_bit_pos); + + intlv_bit_pos++; + intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(intlv_bit_pos); + + intlv_bit_pos++; + intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(intlv_bit_pos); + + return 0; +} + +static int df4_dehash_addr(struct addr_ctx *ctx) +{ + bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G; + u8 hashed_bit, intlv_bit; + + if (!map_bits_valid(ctx, 8, 8, 1, 2)) + return -EINVAL; + + hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl); + hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl); + hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl); + + intlv_bit = FIELD_GET(BIT_ULL(8), ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(16), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G; + + if (ctx->map.num_intlv_sockets == 1) + hashed_bit ^= FIELD_GET(BIT_ULL(14), ctx->ret_addr); + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(8); + + /* + * Hashing is possible with 
socket interleaving, so check the total number + * of channels in the system rather than DRAM map interleaving mode. + * + * Calculation complete for 2 channels. Continue for 4, 8, and 16 channels. + */ + if (ctx->map.total_intlv_chan <= 2) + return 0; + + intlv_bit = FIELD_GET(BIT_ULL(12), ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(17), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(12); + + /* Calculation complete for 4 channels. Continue for 8 and 16 channels. */ + if (ctx->map.total_intlv_chan <= 4) + return 0; + + intlv_bit = FIELD_GET(BIT_ULL(13), ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(13); + + /* Calculation complete for 8 channels. Continue for 16 channels. */ + if (ctx->map.total_intlv_chan <= 8) + return 0; + + intlv_bit = FIELD_GET(BIT_ULL(14), ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(19), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(24), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(33), ctx->ret_addr) & hash_ctl_1G; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(14); + + return 0; +} + +static int df4p5_dehash_addr(struct addr_ctx *ctx) +{ + bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G, hash_ctl_1T; + u8 hashed_bit, intlv_bit; + u64 rehash_vector; + + if (!map_bits_valid(ctx, 8, 8, 1, 2)) + return -EINVAL; + + hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl); + hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl); + hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl); + hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl); + + /* + * Generate a unique address to determine which bits + * need to be dehashed. + * + * Start with a contiguous bitmask for the total + * number of channels starting at bit 8. + * + * Then make a gap in the proper place based on + * interleave mode. 
+ */ + rehash_vector = ctx->map.total_intlv_chan - 1; + rehash_vector <<= 8; + + if (ctx->map.intlv_mode == DF4p5_NPS2_4CHAN_1K_HASH || + ctx->map.intlv_mode == DF4p5_NPS1_8CHAN_1K_HASH || + ctx->map.intlv_mode == DF4p5_NPS1_16CHAN_1K_HASH) + rehash_vector = expand_bits(10, 2, rehash_vector); + else + rehash_vector = expand_bits(9, 3, rehash_vector); + + if (rehash_vector & BIT_ULL(8)) { + intlv_bit = FIELD_GET(BIT_ULL(8), ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(16), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G; + hashed_bit ^= FIELD_GET(BIT_ULL(40), ctx->ret_addr) & hash_ctl_1T; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(8); + } + + if (rehash_vector & BIT_ULL(9)) { + intlv_bit = FIELD_GET(BIT_ULL(9), ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(17), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G; + hashed_bit ^= FIELD_GET(BIT_ULL(41), ctx->ret_addr) & hash_ctl_1T; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(9); + } + + if (rehash_vector & BIT_ULL(12)) { + intlv_bit = FIELD_GET(BIT_ULL(12), ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G; + hashed_bit ^= FIELD_GET(BIT_ULL(42), ctx->ret_addr) & hash_ctl_1T; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(12); + } + + if (rehash_vector & BIT_ULL(13)) { + intlv_bit = FIELD_GET(BIT_ULL(13), ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(19), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(24), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(33), ctx->ret_addr) & hash_ctl_1G; + hashed_bit ^= FIELD_GET(BIT_ULL(43), ctx->ret_addr) & hash_ctl_1T; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(13); + } + + if (rehash_vector & BIT_ULL(14)) { + intlv_bit = FIELD_GET(BIT_ULL(14), ctx->ret_addr); + + hashed_bit = intlv_bit; + hashed_bit ^= FIELD_GET(BIT_ULL(20), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(25), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(34), ctx->ret_addr) & hash_ctl_1G; + hashed_bit ^= FIELD_GET(BIT_ULL(44), ctx->ret_addr) & hash_ctl_1T; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(14); + } + + return 0; +} + +/* + * MI300 hash bits + * 4K 64K 2M 1G 1T 1T + * COH_ST_Select[0] = XOR of addr{8, 12, 15, 22, 29, 36, 43} + * COH_ST_Select[1] = XOR of addr{9, 13, 16, 23, 30, 37, 44} + * COH_ST_Select[2] = XOR of addr{10, 14, 17, 24, 31, 38, 45} + * COH_ST_Select[3] = XOR of addr{11, 18, 25, 32, 39, 46} + * COH_ST_Select[4] = XOR of addr{14, 19, 26, 33, 40, 47} aka Stack + * DieID[0] = XOR of addr{12, 20, 27, 34, 41 } + * DieID[1] = XOR of addr{13, 21, 28, 35, 42 } + */ +static int mi300_dehash_addr(struct addr_ctx *ctx) +{ + bool hash_ctl_4k, hash_ctl_64k, hash_ctl_2M, hash_ctl_1G, hash_ctl_1T; + bool hashed_bit, intlv_bit, test_bit; + u8 num_intlv_bits, base_bit, i; + + if (!map_bits_valid(ctx, 8, 8, 4, 1)) + return -EINVAL; + + hash_ctl_4k = FIELD_GET(DF4p5_HASH_CTL_4K, ctx->map.ctl); + hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl); + 
hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl); + hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl); + hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl); + + /* Channel bits */ + num_intlv_bits = ilog2(ctx->map.num_intlv_chan); + + for (i = 0; i < num_intlv_bits; i++) { + base_bit = 8 + i; + + /* COH_ST_Select[4] jumps to a base bit of 14. */ + if (i == 4) + base_bit = 14; + + intlv_bit = BIT_ULL(base_bit) & ctx->ret_addr; + + hashed_bit = intlv_bit; + + /* 4k hash bit only applies to the first 3 bits. */ + if (i <= 2) { + test_bit = BIT_ULL(12 + i) & ctx->ret_addr; + hashed_bit ^= test_bit & hash_ctl_4k; + } + + /* Use temporary 'test_bit' value to avoid Sparse warnings. */ + test_bit = BIT_ULL(15 + i) & ctx->ret_addr; + hashed_bit ^= test_bit & hash_ctl_64k; + test_bit = BIT_ULL(22 + i) & ctx->ret_addr; + hashed_bit ^= test_bit & hash_ctl_2M; + test_bit = BIT_ULL(29 + i) & ctx->ret_addr; + hashed_bit ^= test_bit & hash_ctl_1G; + test_bit = BIT_ULL(36 + i) & ctx->ret_addr; + hashed_bit ^= test_bit & hash_ctl_1T; + test_bit = BIT_ULL(43 + i) & ctx->ret_addr; + hashed_bit ^= test_bit & hash_ctl_1T; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(base_bit); + } + + /* Die bits */ + num_intlv_bits = ilog2(ctx->map.num_intlv_dies); + + for (i = 0; i < num_intlv_bits; i++) { + base_bit = 12 + i; + + intlv_bit = BIT_ULL(base_bit) & ctx->ret_addr; + + hashed_bit = intlv_bit; + + test_bit = BIT_ULL(20 + i) & ctx->ret_addr; + hashed_bit ^= test_bit & hash_ctl_64k; + test_bit = BIT_ULL(27 + i) & ctx->ret_addr; + hashed_bit ^= test_bit & hash_ctl_2M; + test_bit = BIT_ULL(34 + i) & ctx->ret_addr; + hashed_bit ^= test_bit & hash_ctl_1G; + test_bit = BIT_ULL(41 + i) & ctx->ret_addr; + hashed_bit ^= test_bit & hash_ctl_1T; + + if (hashed_bit != intlv_bit) + ctx->ret_addr ^= BIT_ULL(base_bit); + } + + return 0; +} + +int dehash_address(struct addr_ctx *ctx) +{ + switch (ctx->map.intlv_mode) { + /* No hashing cases. */ + case NONE: + case NOHASH_2CHAN: + case NOHASH_4CHAN: + case NOHASH_8CHAN: + case NOHASH_16CHAN: + case NOHASH_32CHAN: + /* Hashing bits handled earlier during CS ID calculation. */ + case DF4_NPS4_3CHAN_HASH: + case DF4_NPS2_5CHAN_HASH: + case DF4_NPS2_6CHAN_HASH: + case DF4_NPS1_10CHAN_HASH: + case DF4_NPS1_12CHAN_HASH: + case DF4p5_NPS2_6CHAN_1K_HASH: + case DF4p5_NPS2_6CHAN_2K_HASH: + case DF4p5_NPS1_10CHAN_1K_HASH: + case DF4p5_NPS1_10CHAN_2K_HASH: + case DF4p5_NPS1_12CHAN_1K_HASH: + case DF4p5_NPS1_12CHAN_2K_HASH: + case DF4p5_NPS0_24CHAN_1K_HASH: + case DF4p5_NPS0_24CHAN_2K_HASH: + /* No hash physical address bits, so nothing to do. 
*/ + case DF4p5_NPS4_3CHAN_1K_HASH: + case DF4p5_NPS4_3CHAN_2K_HASH: + case DF4p5_NPS2_5CHAN_1K_HASH: + case DF4p5_NPS2_5CHAN_2K_HASH: + return 0; + + case DF2_2CHAN_HASH: + return df2_dehash_addr(ctx); + + case DF3_COD4_2CHAN_HASH: + case DF3_COD2_4CHAN_HASH: + case DF3_COD1_8CHAN_HASH: + return df3_dehash_addr(ctx); + + case DF3_6CHAN: + return df3_6chan_dehash_addr(ctx); + + case DF4_NPS4_2CHAN_HASH: + case DF4_NPS2_4CHAN_HASH: + case DF4_NPS1_8CHAN_HASH: + return df4_dehash_addr(ctx); + + case DF4p5_NPS4_2CHAN_1K_HASH: + case DF4p5_NPS4_2CHAN_2K_HASH: + case DF4p5_NPS2_4CHAN_2K_HASH: + case DF4p5_NPS2_4CHAN_1K_HASH: + case DF4p5_NPS1_8CHAN_1K_HASH: + case DF4p5_NPS1_8CHAN_2K_HASH: + case DF4p5_NPS1_16CHAN_1K_HASH: + case DF4p5_NPS1_16CHAN_2K_HASH: + return df4p5_dehash_addr(ctx); + + case MI3_HASH_8CHAN: + case MI3_HASH_16CHAN: + case MI3_HASH_32CHAN: + return mi300_dehash_addr(ctx); + + default: + atl_debug_on_bad_intlv_mode(ctx); + return -EINVAL; + } +} diff --git a/drivers/ras/amd/atl/denormalize.c b/drivers/ras/amd/atl/denormalize.c new file mode 100644 index 000000000000..e279224288d6 --- /dev/null +++ b/drivers/ras/amd/atl/denormalize.c @@ -0,0 +1,718 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD Address Translation Library + * + * denormalize.c : Functions to account for interleaving bits + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Yazen Ghannam <Yazen.Ghannam@amd.com> + */ + +#include "internal.h" + +/* + * Returns the Destination Fabric ID. This is the first (lowest) + * COH_ST Fabric ID used within a DRAM Address map. + */ +static u16 get_dst_fabric_id(struct addr_ctx *ctx) +{ + switch (df_cfg.rev) { + case DF2: return FIELD_GET(DF2_DST_FABRIC_ID, ctx->map.limit); + case DF3: return FIELD_GET(DF3_DST_FABRIC_ID, ctx->map.limit); + case DF3p5: return FIELD_GET(DF3p5_DST_FABRIC_ID, ctx->map.limit); + case DF4: return FIELD_GET(DF4_DST_FABRIC_ID, ctx->map.ctl); + case DF4p5: return FIELD_GET(DF4p5_DST_FABRIC_ID, ctx->map.ctl); + default: + atl_debug_on_bad_df_rev(); + return 0; + } +} + +/* + * Make a contiguous gap in address for N bits starting at bit P. + * + * Example: + * address bits: [20:0] + * # of interleave bits (n): 3 + * starting interleave bit (p): 8 + * + * expanded address bits: [20+n : n+p][n+p-1 : p][p-1 : 0] + * [23 : 11][10 : 8][7 : 0] + */ +static u64 make_space_for_coh_st_id_at_intlv_bit(struct addr_ctx *ctx) +{ + return expand_bits(ctx->map.intlv_bit_pos, + ctx->map.total_intlv_bits, + ctx->ret_addr); +} + +/* + * Make two gaps in address for N bits. + * First gap is a single bit at bit P. + * Second gap is the remaining N-1 bits at bit 12. + * + * Example: + * address bits: [20:0] + * # of interleave bits (n): 3 + * starting interleave bit (p): 8 + * + * First gap + * expanded address bits: [20+1 : p+1][p][p-1 : 0] + * [21 : 9][8][7 : 0] + * + * Second gap uses result from first. + * r = n - 1; remaining interleave bits + * expanded address bits: [21+r : 12+r][12+r-1: 12][11 : 0] + * [23 : 14][13 : 12][11 : 0] + */ +static u64 make_space_for_coh_st_id_split_2_1(struct addr_ctx *ctx) +{ + /* Make a single space at the interleave bit. */ + u64 denorm_addr = expand_bits(ctx->map.intlv_bit_pos, 1, ctx->ret_addr); + + /* Done if there's only a single interleave bit. */ + if (ctx->map.total_intlv_bits <= 1) + return denorm_addr; + + /* Make spaces for the remaining interleave bits starting at bit 12. 
*/ + return expand_bits(12, ctx->map.total_intlv_bits - 1, denorm_addr); +} + +/* + * Make space for CS ID at bits [14:8] as follows: + * + * 8 channels -> bits [10:8] + * 16 channels -> bits [11:8] + * 32 channels -> bits [14,11:8] + * + * 1 die -> N/A + * 2 dies -> bit [12] + * 4 dies -> bits [13:12] + */ +static u64 make_space_for_coh_st_id_mi300(struct addr_ctx *ctx) +{ + u8 num_intlv_bits = ilog2(ctx->map.num_intlv_chan); + u64 denorm_addr; + + if (ctx->map.intlv_bit_pos != 8) { + pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos); + return ~0ULL; + } + + /* Channel bits. Covers up to 4 bits at [11:8]. */ + denorm_addr = expand_bits(8, min(num_intlv_bits, 4), ctx->ret_addr); + + /* Die bits. Always starts at [12]. */ + denorm_addr = expand_bits(12, ilog2(ctx->map.num_intlv_dies), denorm_addr); + + /* Additional channel bit at [14]. */ + if (num_intlv_bits > 4) + denorm_addr = expand_bits(14, 1, denorm_addr); + + return denorm_addr; +} + +/* + * Take the current calculated address and shift enough bits in the middle + * to make a gap where the interleave bits will be inserted. + */ +static u64 make_space_for_coh_st_id(struct addr_ctx *ctx) +{ + switch (ctx->map.intlv_mode) { + case NOHASH_2CHAN: + case NOHASH_4CHAN: + case NOHASH_8CHAN: + case NOHASH_16CHAN: + case NOHASH_32CHAN: + case DF2_2CHAN_HASH: + return make_space_for_coh_st_id_at_intlv_bit(ctx); + + case DF3_COD4_2CHAN_HASH: + case DF3_COD2_4CHAN_HASH: + case DF3_COD1_8CHAN_HASH: + case DF4_NPS4_2CHAN_HASH: + case DF4_NPS2_4CHAN_HASH: + case DF4_NPS1_8CHAN_HASH: + case DF4p5_NPS4_2CHAN_1K_HASH: + case DF4p5_NPS4_2CHAN_2K_HASH: + case DF4p5_NPS2_4CHAN_2K_HASH: + case DF4p5_NPS1_8CHAN_2K_HASH: + case DF4p5_NPS1_16CHAN_2K_HASH: + return make_space_for_coh_st_id_split_2_1(ctx); + + case MI3_HASH_8CHAN: + case MI3_HASH_16CHAN: + case MI3_HASH_32CHAN: + return make_space_for_coh_st_id_mi300(ctx); + + default: + atl_debug_on_bad_intlv_mode(ctx); + return ~0ULL; + } +} + +static u16 get_coh_st_id_df2(struct addr_ctx *ctx) +{ + u8 num_socket_intlv_bits = ilog2(ctx->map.num_intlv_sockets); + u8 num_die_intlv_bits = ilog2(ctx->map.num_intlv_dies); + u8 num_intlv_bits; + u16 coh_st_id, mask; + + coh_st_id = ctx->coh_st_fabric_id - get_dst_fabric_id(ctx); + + /* Channel interleave bits */ + num_intlv_bits = order_base_2(ctx->map.num_intlv_chan); + mask = GENMASK(num_intlv_bits - 1, 0); + coh_st_id &= mask; + + /* Die interleave bits */ + if (num_die_intlv_bits) { + u16 die_bits; + + mask = GENMASK(num_die_intlv_bits - 1, 0); + die_bits = ctx->coh_st_fabric_id & df_cfg.die_id_mask; + die_bits >>= df_cfg.die_id_shift; + + coh_st_id |= (die_bits & mask) << num_intlv_bits; + num_intlv_bits += num_die_intlv_bits; + } + + /* Socket interleave bits */ + if (num_socket_intlv_bits) { + u16 socket_bits; + + mask = GENMASK(num_socket_intlv_bits - 1, 0); + socket_bits = ctx->coh_st_fabric_id & df_cfg.socket_id_mask; + socket_bits >>= df_cfg.socket_id_shift; + + coh_st_id |= (socket_bits & mask) << num_intlv_bits; + } + + return coh_st_id; +} + +static u16 get_coh_st_id_df4(struct addr_ctx *ctx) +{ + /* + * Start with the original component mask and the number of interleave + * bits for the channels in this map. + */ + u8 num_intlv_bits = ilog2(ctx->map.num_intlv_chan); + u16 mask = df_cfg.component_id_mask; + + u16 socket_bits; + + /* Set the derived Coherent Station ID to the input Coherent Station Fabric ID. */ + u16 coh_st_id = ctx->coh_st_fabric_id & mask; + + /* + * Subtract the "base" Destination Fabric ID. 
+ * This accounts for systems with disabled Coherent Stations. + */ + coh_st_id -= get_dst_fabric_id(ctx) & mask; + + /* + * Generate and use a new mask based on the number of bits + * needed for channel interleaving in this map. + */ + mask = GENMASK(num_intlv_bits - 1, 0); + coh_st_id &= mask; + + /* Done if socket interleaving is not enabled. */ + if (ctx->map.num_intlv_sockets <= 1) + return coh_st_id; + + /* + * Figure out how many bits are needed for the number of + * interleaved sockets. And shift the derived Coherent Station ID to account + * for these. + */ + num_intlv_bits = ilog2(ctx->map.num_intlv_sockets); + coh_st_id <<= num_intlv_bits; + + /* Generate a new mask for the socket interleaving bits. */ + mask = GENMASK(num_intlv_bits - 1, 0); + + /* Get the socket interleave bits from the original Coherent Station Fabric ID. */ + socket_bits = (ctx->coh_st_fabric_id & df_cfg.socket_id_mask) >> df_cfg.socket_id_shift; + + /* Apply the appropriate socket bits to the derived Coherent Station ID. */ + coh_st_id |= socket_bits & mask; + + return coh_st_id; +} + +/* + * MI300 hash has: + * (C)hannel[3:0] = coh_st_id[3:0] + * (S)tack[0] = coh_st_id[4] + * (D)ie[1:0] = coh_st_id[6:5] + * + * Hashed coh_st_id is swizzled so that Stack bit is at the end. + * coh_st_id = SDDCCCC + */ +static u16 get_coh_st_id_mi300(struct addr_ctx *ctx) +{ + u8 channel_bits, die_bits, stack_bit; + u16 die_id; + + /* Subtract the "base" Destination Fabric ID. */ + ctx->coh_st_fabric_id -= get_dst_fabric_id(ctx); + + die_id = (ctx->coh_st_fabric_id & df_cfg.die_id_mask) >> df_cfg.die_id_shift; + + channel_bits = FIELD_GET(GENMASK(3, 0), ctx->coh_st_fabric_id); + stack_bit = FIELD_GET(BIT(4), ctx->coh_st_fabric_id) << 6; + die_bits = die_id << 4; + + return stack_bit | die_bits | channel_bits; +} + +/* + * Derive the correct Coherent Station ID that represents the interleave bits + * used within the system physical address. This accounts for the + * interleave mode, number of interleaved channels/dies/sockets, and + * other system/mode-specific bit swizzling. + * + * Returns: Coherent Station ID on success. + * All bits set on error. + */ +static u16 calculate_coh_st_id(struct addr_ctx *ctx) +{ + switch (ctx->map.intlv_mode) { + case NOHASH_2CHAN: + case NOHASH_4CHAN: + case NOHASH_8CHAN: + case NOHASH_16CHAN: + case NOHASH_32CHAN: + case DF3_COD4_2CHAN_HASH: + case DF3_COD2_4CHAN_HASH: + case DF3_COD1_8CHAN_HASH: + case DF2_2CHAN_HASH: + return get_coh_st_id_df2(ctx); + + case DF4_NPS4_2CHAN_HASH: + case DF4_NPS2_4CHAN_HASH: + case DF4_NPS1_8CHAN_HASH: + case DF4p5_NPS4_2CHAN_1K_HASH: + case DF4p5_NPS4_2CHAN_2K_HASH: + case DF4p5_NPS2_4CHAN_2K_HASH: + case DF4p5_NPS1_8CHAN_2K_HASH: + case DF4p5_NPS1_16CHAN_2K_HASH: + return get_coh_st_id_df4(ctx); + + case MI3_HASH_8CHAN: + case MI3_HASH_16CHAN: + case MI3_HASH_32CHAN: + return get_coh_st_id_mi300(ctx); + + /* COH_ST ID is simply the COH_ST Fabric ID adjusted by the Destination Fabric ID. */ + case DF4p5_NPS2_4CHAN_1K_HASH: + case DF4p5_NPS1_8CHAN_1K_HASH: + case DF4p5_NPS1_16CHAN_1K_HASH: + return ctx->coh_st_fabric_id - get_dst_fabric_id(ctx); + + default: + atl_debug_on_bad_intlv_mode(ctx); + return ~0; + } +} + +static u64 insert_coh_st_id_at_intlv_bit(struct addr_ctx *ctx, u64 denorm_addr, u16 coh_st_id) +{ + return denorm_addr | (coh_st_id << ctx->map.intlv_bit_pos); +} + +static u64 insert_coh_st_id_split_2_1(struct addr_ctx *ctx, u64 denorm_addr, u16 coh_st_id) +{ + /* Insert coh_st_id[0] at the interleave bit. 
*/ + denorm_addr |= (coh_st_id & BIT(0)) << ctx->map.intlv_bit_pos; + + /* Insert coh_st_id[2:1] at bit 12. */ + denorm_addr |= (coh_st_id & GENMASK(2, 1)) << 11; + + return denorm_addr; +} + +static u64 insert_coh_st_id_split_2_2(struct addr_ctx *ctx, u64 denorm_addr, u16 coh_st_id) +{ + /* Insert coh_st_id[1:0] at bit 8. */ + denorm_addr |= (coh_st_id & GENMASK(1, 0)) << 8; + + /* + * Insert coh_st_id[n:2] at bit 12. 'n' could be 2 or 3. + * Grab both because bit 3 will be clear if unused. + */ + denorm_addr |= (coh_st_id & GENMASK(3, 2)) << 10; + + return denorm_addr; +} + +static u64 insert_coh_st_id(struct addr_ctx *ctx, u64 denorm_addr, u16 coh_st_id) +{ + switch (ctx->map.intlv_mode) { + case NOHASH_2CHAN: + case NOHASH_4CHAN: + case NOHASH_8CHAN: + case NOHASH_16CHAN: + case NOHASH_32CHAN: + case MI3_HASH_8CHAN: + case MI3_HASH_16CHAN: + case MI3_HASH_32CHAN: + case DF2_2CHAN_HASH: + return insert_coh_st_id_at_intlv_bit(ctx, denorm_addr, coh_st_id); + + case DF3_COD4_2CHAN_HASH: + case DF3_COD2_4CHAN_HASH: + case DF3_COD1_8CHAN_HASH: + case DF4_NPS4_2CHAN_HASH: + case DF4_NPS2_4CHAN_HASH: + case DF4_NPS1_8CHAN_HASH: + case DF4p5_NPS4_2CHAN_1K_HASH: + case DF4p5_NPS4_2CHAN_2K_HASH: + case DF4p5_NPS2_4CHAN_2K_HASH: + case DF4p5_NPS1_8CHAN_2K_HASH: + case DF4p5_NPS1_16CHAN_2K_HASH: + return insert_coh_st_id_split_2_1(ctx, denorm_addr, coh_st_id); + + case DF4p5_NPS2_4CHAN_1K_HASH: + case DF4p5_NPS1_8CHAN_1K_HASH: + case DF4p5_NPS1_16CHAN_1K_HASH: + return insert_coh_st_id_split_2_2(ctx, denorm_addr, coh_st_id); + + default: + atl_debug_on_bad_intlv_mode(ctx); + return ~0ULL; + } +} + +/* + * MI300 systems have a fixed, hardware-defined physical-to-logical + * Coherent Station mapping. The Remap registers are not used. + */ +static const u16 phy_to_log_coh_st_map_mi300[] = { + 12, 13, 14, 15, + 8, 9, 10, 11, + 4, 5, 6, 7, + 0, 1, 2, 3, + 28, 29, 30, 31, + 24, 25, 26, 27, + 20, 21, 22, 23, + 16, 17, 18, 19, +}; + +static u16 get_logical_coh_st_fabric_id_mi300(struct addr_ctx *ctx) +{ + if (ctx->inst_id >= ARRAY_SIZE(phy_to_log_coh_st_map_mi300)) { + atl_debug(ctx, "Instance ID out of range"); + return ~0; + } + + return phy_to_log_coh_st_map_mi300[ctx->inst_id] | (ctx->node_id << df_cfg.node_id_shift); +} + +static u16 get_logical_coh_st_fabric_id(struct addr_ctx *ctx) +{ + u16 component_id, log_fabric_id; + + /* Start with the physical COH_ST Fabric ID. */ + u16 phys_fabric_id = ctx->coh_st_fabric_id; + + if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous) + return get_logical_coh_st_fabric_id_mi300(ctx); + + /* Skip logical ID lookup if remapping is disabled. */ + if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl) && + ctx->map.intlv_mode != DF3_6CHAN) + return phys_fabric_id; + + /* Mask off the Node ID bits to get the "local" Component ID. */ + component_id = phys_fabric_id & df_cfg.component_id_mask; + + /* + * Search the list of logical Component IDs for the one that + * matches this physical Component ID. + */ + for (log_fabric_id = 0; log_fabric_id < MAX_COH_ST_CHANNELS; log_fabric_id++) { + if (ctx->map.remap_array[log_fabric_id] == component_id) + break; + } + + if (log_fabric_id == MAX_COH_ST_CHANNELS) + atl_debug(ctx, "COH_ST remap entry not found for 0x%x", + log_fabric_id); + + /* Get the Node ID bits from the physical and apply to the logical. 
*/ + return (phys_fabric_id & df_cfg.node_id_mask) | log_fabric_id; +} + +static int denorm_addr_common(struct addr_ctx *ctx) +{ + u64 denorm_addr; + u16 coh_st_id; + + /* + * Convert the original physical COH_ST Fabric ID to a logical value. + * This is required for non-power-of-two and other interleaving modes. + */ + ctx->coh_st_fabric_id = get_logical_coh_st_fabric_id(ctx); + + denorm_addr = make_space_for_coh_st_id(ctx); + coh_st_id = calculate_coh_st_id(ctx); + ctx->ret_addr = insert_coh_st_id(ctx, denorm_addr, coh_st_id); + return 0; +} + +static int denorm_addr_df3_6chan(struct addr_ctx *ctx) +{ + u16 coh_st_id = ctx->coh_st_fabric_id & df_cfg.component_id_mask; + u8 total_intlv_bits = ctx->map.total_intlv_bits; + u8 low_bit, intlv_bit = ctx->map.intlv_bit_pos; + u64 msb_intlv_bits, temp_addr_a, temp_addr_b; + u8 np2_bits = ctx->map.np2_bits; + + if (ctx->map.intlv_mode != DF3_6CHAN) + return -EINVAL; + + /* + * 'np2_bits' holds the number of bits needed to cover the + * amount of memory (rounded up) in this map using 64K chunks. + * + * Example: + * Total memory in map: 6GB + * Rounded up to next power-of-2: 8GB + * Number of 64K chunks: 0x20000 + * np2_bits = log2(# of chunks): 17 + * + * Get the two most-significant interleave bits from the + * input address based on the following: + * + * [15 + np2_bits - total_intlv_bits : 14 + np2_bits - total_intlv_bits] + */ + low_bit = 14 + np2_bits - total_intlv_bits; + msb_intlv_bits = ctx->ret_addr >> low_bit; + msb_intlv_bits &= 0x3; + + /* + * If MSB are 11b, then logical COH_ST ID is 6 or 7. + * Need to adjust based on the mod3 result. + */ + if (msb_intlv_bits == 3) { + u8 addr_mod, phys_addr_msb, msb_coh_st_id; + + /* Get the remaining interleave bits from the input address. */ + temp_addr_b = GENMASK_ULL(low_bit - 1, intlv_bit) & ctx->ret_addr; + temp_addr_b >>= intlv_bit; + + /* Calculate the logical COH_ST offset based on mod3. */ + addr_mod = temp_addr_b % 3; + + /* Get COH_ST ID bits [2:1]. */ + msb_coh_st_id = (coh_st_id >> 1) & 0x3; + + /* Get the bit that starts the physical address bits. */ + phys_addr_msb = (intlv_bit + np2_bits + 1); + phys_addr_msb &= BIT(0); + phys_addr_msb++; + phys_addr_msb *= 3 - addr_mod + msb_coh_st_id; + phys_addr_msb %= 3; + + /* Move the physical address MSB to the correct place. 
*/ + temp_addr_b |= phys_addr_msb << (low_bit - total_intlv_bits - intlv_bit); + + /* Generate a new COH_ST ID as follows: coh_st_id = [1, 1, coh_st_id[0]] */ + coh_st_id &= BIT(0); + coh_st_id |= GENMASK(2, 1); + } else { + temp_addr_b = GENMASK_ULL(63, intlv_bit) & ctx->ret_addr; + temp_addr_b >>= intlv_bit; + } + + temp_addr_a = GENMASK_ULL(intlv_bit - 1, 0) & ctx->ret_addr; + temp_addr_b <<= intlv_bit + total_intlv_bits; + + ctx->ret_addr = temp_addr_a | temp_addr_b; + ctx->ret_addr |= coh_st_id << intlv_bit; + return 0; +} + +static int denorm_addr_df4_np2(struct addr_ctx *ctx) +{ + bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G; + u16 group, group_offset, log_coh_st_offset; + unsigned int mod_value, shift_value; + u16 mask = df_cfg.component_id_mask; + u64 temp_addr_a, temp_addr_b; + bool hash_pa8, hashed_bit; + + switch (ctx->map.intlv_mode) { + case DF4_NPS4_3CHAN_HASH: + mod_value = 3; + shift_value = 13; + break; + case DF4_NPS2_6CHAN_HASH: + mod_value = 3; + shift_value = 12; + break; + case DF4_NPS1_12CHAN_HASH: + mod_value = 3; + shift_value = 11; + break; + case DF4_NPS2_5CHAN_HASH: + mod_value = 5; + shift_value = 13; + break; + case DF4_NPS1_10CHAN_HASH: + mod_value = 5; + shift_value = 12; + break; + default: + atl_debug_on_bad_intlv_mode(ctx); + return -EINVAL; + }; + + if (ctx->map.num_intlv_sockets == 1) { + hash_pa8 = BIT_ULL(shift_value) & ctx->ret_addr; + temp_addr_a = remove_bits(shift_value, shift_value, ctx->ret_addr); + } else { + hash_pa8 = ctx->coh_st_fabric_id & df_cfg.socket_id_mask; + temp_addr_a = ctx->ret_addr; + } + + /* Make a gap for the real bit [8]. */ + temp_addr_a = expand_bits(8, 1, temp_addr_a); + + /* Make an additional gap for bits [13:12], as appropriate.*/ + if (ctx->map.intlv_mode == DF4_NPS2_6CHAN_HASH || + ctx->map.intlv_mode == DF4_NPS1_10CHAN_HASH) { + temp_addr_a = expand_bits(13, 1, temp_addr_a); + } else if (ctx->map.intlv_mode == DF4_NPS1_12CHAN_HASH) { + temp_addr_a = expand_bits(12, 2, temp_addr_a); + } + + /* Keep bits [13:0]. */ + temp_addr_a &= GENMASK_ULL(13, 0); + + /* Get the appropriate high bits. */ + shift_value += 1 - ilog2(ctx->map.num_intlv_sockets); + temp_addr_b = GENMASK_ULL(63, shift_value) & ctx->ret_addr; + temp_addr_b >>= shift_value; + temp_addr_b *= mod_value; + + /* + * Coherent Stations are divided into groups. + * + * Multiples of 3 (mod3) are divided into quadrants. + * e.g. NP4_3CHAN -> [0, 1, 2] [6, 7, 8] + * [3, 4, 5] [9, 10, 11] + * + * Multiples of 5 (mod5) are divided into sides. + * e.g. NP2_5CHAN -> [0, 1, 2, 3, 4] [5, 6, 7, 8, 9] + */ + + /* + * Calculate the logical offset for the COH_ST within its DRAM Address map. + * e.g. if map includes [5, 6, 7, 8, 9] and target instance is '8', then + * log_coh_st_offset = 8 - 5 = 3 + */ + log_coh_st_offset = (ctx->coh_st_fabric_id & mask) - (get_dst_fabric_id(ctx) & mask); + + /* + * Figure out the group number. + * + * Following above example, + * log_coh_st_offset = 3 + * mod_value = 5 + * group = 3 / 5 = 0 + */ + group = log_coh_st_offset / mod_value; + + /* + * Figure out the offset within the group. + * + * Following above example, + * log_coh_st_offset = 3 + * mod_value = 5 + * group_offset = 3 % 5 = 3 + */ + group_offset = log_coh_st_offset % mod_value; + + /* Adjust group_offset if the hashed bit [8] is set. */ + if (hash_pa8) { + if (!group_offset) + group_offset = mod_value - 1; + else + group_offset--; + } + + /* Add in the group offset to the high bits. */ + temp_addr_b += group_offset; + + /* Shift the high bits to the proper starting position. 
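+ * Only bits [13:0] were kept in temp_addr_a above, so shifting the high portion up to
+ * bit 14 keeps the two halves disjoint when they are OR'ed together below.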
*/ + temp_addr_b <<= 14; + + /* Combine the high and low bits together. */ + ctx->ret_addr = temp_addr_a | temp_addr_b; + + /* Account for hashing here instead of in dehash_address(). */ + hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl); + hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl); + hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl); + + hashed_bit = !!hash_pa8; + hashed_bit ^= FIELD_GET(BIT_ULL(14), ctx->ret_addr); + hashed_bit ^= FIELD_GET(BIT_ULL(16), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G; + + ctx->ret_addr |= hashed_bit << 8; + + /* Done for 3 and 5 channel. */ + if (ctx->map.intlv_mode == DF4_NPS4_3CHAN_HASH || + ctx->map.intlv_mode == DF4_NPS2_5CHAN_HASH) + return 0; + + /* Select the proper 'group' bit to use for Bit 13. */ + if (ctx->map.intlv_mode == DF4_NPS1_12CHAN_HASH) + hashed_bit = !!(group & BIT(1)); + else + hashed_bit = group & BIT(0); + + hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G; + + ctx->ret_addr |= hashed_bit << 13; + + /* Done for 6 and 10 channel. */ + if (ctx->map.intlv_mode != DF4_NPS1_12CHAN_HASH) + return 0; + + hashed_bit = group & BIT(0); + hashed_bit ^= FIELD_GET(BIT_ULL(17), ctx->ret_addr) & hash_ctl_64k; + hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M; + hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G; + + ctx->ret_addr |= hashed_bit << 12; + return 0; +} + +int denormalize_address(struct addr_ctx *ctx) +{ + switch (ctx->map.intlv_mode) { + case NONE: + return 0; + case DF4_NPS4_3CHAN_HASH: + case DF4_NPS2_6CHAN_HASH: + case DF4_NPS1_12CHAN_HASH: + case DF4_NPS2_5CHAN_HASH: + case DF4_NPS1_10CHAN_HASH: + return denorm_addr_df4_np2(ctx); + case DF3_6CHAN: + return denorm_addr_df3_6chan(ctx); + default: + return denorm_addr_common(ctx); + } +} diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h new file mode 100644 index 000000000000..5de69e0bb0f9 --- /dev/null +++ b/drivers/ras/amd/atl/internal.h @@ -0,0 +1,306 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * AMD Address Translation Library + * + * internal.h : Helper functions and common defines + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Yazen Ghannam <Yazen.Ghannam@amd.com> + */ + +#ifndef __AMD_ATL_INTERNAL_H__ +#define __AMD_ATL_INTERNAL_H__ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/ras.h> + +#include <asm/amd_nb.h> + +#include "reg_fields.h" + +/* Maximum possible number of Coherent Stations within a single Data Fabric. */ +#define MAX_COH_ST_CHANNELS 32 + +/* PCI ID for Zen4 Server DF Function 0. */ +#define DF_FUNC0_ID_ZEN4_SERVER 0x14AD1022 + +/* PCI IDs for MI300 DF Function 0. */ +#define DF_FUNC0_ID_MI300 0x15281022 + +/* Shift needed for adjusting register values to true values. */ +#define DF_DRAM_BASE_LIMIT_LSB 28 +#define MI300_DRAM_LIMIT_LSB 20 + +enum df_revisions { + UNKNOWN, + DF2, + DF3, + DF3p5, + DF4, + DF4p5, +}; + +/* These are mapped 1:1 to the hardware values. Special cases are set at > 0x20. 
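+ * (DF4.5 register encodings that would collide with the values below are offset by
+ * 0x20 at decode time; see df4p5_get_intlv_mode() in map.c.)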
*/ +enum intlv_modes { + NONE = 0x00, + NOHASH_2CHAN = 0x01, + NOHASH_4CHAN = 0x03, + NOHASH_8CHAN = 0x05, + DF3_6CHAN = 0x06, + NOHASH_16CHAN = 0x07, + NOHASH_32CHAN = 0x08, + DF3_COD4_2CHAN_HASH = 0x0C, + DF3_COD2_4CHAN_HASH = 0x0D, + DF3_COD1_8CHAN_HASH = 0x0E, + DF4_NPS4_2CHAN_HASH = 0x10, + DF4_NPS2_4CHAN_HASH = 0x11, + DF4_NPS1_8CHAN_HASH = 0x12, + DF4_NPS4_3CHAN_HASH = 0x13, + DF4_NPS2_6CHAN_HASH = 0x14, + DF4_NPS1_12CHAN_HASH = 0x15, + DF4_NPS2_5CHAN_HASH = 0x16, + DF4_NPS1_10CHAN_HASH = 0x17, + MI3_HASH_8CHAN = 0x18, + MI3_HASH_16CHAN = 0x19, + MI3_HASH_32CHAN = 0x1A, + DF2_2CHAN_HASH = 0x21, + /* DF4.5 modes are all IntLvNumChan + 0x20 */ + DF4p5_NPS1_16CHAN_1K_HASH = 0x2C, + DF4p5_NPS0_24CHAN_1K_HASH = 0x2E, + DF4p5_NPS4_2CHAN_1K_HASH = 0x30, + DF4p5_NPS2_4CHAN_1K_HASH = 0x31, + DF4p5_NPS1_8CHAN_1K_HASH = 0x32, + DF4p5_NPS4_3CHAN_1K_HASH = 0x33, + DF4p5_NPS2_6CHAN_1K_HASH = 0x34, + DF4p5_NPS1_12CHAN_1K_HASH = 0x35, + DF4p5_NPS2_5CHAN_1K_HASH = 0x36, + DF4p5_NPS1_10CHAN_1K_HASH = 0x37, + DF4p5_NPS4_2CHAN_2K_HASH = 0x40, + DF4p5_NPS2_4CHAN_2K_HASH = 0x41, + DF4p5_NPS1_8CHAN_2K_HASH = 0x42, + DF4p5_NPS1_16CHAN_2K_HASH = 0x43, + DF4p5_NPS4_3CHAN_2K_HASH = 0x44, + DF4p5_NPS2_6CHAN_2K_HASH = 0x45, + DF4p5_NPS1_12CHAN_2K_HASH = 0x46, + DF4p5_NPS0_24CHAN_2K_HASH = 0x47, + DF4p5_NPS2_5CHAN_2K_HASH = 0x48, + DF4p5_NPS1_10CHAN_2K_HASH = 0x49, +}; + +struct df_flags { + __u8 legacy_ficaa : 1, + socket_id_shift_quirk : 1, + heterogeneous : 1, + __reserved_0 : 5; +}; + +struct df_config { + enum df_revisions rev; + + /* + * These masks operate on the 16-bit Coherent Station IDs, + * e.g. Instance, Fabric, Destination, etc. + */ + u16 component_id_mask; + u16 die_id_mask; + u16 node_id_mask; + u16 socket_id_mask; + + /* + * Least-significant bit of Node ID portion of the + * system-wide Coherent Station Fabric ID. + */ + u8 node_id_shift; + + /* + * Least-significant bit of Die portion of the Node ID. + * Adjusted to include the Node ID shift in order to apply + * to the Coherent Station Fabric ID. + */ + u8 die_id_shift; + + /* + * Least-significant bit of Socket portion of the Node ID. + * Adjusted to include the Node ID shift in order to apply + * to the Coherent Station Fabric ID. + */ + u8 socket_id_shift; + + /* Number of DRAM Address maps visible in a Coherent Station. */ + u8 num_coh_st_maps; + + /* Global flags to handle special cases. */ + struct df_flags flags; +}; + +extern struct df_config df_cfg; + +struct dram_addr_map { + /* + * Each DRAM Address Map can operate independently + * in different interleaving modes. + */ + enum intlv_modes intlv_mode; + + /* System-wide number for this address map. */ + u8 num; + + /* Raw register values */ + u32 base; + u32 limit; + u32 ctl; + u32 intlv; + + /* + * Logical to Physical Coherent Station Remapping array + * + * Index: Logical Coherent Station Instance ID + * Value: Physical Coherent Station Instance ID + * + * phys_coh_st_inst_id = remap_array[log_coh_st_inst_id] + */ + u8 remap_array[MAX_COH_ST_CHANNELS]; + + /* + * Number of bits covering DRAM Address map 0 + * when interleaving is non-power-of-2. + * + * Used only for DF3_6CHAN. + */ + u8 np2_bits; + + /* Position of the 'interleave bit'. */ + u8 intlv_bit_pos; + /* Number of channels interleaved in this map. */ + u8 num_intlv_chan; + /* Number of dies interleaved in this map. */ + u8 num_intlv_dies; + /* Number of sockets interleaved in this map. */ + u8 num_intlv_sockets; + /* + * Total number of channels interleaved accounting + * for die and socket interleaving. 
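+ * For example (hypothetical configuration): 8 channels interleaved across 2 sockets
+ * with 1 die each gives total_intlv_chan = 16, and total_intlv_bits = 4 after the
+ * order_base_2() rounding done in calculate_intlv_bits().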
+ */ + u8 total_intlv_chan; + /* Total bits needed to cover 'total_intlv_chan'. */ + u8 total_intlv_bits; +}; + +/* Original input values cached for debug printing. */ +struct addr_ctx_inputs { + u64 norm_addr; + u8 socket_id; + u8 die_id; + u8 coh_st_inst_id; +}; + +struct addr_ctx { + u64 ret_addr; + + struct addr_ctx_inputs inputs; + struct dram_addr_map map; + + /* AMD Node ID calculated from Socket and Die IDs. */ + u8 node_id; + + /* + * Coherent Station Instance ID + * Local ID used within a 'node'. + */ + u16 inst_id; + + /* + * Coherent Station Fabric ID + * System-wide ID that includes 'node' bits. + */ + u16 coh_st_fabric_id; +}; + +int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo); +int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo); + +int get_df_system_info(void); +int determine_node_id(struct addr_ctx *ctx, u8 socket_num, u8 die_num); +int get_addr_hash_mi300(void); + +int get_address_map(struct addr_ctx *ctx); + +int denormalize_address(struct addr_ctx *ctx); +int dehash_address(struct addr_ctx *ctx); + +unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsigned long addr); +unsigned long convert_umc_mca_addr_to_sys_addr(struct atl_err *err); + +/* + * Make a gap in @data that is @num_bits long starting at @bit_num. + * e.g. data = 11111111'b + * bit_num = 3 + * num_bits = 2 + * result = 1111100111'b + */ +static inline u64 expand_bits(u8 bit_num, u8 num_bits, u64 data) +{ + u64 temp1, temp2; + + if (!num_bits) + return data; + + if (!bit_num) { + WARN_ON_ONCE(num_bits >= BITS_PER_LONG); + return data << num_bits; + } + + WARN_ON_ONCE(bit_num >= BITS_PER_LONG); + + temp1 = data & GENMASK_ULL(bit_num - 1, 0); + + temp2 = data & GENMASK_ULL(63, bit_num); + temp2 <<= num_bits; + + return temp1 | temp2; +} + +/* + * Remove bits in @data between @low_bit and @high_bit inclusive. + * e.g. data = XXXYYZZZ'b + * low_bit = 3 + * high_bit = 4 + * result = XXXZZZ'b + */ +static inline u64 remove_bits(u8 low_bit, u8 high_bit, u64 data) +{ + u64 temp1, temp2; + + WARN_ON_ONCE(high_bit >= BITS_PER_LONG); + WARN_ON_ONCE(low_bit >= BITS_PER_LONG); + WARN_ON_ONCE(low_bit > high_bit); + + if (!low_bit) + return data >> (high_bit + 1); + + temp1 = GENMASK_ULL(low_bit - 1, 0) & data; + temp2 = GENMASK_ULL(63, high_bit + 1) & data; + temp2 >>= high_bit - low_bit + 1; + + return temp1 | temp2; +} + +#define atl_debug(ctx, fmt, arg...) \ + pr_debug("socket_id=%u die_id=%u coh_st_inst_id=%u norm_addr=0x%016llx: " fmt,\ + (ctx)->inputs.socket_id, (ctx)->inputs.die_id,\ + (ctx)->inputs.coh_st_inst_id, (ctx)->inputs.norm_addr, ##arg) + +static inline void atl_debug_on_bad_df_rev(void) +{ + pr_debug("Unrecognized DF rev: %u", df_cfg.rev); +} + +static inline void atl_debug_on_bad_intlv_mode(struct addr_ctx *ctx) +{ + atl_debug(ctx, "Unrecognized interleave mode: %u", ctx->map.intlv_mode); +} + +#endif /* __AMD_ATL_INTERNAL_H__ */ diff --git a/drivers/ras/amd/atl/map.c b/drivers/ras/amd/atl/map.c new file mode 100644 index 000000000000..8b908e8d7495 --- /dev/null +++ b/drivers/ras/amd/atl/map.c @@ -0,0 +1,682 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD Address Translation Library + * + * map.c : Functions to read and decode DRAM address maps + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Author: Yazen Ghannam <Yazen.Ghannam@amd.com> + */ + +#include "internal.h" + +static int df2_get_intlv_mode(struct addr_ctx *ctx) +{ + ctx->map.intlv_mode = FIELD_GET(DF2_INTLV_NUM_CHAN, ctx->map.base); + + if (ctx->map.intlv_mode == 8) + ctx->map.intlv_mode = DF2_2CHAN_HASH; + + if (ctx->map.intlv_mode != NONE && + ctx->map.intlv_mode != NOHASH_2CHAN && + ctx->map.intlv_mode != DF2_2CHAN_HASH) + return -EINVAL; + + return 0; +} + +static int df3_get_intlv_mode(struct addr_ctx *ctx) +{ + ctx->map.intlv_mode = FIELD_GET(DF3_INTLV_NUM_CHAN, ctx->map.base); + return 0; +} + +static int df3p5_get_intlv_mode(struct addr_ctx *ctx) +{ + ctx->map.intlv_mode = FIELD_GET(DF3p5_INTLV_NUM_CHAN, ctx->map.base); + + if (ctx->map.intlv_mode == DF3_6CHAN) + return -EINVAL; + + return 0; +} + +static int df4_get_intlv_mode(struct addr_ctx *ctx) +{ + ctx->map.intlv_mode = FIELD_GET(DF4_INTLV_NUM_CHAN, ctx->map.intlv); + + if (ctx->map.intlv_mode == DF3_COD4_2CHAN_HASH || + ctx->map.intlv_mode == DF3_COD2_4CHAN_HASH || + ctx->map.intlv_mode == DF3_COD1_8CHAN_HASH || + ctx->map.intlv_mode == DF3_6CHAN) + return -EINVAL; + + return 0; +} + +static int df4p5_get_intlv_mode(struct addr_ctx *ctx) +{ + ctx->map.intlv_mode = FIELD_GET(DF4p5_INTLV_NUM_CHAN, ctx->map.intlv); + + if (ctx->map.intlv_mode <= NOHASH_32CHAN) + return 0; + + if (ctx->map.intlv_mode >= MI3_HASH_8CHAN && + ctx->map.intlv_mode <= MI3_HASH_32CHAN) + return 0; + + /* + * Modes matching the ranges above are returned as-is. + * + * All other modes are "fixed up" by adding 20h to make a unique value. + */ + ctx->map.intlv_mode += 0x20; + + return 0; +} + +static int get_intlv_mode(struct addr_ctx *ctx) +{ + int ret; + + switch (df_cfg.rev) { + case DF2: + ret = df2_get_intlv_mode(ctx); + break; + case DF3: + ret = df3_get_intlv_mode(ctx); + break; + case DF3p5: + ret = df3p5_get_intlv_mode(ctx); + break; + case DF4: + ret = df4_get_intlv_mode(ctx); + break; + case DF4p5: + ret = df4p5_get_intlv_mode(ctx); + break; + default: + ret = -EINVAL; + } + + if (ret) + atl_debug_on_bad_df_rev(); + + return ret; +} + +static u64 get_hi_addr_offset(u32 reg_dram_offset) +{ + u8 shift = DF_DRAM_BASE_LIMIT_LSB; + u64 hi_addr_offset; + + switch (df_cfg.rev) { + case DF2: + hi_addr_offset = FIELD_GET(DF2_HI_ADDR_OFFSET, reg_dram_offset); + break; + case DF3: + case DF3p5: + hi_addr_offset = FIELD_GET(DF3_HI_ADDR_OFFSET, reg_dram_offset); + break; + case DF4: + case DF4p5: + hi_addr_offset = FIELD_GET(DF4_HI_ADDR_OFFSET, reg_dram_offset); + break; + default: + hi_addr_offset = 0; + atl_debug_on_bad_df_rev(); + } + + if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous) + shift = MI300_DRAM_LIMIT_LSB; + + return hi_addr_offset << shift; +} + +/* + * Returns: 0 if offset is disabled. + * 1 if offset is enabled. + * -EINVAL on error. + */ +static int get_dram_offset(struct addr_ctx *ctx, u64 *norm_offset) +{ + u32 reg_dram_offset; + u8 map_num; + + /* Should not be called for map 0. */ + if (!ctx->map.num) { + atl_debug(ctx, "Trying to find DRAM offset for map 0"); + return -EINVAL; + } + + /* + * DramOffset registers don't exist for map 0, so the base register + * actually refers to map 1. + * Adjust the map_num for the register offsets. 
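+ * For example, ctx->map.num == 2 gives map_num = 1, so DF4 and later systems read
+ * DramOffset at offset 0x140 + (4 * 1) = 0x144 (a hypothetical walk-through of the
+ * reads below).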
+ */ + map_num = ctx->map.num - 1; + + if (df_cfg.rev >= DF4) { + /* Read D18F7x140 (DramOffset) */ + if (df_indirect_read_instance(ctx->node_id, 7, 0x140 + (4 * map_num), + ctx->inst_id, ®_dram_offset)) + return -EINVAL; + + } else { + /* Read D18F0x1B4 (DramOffset) */ + if (df_indirect_read_instance(ctx->node_id, 0, 0x1B4 + (4 * map_num), + ctx->inst_id, ®_dram_offset)) + return -EINVAL; + } + + if (!FIELD_GET(DF_HI_ADDR_OFFSET_EN, reg_dram_offset)) + return 0; + + *norm_offset = get_hi_addr_offset(reg_dram_offset); + + return 1; +} + +static int df3_6ch_get_dram_addr_map(struct addr_ctx *ctx) +{ + u16 dst_fabric_id = FIELD_GET(DF3_DST_FABRIC_ID, ctx->map.limit); + u8 i, j, shift = 4, mask = 0xF; + u32 reg, offset = 0x60; + u16 dst_node_id; + + /* Get Socket 1 register. */ + if (dst_fabric_id & df_cfg.socket_id_mask) + offset = 0x68; + + /* Read D18F0x06{0,8} (DF::Skt0CsTargetRemap0)/(DF::Skt0CsTargetRemap1) */ + if (df_indirect_read_broadcast(ctx->node_id, 0, offset, ®)) + return -EINVAL; + + /* Save 8 remap entries. */ + for (i = 0, j = 0; i < 8; i++, j++) + ctx->map.remap_array[i] = (reg >> (j * shift)) & mask; + + dst_node_id = dst_fabric_id & df_cfg.node_id_mask; + dst_node_id >>= df_cfg.node_id_shift; + + /* Read D18F2x090 (DF::Np2ChannelConfig) */ + if (df_indirect_read_broadcast(dst_node_id, 2, 0x90, ®)) + return -EINVAL; + + ctx->map.np2_bits = FIELD_GET(DF_LOG2_ADDR_64K_SPACE0, reg); + return 0; +} + +static int df2_get_dram_addr_map(struct addr_ctx *ctx) +{ + /* Read D18F0x110 (DramBaseAddress). */ + if (df_indirect_read_instance(ctx->node_id, 0, 0x110 + (8 * ctx->map.num), + ctx->inst_id, &ctx->map.base)) + return -EINVAL; + + /* Read D18F0x114 (DramLimitAddress). */ + if (df_indirect_read_instance(ctx->node_id, 0, 0x114 + (8 * ctx->map.num), + ctx->inst_id, &ctx->map.limit)) + return -EINVAL; + + return 0; +} + +static int df3_get_dram_addr_map(struct addr_ctx *ctx) +{ + if (df2_get_dram_addr_map(ctx)) + return -EINVAL; + + /* Read D18F0x3F8 (DfGlobalCtl). */ + if (df_indirect_read_instance(ctx->node_id, 0, 0x3F8, + ctx->inst_id, &ctx->map.ctl)) + return -EINVAL; + + return 0; +} + +static int df4_get_dram_addr_map(struct addr_ctx *ctx) +{ + u8 remap_sel, i, j, shift = 4, mask = 0xF; + u32 remap_reg; + + /* Read D18F7xE00 (DramBaseAddress). */ + if (df_indirect_read_instance(ctx->node_id, 7, 0xE00 + (16 * ctx->map.num), + ctx->inst_id, &ctx->map.base)) + return -EINVAL; + + /* Read D18F7xE04 (DramLimitAddress). */ + if (df_indirect_read_instance(ctx->node_id, 7, 0xE04 + (16 * ctx->map.num), + ctx->inst_id, &ctx->map.limit)) + return -EINVAL; + + /* Read D18F7xE08 (DramAddressCtl). */ + if (df_indirect_read_instance(ctx->node_id, 7, 0xE08 + (16 * ctx->map.num), + ctx->inst_id, &ctx->map.ctl)) + return -EINVAL; + + /* Read D18F7xE0C (DramAddressIntlv). */ + if (df_indirect_read_instance(ctx->node_id, 7, 0xE0C + (16 * ctx->map.num), + ctx->inst_id, &ctx->map.intlv)) + return -EINVAL; + + /* Check if Remap Enable bit is valid. */ + if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl)) + return 0; + + /* Fill with bogus values, because '0' is a valid value. */ + memset(&ctx->map.remap_array, 0xFF, sizeof(ctx->map.remap_array)); + + /* Get Remap registers. */ + remap_sel = FIELD_GET(DF4_REMAP_SEL, ctx->map.ctl); + + /* Read D18F7x180 (CsTargetRemap0A). */ + if (df_indirect_read_instance(ctx->node_id, 7, 0x180 + (8 * remap_sel), + ctx->inst_id, &remap_reg)) + return -EINVAL; + + /* Save first 8 remap entries. 
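+ * Each entry is one 4-bit nibble: a hypothetical remap_reg of 0x76543210 fills
+ * remap_array[0..7] with {0, 1, 2, 3, 4, 5, 6, 7}.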
*/ + for (i = 0, j = 0; i < 8; i++, j++) + ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask; + + /* Read D18F7x184 (CsTargetRemap0B). */ + if (df_indirect_read_instance(ctx->node_id, 7, 0x184 + (8 * remap_sel), + ctx->inst_id, &remap_reg)) + return -EINVAL; + + /* Save next 8 remap entries. */ + for (i = 8, j = 0; i < 16; i++, j++) + ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask; + + return 0; +} + +static int df4p5_get_dram_addr_map(struct addr_ctx *ctx) +{ + u8 remap_sel, i, j, shift = 5, mask = 0x1F; + u32 remap_reg; + + /* Read D18F7x200 (DramBaseAddress). */ + if (df_indirect_read_instance(ctx->node_id, 7, 0x200 + (16 * ctx->map.num), + ctx->inst_id, &ctx->map.base)) + return -EINVAL; + + /* Read D18F7x204 (DramLimitAddress). */ + if (df_indirect_read_instance(ctx->node_id, 7, 0x204 + (16 * ctx->map.num), + ctx->inst_id, &ctx->map.limit)) + return -EINVAL; + + /* Read D18F7x208 (DramAddressCtl). */ + if (df_indirect_read_instance(ctx->node_id, 7, 0x208 + (16 * ctx->map.num), + ctx->inst_id, &ctx->map.ctl)) + return -EINVAL; + + /* Read D18F7x20C (DramAddressIntlv). */ + if (df_indirect_read_instance(ctx->node_id, 7, 0x20C + (16 * ctx->map.num), + ctx->inst_id, &ctx->map.intlv)) + return -EINVAL; + + /* Check if Remap Enable bit is valid. */ + if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl)) + return 0; + + /* Fill with bogus values, because '0' is a valid value. */ + memset(&ctx->map.remap_array, 0xFF, sizeof(ctx->map.remap_array)); + + /* Get Remap registers. */ + remap_sel = FIELD_GET(DF4p5_REMAP_SEL, ctx->map.ctl); + + /* Read D18F7x180 (CsTargetRemap0A). */ + if (df_indirect_read_instance(ctx->node_id, 7, 0x180 + (24 * remap_sel), + ctx->inst_id, &remap_reg)) + return -EINVAL; + + /* Save first 6 remap entries. */ + for (i = 0, j = 0; i < 6; i++, j++) + ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask; + + /* Read D18F7x184 (CsTargetRemap0B). */ + if (df_indirect_read_instance(ctx->node_id, 7, 0x184 + (24 * remap_sel), + ctx->inst_id, &remap_reg)) + return -EINVAL; + + /* Save next 6 remap entries. */ + for (i = 6, j = 0; i < 12; i++, j++) + ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask; + + /* Read D18F7x188 (CsTargetRemap0C). */ + if (df_indirect_read_instance(ctx->node_id, 7, 0x188 + (24 * remap_sel), + ctx->inst_id, &remap_reg)) + return -EINVAL; + + /* Save next 6 remap entries. */ + for (i = 12, j = 0; i < 18; i++, j++) + ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask; + + return 0; +} + +static int get_dram_addr_map(struct addr_ctx *ctx) +{ + switch (df_cfg.rev) { + case DF2: return df2_get_dram_addr_map(ctx); + case DF3: + case DF3p5: return df3_get_dram_addr_map(ctx); + case DF4: return df4_get_dram_addr_map(ctx); + case DF4p5: return df4p5_get_dram_addr_map(ctx); + default: + atl_debug_on_bad_df_rev(); + return -EINVAL; + } +} + +static int get_coh_st_fabric_id(struct addr_ctx *ctx) +{ + u32 reg; + + /* + * On MI300 systems, the Coherent Station Fabric ID is derived + * later. And it does not depend on the register value. + */ + if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous) + return 0; + + /* Read D18F0x50 (FabricBlockInstanceInformation3). 
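+ * For example, a hypothetical register value of 0x00012300 yields Fabric ID 0x123 on
+ * pre-DF4.5 systems (bits [19:8]) and 0x23 on DF4.5 (bits [15:8]).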
*/ + if (df_indirect_read_instance(ctx->node_id, 0, 0x50, ctx->inst_id, ®)) + return -EINVAL; + + if (df_cfg.rev < DF4p5) + ctx->coh_st_fabric_id = FIELD_GET(DF2_COH_ST_FABRIC_ID, reg); + else + ctx->coh_st_fabric_id = FIELD_GET(DF4p5_COH_ST_FABRIC_ID, reg); + + return 0; +} + +static int find_normalized_offset(struct addr_ctx *ctx, u64 *norm_offset) +{ + u64 last_offset = 0; + int ret; + + for (ctx->map.num = 1; ctx->map.num < df_cfg.num_coh_st_maps; ctx->map.num++) { + ret = get_dram_offset(ctx, norm_offset); + if (ret < 0) + return ret; + + /* Continue search if this map's offset is not enabled. */ + if (!ret) + continue; + + /* Enabled offsets should never be 0. */ + if (*norm_offset == 0) { + atl_debug(ctx, "Enabled map %u offset is 0", ctx->map.num); + return -EINVAL; + } + + /* Offsets should always increase from one map to the next. */ + if (*norm_offset <= last_offset) { + atl_debug(ctx, "Map %u offset (0x%016llx) <= previous (0x%016llx)", + ctx->map.num, *norm_offset, last_offset); + return -EINVAL; + } + + /* Match if this map's offset is less than the current calculated address. */ + if (ctx->ret_addr >= *norm_offset) + break; + + last_offset = *norm_offset; + } + + /* + * Finished search without finding a match. + * Reset to map 0 and no offset. + */ + if (ctx->map.num >= df_cfg.num_coh_st_maps) { + ctx->map.num = 0; + *norm_offset = 0; + } + + return 0; +} + +static bool valid_map(struct addr_ctx *ctx) +{ + if (df_cfg.rev >= DF4) + return FIELD_GET(DF_ADDR_RANGE_VAL, ctx->map.ctl); + else + return FIELD_GET(DF_ADDR_RANGE_VAL, ctx->map.base); +} + +static int get_address_map_common(struct addr_ctx *ctx) +{ + u64 norm_offset = 0; + + if (get_coh_st_fabric_id(ctx)) + return -EINVAL; + + if (find_normalized_offset(ctx, &norm_offset)) + return -EINVAL; + + if (get_dram_addr_map(ctx)) + return -EINVAL; + + if (!valid_map(ctx)) + return -EINVAL; + + ctx->ret_addr -= norm_offset; + + return 0; +} + +static u8 get_num_intlv_chan(struct addr_ctx *ctx) +{ + switch (ctx->map.intlv_mode) { + case NONE: + return 1; + case NOHASH_2CHAN: + case DF2_2CHAN_HASH: + case DF3_COD4_2CHAN_HASH: + case DF4_NPS4_2CHAN_HASH: + case DF4p5_NPS4_2CHAN_1K_HASH: + case DF4p5_NPS4_2CHAN_2K_HASH: + return 2; + case DF4_NPS4_3CHAN_HASH: + case DF4p5_NPS4_3CHAN_1K_HASH: + case DF4p5_NPS4_3CHAN_2K_HASH: + return 3; + case NOHASH_4CHAN: + case DF3_COD2_4CHAN_HASH: + case DF4_NPS2_4CHAN_HASH: + case DF4p5_NPS2_4CHAN_1K_HASH: + case DF4p5_NPS2_4CHAN_2K_HASH: + return 4; + case DF4_NPS2_5CHAN_HASH: + case DF4p5_NPS2_5CHAN_1K_HASH: + case DF4p5_NPS2_5CHAN_2K_HASH: + return 5; + case DF3_6CHAN: + case DF4_NPS2_6CHAN_HASH: + case DF4p5_NPS2_6CHAN_1K_HASH: + case DF4p5_NPS2_6CHAN_2K_HASH: + return 6; + case NOHASH_8CHAN: + case DF3_COD1_8CHAN_HASH: + case DF4_NPS1_8CHAN_HASH: + case MI3_HASH_8CHAN: + case DF4p5_NPS1_8CHAN_1K_HASH: + case DF4p5_NPS1_8CHAN_2K_HASH: + return 8; + case DF4_NPS1_10CHAN_HASH: + case DF4p5_NPS1_10CHAN_1K_HASH: + case DF4p5_NPS1_10CHAN_2K_HASH: + return 10; + case DF4_NPS1_12CHAN_HASH: + case DF4p5_NPS1_12CHAN_1K_HASH: + case DF4p5_NPS1_12CHAN_2K_HASH: + return 12; + case NOHASH_16CHAN: + case MI3_HASH_16CHAN: + case DF4p5_NPS1_16CHAN_1K_HASH: + case DF4p5_NPS1_16CHAN_2K_HASH: + return 16; + case DF4p5_NPS0_24CHAN_1K_HASH: + case DF4p5_NPS0_24CHAN_2K_HASH: + return 24; + case NOHASH_32CHAN: + case MI3_HASH_32CHAN: + return 32; + default: + atl_debug_on_bad_intlv_mode(ctx); + return 0; + } +} + +static void calculate_intlv_bits(struct addr_ctx *ctx) +{ + ctx->map.num_intlv_chan = 
get_num_intlv_chan(ctx); + + ctx->map.total_intlv_chan = ctx->map.num_intlv_chan; + ctx->map.total_intlv_chan *= ctx->map.num_intlv_dies; + ctx->map.total_intlv_chan *= ctx->map.num_intlv_sockets; + + /* + * Get the number of bits needed to cover this many channels. + * order_base_2() rounds up automatically. + */ + ctx->map.total_intlv_bits = order_base_2(ctx->map.total_intlv_chan); +} + +static u8 get_intlv_bit_pos(struct addr_ctx *ctx) +{ + u8 addr_sel = 0; + + switch (df_cfg.rev) { + case DF2: + addr_sel = FIELD_GET(DF2_INTLV_ADDR_SEL, ctx->map.base); + break; + case DF3: + case DF3p5: + addr_sel = FIELD_GET(DF3_INTLV_ADDR_SEL, ctx->map.base); + break; + case DF4: + case DF4p5: + addr_sel = FIELD_GET(DF4_INTLV_ADDR_SEL, ctx->map.intlv); + break; + default: + atl_debug_on_bad_df_rev(); + break; + } + + /* Add '8' to get the 'interleave bit position'. */ + return addr_sel + 8; +} + +static u8 get_num_intlv_dies(struct addr_ctx *ctx) +{ + u8 dies = 0; + + switch (df_cfg.rev) { + case DF2: + dies = FIELD_GET(DF2_INTLV_NUM_DIES, ctx->map.limit); + break; + case DF3: + dies = FIELD_GET(DF3_INTLV_NUM_DIES, ctx->map.base); + break; + case DF3p5: + dies = FIELD_GET(DF3p5_INTLV_NUM_DIES, ctx->map.base); + break; + case DF4: + case DF4p5: + dies = FIELD_GET(DF4_INTLV_NUM_DIES, ctx->map.intlv); + break; + default: + atl_debug_on_bad_df_rev(); + break; + } + + /* Register value is log2, e.g. 0 -> 1 die, 1 -> 2 dies, etc. */ + return 1 << dies; +} + +static u8 get_num_intlv_sockets(struct addr_ctx *ctx) +{ + u8 sockets = 0; + + switch (df_cfg.rev) { + case DF2: + sockets = FIELD_GET(DF2_INTLV_NUM_SOCKETS, ctx->map.limit); + break; + case DF3: + case DF3p5: + sockets = FIELD_GET(DF2_INTLV_NUM_SOCKETS, ctx->map.base); + break; + case DF4: + case DF4p5: + sockets = FIELD_GET(DF4_INTLV_NUM_SOCKETS, ctx->map.intlv); + break; + default: + atl_debug_on_bad_df_rev(); + break; + } + + /* Register value is log2, e.g. 0 -> 1 sockets, 1 -> 2 sockets, etc. 
*/ + return 1 << sockets; +} + +static int get_global_map_data(struct addr_ctx *ctx) +{ + if (get_intlv_mode(ctx)) + return -EINVAL; + + if (ctx->map.intlv_mode == DF3_6CHAN && + df3_6ch_get_dram_addr_map(ctx)) + return -EINVAL; + + ctx->map.intlv_bit_pos = get_intlv_bit_pos(ctx); + ctx->map.num_intlv_dies = get_num_intlv_dies(ctx); + ctx->map.num_intlv_sockets = get_num_intlv_sockets(ctx); + calculate_intlv_bits(ctx); + + return 0; +} + +static void dump_address_map(struct dram_addr_map *map) +{ + u8 i; + + pr_debug("intlv_mode=0x%x", map->intlv_mode); + pr_debug("num=0x%x", map->num); + pr_debug("base=0x%x", map->base); + pr_debug("limit=0x%x", map->limit); + pr_debug("ctl=0x%x", map->ctl); + pr_debug("intlv=0x%x", map->intlv); + + for (i = 0; i < MAX_COH_ST_CHANNELS; i++) + pr_debug("remap_array[%u]=0x%x", i, map->remap_array[i]); + + pr_debug("intlv_bit_pos=%u", map->intlv_bit_pos); + pr_debug("num_intlv_chan=%u", map->num_intlv_chan); + pr_debug("num_intlv_dies=%u", map->num_intlv_dies); + pr_debug("num_intlv_sockets=%u", map->num_intlv_sockets); + pr_debug("total_intlv_chan=%u", map->total_intlv_chan); + pr_debug("total_intlv_bits=%u", map->total_intlv_bits); +} + +int get_address_map(struct addr_ctx *ctx) +{ + int ret; + + ret = get_address_map_common(ctx); + if (ret) + return ret; + + ret = get_global_map_data(ctx); + if (ret) + return ret; + + dump_address_map(&ctx->map); + + return ret; +} diff --git a/drivers/ras/amd/atl/reg_fields.h b/drivers/ras/amd/atl/reg_fields.h new file mode 100644 index 000000000000..9dcdf6e4a856 --- /dev/null +++ b/drivers/ras/amd/atl/reg_fields.h @@ -0,0 +1,606 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * AMD Address Translation Library + * + * reg_fields.h : Register field definitions + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Yazen Ghannam <Yazen.Ghannam@amd.com> + */ + +/* + * Notes on naming: + * 1) Use "DF_" prefix for fields that are the same for all revisions. + * 2) Use "DFx_" prefix for fields that differ between revisions. + * a) "x" is the first major revision where the new field appears. + * b) E.g., if DF2 and DF3 have the same field, then call it DF2. + * c) E.g., if DF3p5 and DF4 have the same field, then call it DF4. 
+ */ + +/* + * Coherent Station Fabric ID + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * D18F0x50 [Fabric Block Instance Information 3] + * DF2 BlockFabricId [19:8] + * DF3 BlockFabricId [19:8] + * DF3p5 BlockFabricId [19:8] + * DF4 BlockFabricId [19:8] + * DF4p5 BlockFabricId [15:8] + */ +#define DF2_COH_ST_FABRIC_ID GENMASK(19, 8) +#define DF4p5_COH_ST_FABRIC_ID GENMASK(15, 8) + +/* + * Component ID Mask + * + * Access type: Broadcast + * + * Register + * Rev Fieldname Bits + * + * DF2 N/A + * + * D18F1x208 [System Fabric ID Mask 0] + * DF3 ComponentIdMask [9:0] + * + * D18F1x150 [System Fabric ID Mask 0] + * DF3p5 ComponentIdMask [15:0] + * + * D18F4x1B0 [System Fabric ID Mask 0] + * DF4 ComponentIdMask [15:0] + * DF4p5 ComponentIdMask [15:0] + */ +#define DF3_COMPONENT_ID_MASK GENMASK(9, 0) +#define DF4_COMPONENT_ID_MASK GENMASK(15, 0) + +/* + * Destination Fabric ID + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * D18F0x114 [DRAM Limit Address] + * DF2 DstFabricID [7:0] + * DF3 DstFabricID [9:0] + * DF3 DstFabricID [11:0] + * + * D18F7xE08 [DRAM Address Control] + * DF4 DstFabricID [27:16] + * + * D18F7x208 [DRAM Address Control] + * DF4p5 DstFabricID [23:16] + */ +#define DF2_DST_FABRIC_ID GENMASK(7, 0) +#define DF3_DST_FABRIC_ID GENMASK(9, 0) +#define DF3p5_DST_FABRIC_ID GENMASK(11, 0) +#define DF4_DST_FABRIC_ID GENMASK(27, 16) +#define DF4p5_DST_FABRIC_ID GENMASK(23, 16) + +/* + * Die ID Mask + * + * Access type: Broadcast + * + * Register + * Rev Fieldname Bits + * + * D18F1x208 [System Fabric ID Mask] + * DF2 DieIdMask [15:8] + * + * D18F1x20C [System Fabric ID Mask 1] + * DF3 DieIdMask [18:16] + * + * D18F1x158 [System Fabric ID Mask 2] + * DF3p5 DieIdMask [15:0] + * + * D18F4x1B8 [System Fabric ID Mask 2] + * DF4 DieIdMask [15:0] + * DF4p5 DieIdMask [15:0] + */ +#define DF2_DIE_ID_MASK GENMASK(15, 8) +#define DF3_DIE_ID_MASK GENMASK(18, 16) +#define DF4_DIE_ID_MASK GENMASK(15, 0) + +/* + * Die ID Shift + * + * Access type: Broadcast + * + * Register + * Rev Fieldname Bits + * + * D18F1x208 [System Fabric ID Mask] + * DF2 DieIdShift [27:24] + * + * DF3 N/A + * DF3p5 N/A + * DF4 N/A + * DF4p5 N/A + */ +#define DF2_DIE_ID_SHIFT GENMASK(27, 24) + +/* + * DRAM Address Range Valid + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * D18F0x110 [DRAM Base Address] + * DF2 AddrRngVal [0] + * DF3 AddrRngVal [0] + * DF3p5 AddrRngVal [0] + * + * D18F7xE08 [DRAM Address Control] + * DF4 AddrRngVal [0] + * + * D18F7x208 [DRAM Address Control] + * DF4p5 AddrRngVal [0] + */ +#define DF_ADDR_RANGE_VAL BIT(0) + +/* + * DRAM Base Address + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * D18F0x110 [DRAM Base Address] + * DF2 DramBaseAddr [31:12] + * DF3 DramBaseAddr [31:12] + * DF3p5 DramBaseAddr [31:12] + * + * D18F7xE00 [DRAM Base Address] + * DF4 DramBaseAddr [27:0] + * + * D18F7x200 [DRAM Base Address] + * DF4p5 DramBaseAddr [27:0] + */ +#define DF2_BASE_ADDR GENMASK(31, 12) +#define DF4_BASE_ADDR GENMASK(27, 0) + +/* + * DRAM Hole Base + * + * Access type: Broadcast + * + * Register + * Rev Fieldname Bits + * + * D18F0x104 [DRAM Hole Control] + * DF2 DramHoleBase [31:24] + * DF3 DramHoleBase [31:24] + * DF3p5 DramHoleBase [31:24] + * + * D18F7x104 [DRAM Hole Control] + * DF4 DramHoleBase [31:24] + * DF4p5 DramHoleBase [31:24] + */ +#define DF_DRAM_HOLE_BASE_MASK GENMASK(31, 24) + +/* + * DRAM Limit Address + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + 
* D18F0x114 [DRAM Limit Address] + * DF2 DramLimitAddr [31:12] + * DF3 DramLimitAddr [31:12] + * DF3p5 DramLimitAddr [31:12] + * + * D18F7xE04 [DRAM Limit Address] + * DF4 DramLimitAddr [27:0] + * + * D18F7x204 [DRAM Limit Address] + * DF4p5 DramLimitAddr [27:0] + */ +#define DF2_DRAM_LIMIT_ADDR GENMASK(31, 12) +#define DF4_DRAM_LIMIT_ADDR GENMASK(27, 0) + +/* + * Hash Interleave Controls + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * DF2 N/A + * + * D18F0x3F8 [DF Global Control] + * DF3 GlbHashIntlvCtl64K [20] + * GlbHashIntlvCtl2M [21] + * GlbHashIntlvCtl1G [22] + * + * DF3p5 GlbHashIntlvCtl64K [20] + * GlbHashIntlvCtl2M [21] + * GlbHashIntlvCtl1G [22] + * + * D18F7xE08 [DRAM Address Control] + * DF4 HashIntlvCtl64K [8] + * HashIntlvCtl2M [9] + * HashIntlvCtl1G [10] + * + * D18F7x208 [DRAM Address Control] + * DF4p5 HashIntlvCtl4K [7] + * HashIntlvCtl64K [8] + * HashIntlvCtl2M [9] + * HashIntlvCtl1G [10] + * HashIntlvCtl1T [15] + */ +#define DF3_HASH_CTL_64K BIT(20) +#define DF3_HASH_CTL_2M BIT(21) +#define DF3_HASH_CTL_1G BIT(22) +#define DF4_HASH_CTL_64K BIT(8) +#define DF4_HASH_CTL_2M BIT(9) +#define DF4_HASH_CTL_1G BIT(10) +#define DF4p5_HASH_CTL_4K BIT(7) +#define DF4p5_HASH_CTL_1T BIT(15) + +/* + * High Address Offset + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * D18F0x1B4 [DRAM Offset] + * DF2 HiAddrOffset [31:20] + * DF3 HiAddrOffset [31:12] + * DF3p5 HiAddrOffset [31:12] + * + * D18F7x140 [DRAM Offset] + * DF4 HiAddrOffset [24:1] + * DF4p5 HiAddrOffset [24:1] + * MI300 HiAddrOffset [31:1] + */ +#define DF2_HI_ADDR_OFFSET GENMASK(31, 20) +#define DF3_HI_ADDR_OFFSET GENMASK(31, 12) + +/* Follow reference code by including reserved bits for simplicity. */ +#define DF4_HI_ADDR_OFFSET GENMASK(31, 1) + +/* + * High Address Offset Enable + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * D18F0x1B4 [DRAM Offset] + * DF2 HiAddrOffsetEn [0] + * DF3 HiAddrOffsetEn [0] + * DF3p5 HiAddrOffsetEn [0] + * + * D18F7x140 [DRAM Offset] + * DF4 HiAddrOffsetEn [0] + * DF4p5 HiAddrOffsetEn [0] + */ +#define DF_HI_ADDR_OFFSET_EN BIT(0) + +/* + * Interleave Address Select + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * D18F0x110 [DRAM Base Address] + * DF2 IntLvAddrSel [10:8] + * DF3 IntLvAddrSel [11:9] + * DF3p5 IntLvAddrSel [11:9] + * + * D18F7xE0C [DRAM Address Interleave] + * DF4 IntLvAddrSel [2:0] + * + * D18F7x20C [DRAM Address Interleave] + * DF4p5 IntLvAddrSel [2:0] + */ +#define DF2_INTLV_ADDR_SEL GENMASK(10, 8) +#define DF3_INTLV_ADDR_SEL GENMASK(11, 9) +#define DF4_INTLV_ADDR_SEL GENMASK(2, 0) + +/* + * Interleave Number of Channels + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * D18F0x110 [DRAM Base Address] + * DF2 IntLvNumChan [7:4] + * DF3 IntLvNumChan [5:2] + * DF3p5 IntLvNumChan [6:2] + * + * D18F7xE0C [DRAM Address Interleave] + * DF4 IntLvNumChan [8:4] + * + * D18F7x20C [DRAM Address Interleave] + * DF4p5 IntLvNumChan [9:4] + */ +#define DF2_INTLV_NUM_CHAN GENMASK(7, 4) +#define DF3_INTLV_NUM_CHAN GENMASK(5, 2) +#define DF3p5_INTLV_NUM_CHAN GENMASK(6, 2) +#define DF4_INTLV_NUM_CHAN GENMASK(8, 4) +#define DF4p5_INTLV_NUM_CHAN GENMASK(9, 4) + +/* + * Interleave Number of Dies + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * D18F0x114 [DRAM Limit Address] + * DF2 IntLvNumDies [11:10] + * + * D18F0x110 [DRAM Base Address] + * DF3 IntLvNumDies [7:6] + * DF3p5 IntLvNumDies [7] + * + * D18F7xE0C [DRAM Address 
Interleave] + * DF4 IntLvNumDies [13:12] + * + * D18F7x20C [DRAM Address Interleave] + * DF4p5 IntLvNumDies [13:12] + */ +#define DF2_INTLV_NUM_DIES GENMASK(11, 10) +#define DF3_INTLV_NUM_DIES GENMASK(7, 6) +#define DF3p5_INTLV_NUM_DIES BIT(7) +#define DF4_INTLV_NUM_DIES GENMASK(13, 12) + +/* + * Interleave Number of Sockets + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * D18F0x114 [DRAM Limit Address] + * DF2 IntLvNumSockets [8] + * + * D18F0x110 [DRAM Base Address] + * DF3 IntLvNumSockets [8] + * DF3p5 IntLvNumSockets [8] + * + * D18F7xE0C [DRAM Address Interleave] + * DF4 IntLvNumSockets [18] + * + * D18F7x20C [DRAM Address Interleave] + * DF4p5 IntLvNumSockets [18] + */ +#define DF2_INTLV_NUM_SOCKETS BIT(8) +#define DF4_INTLV_NUM_SOCKETS BIT(18) + +/* + * Legacy MMIO Hole Enable + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * D18F0x110 [DRAM Base Address] + * DF2 LgcyMmioHoleEn [1] + * DF3 LgcyMmioHoleEn [1] + * DF3p5 LgcyMmioHoleEn [1] + * + * D18F7xE08 [DRAM Address Control] + * DF4 LgcyMmioHoleEn [1] + * + * D18F7x208 [DRAM Address Control] + * DF4p5 LgcyMmioHoleEn [1] + */ +#define DF_LEGACY_MMIO_HOLE_EN BIT(1) + +/* + * Log2 Address 64K Space 0 + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * DF2 N/A + * + * D18F2x90 [Non-power-of-2 channel Configuration Register for COH_ST DRAM Address Maps] + * DF3 Log2Addr64KSpace0 [5:0] + * + * DF3p5 N/A + * DF4 N/A + * DF4p5 N/A + */ +#define DF_LOG2_ADDR_64K_SPACE0 GENMASK(5, 0) + +/* + * Major Revision + * + * Access type: Broadcast + * + * Register + * Rev Fieldname Bits + * + * DF2 N/A + * DF3 N/A + * DF3p5 N/A + * + * D18F0x040 [Fabric Block Instance Count] + * DF4 MajorRevision [27:24] + * DF4p5 MajorRevision [27:24] + */ +#define DF_MAJOR_REVISION GENMASK(27, 24) + +/* + * Minor Revision + * + * Access type: Broadcast + * + * Register + * Rev Fieldname Bits + * + * DF2 N/A + * DF3 N/A + * DF3p5 N/A + * + * D18F0x040 [Fabric Block Instance Count] + * DF4 MinorRevision [23:16] + * DF4p5 MinorRevision [23:16] + */ +#define DF_MINOR_REVISION GENMASK(23, 16) + +/* + * Node ID Mask + * + * Access type: Broadcast + * + * Register + * Rev Fieldname Bits + * + * DF2 N/A + * + * D18F1x208 [System Fabric ID Mask 0] + * DF3 NodeIdMask [25:16] + * + * D18F1x150 [System Fabric ID Mask 0] + * DF3p5 NodeIdMask [31:16] + * + * D18F4x1B0 [System Fabric ID Mask 0] + * DF4 NodeIdMask [31:16] + * DF4p5 NodeIdMask [31:16] + */ +#define DF3_NODE_ID_MASK GENMASK(25, 16) +#define DF4_NODE_ID_MASK GENMASK(31, 16) + +/* + * Node ID Shift + * + * Access type: Broadcast + * + * Register + * Rev Fieldname Bits + * + * DF2 N/A + * + * D18F1x20C [System Fabric ID Mask 1] + * DF3 NodeIdShift [3:0] + * + * D18F1x154 [System Fabric ID Mask 1] + * DF3p5 NodeIdShift [3:0] + * + * D18F4x1B4 [System Fabric ID Mask 1] + * DF4 NodeIdShift [3:0] + * DF4p5 NodeIdShift [3:0] + */ +#define DF3_NODE_ID_SHIFT GENMASK(3, 0) + +/* + * Remap Enable + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * DF2 N/A + * DF3 N/A + * DF3p5 N/A + * + * D18F7xE08 [DRAM Address Control] + * DF4 RemapEn [4] + * + * D18F7x208 [DRAM Address Control] + * DF4p5 RemapEn [4] + */ +#define DF4_REMAP_EN BIT(4) + +/* + * Remap Select + * + * Access type: Instance + * + * Register + * Rev Fieldname Bits + * + * DF2 N/A + * DF3 N/A + * DF3p5 N/A + * + * D18F7xE08 [DRAM Address Control] + * DF4 RemapSel [7:5] + * + * D18F7x208 [DRAM Address Control] + * DF4p5 RemapSel [6:5] + */ +#define 
DF4_REMAP_SEL GENMASK(7, 5) +#define DF4p5_REMAP_SEL GENMASK(6, 5) + +/* + * Socket ID Mask + * + * Access type: Broadcast + * + * Register + * Rev Fieldname Bits + * + * D18F1x208 [System Fabric ID Mask] + * DF2 SocketIdMask [23:16] + * + * D18F1x20C [System Fabric ID Mask 1] + * DF3 SocketIdMask [26:24] + * + * D18F1x158 [System Fabric ID Mask 2] + * DF3p5 SocketIdMask [31:16] + * + * D18F4x1B8 [System Fabric ID Mask 2] + * DF4 SocketIdMask [31:16] + * DF4p5 SocketIdMask [31:16] + */ +#define DF2_SOCKET_ID_MASK GENMASK(23, 16) +#define DF3_SOCKET_ID_MASK GENMASK(26, 24) +#define DF4_SOCKET_ID_MASK GENMASK(31, 16) + +/* + * Socket ID Shift + * + * Access type: Broadcast + * + * Register + * Rev Fieldname Bits + * + * D18F1x208 [System Fabric ID Mask] + * DF2 SocketIdShift [31:28] + * + * D18F1x20C [System Fabric ID Mask 1] + * DF3 SocketIdShift [9:8] + * + * D18F1x158 [System Fabric ID Mask 2] + * DF3p5 SocketIdShift [11:8] + * + * D18F4x1B4 [System Fabric ID Mask 1] + * DF4 SocketIdShift [11:8] + * DF4p5 SocketIdShift [11:8] + */ +#define DF2_SOCKET_ID_SHIFT GENMASK(31, 28) +#define DF3_SOCKET_ID_SHIFT GENMASK(9, 8) +#define DF4_SOCKET_ID_SHIFT GENMASK(11, 8) diff --git a/drivers/ras/amd/atl/system.c b/drivers/ras/amd/atl/system.c new file mode 100644 index 000000000000..701349e84942 --- /dev/null +++ b/drivers/ras/amd/atl/system.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD Address Translation Library + * + * system.c : Functions to read and save system-wide data + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Yazen Ghannam <Yazen.Ghannam@amd.com> + */ + +#include "internal.h" + +int determine_node_id(struct addr_ctx *ctx, u8 socket_id, u8 die_id) +{ + u16 socket_id_bits, die_id_bits; + + if (socket_id > 0 && df_cfg.socket_id_mask == 0) { + atl_debug(ctx, "Invalid socket inputs: socket_id=%u socket_id_mask=0x%x", + socket_id, df_cfg.socket_id_mask); + return -EINVAL; + } + + /* Do each step independently to avoid shift out-of-bounds issues. */ + socket_id_bits = socket_id; + socket_id_bits <<= df_cfg.socket_id_shift; + socket_id_bits &= df_cfg.socket_id_mask; + + if (die_id > 0 && df_cfg.die_id_mask == 0) { + atl_debug(ctx, "Invalid die inputs: die_id=%u die_id_mask=0x%x", + die_id, df_cfg.die_id_mask); + return -EINVAL; + } + + /* Do each step independently to avoid shift out-of-bounds issues. 
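+ * For example (hypothetical DF3-style values): die_id = 2 with die_id_shift = 8 and
+ * die_id_mask = 0x700 gives die_id_bits = 0x200; with no socket bits set and
+ * node_id_shift = 8, node_id = 0x200 >> 8 = 2.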
*/ + die_id_bits = die_id; + die_id_bits <<= df_cfg.die_id_shift; + die_id_bits &= df_cfg.die_id_mask; + + ctx->node_id = (socket_id_bits | die_id_bits) >> df_cfg.node_id_shift; + return 0; +} + +static void df2_get_masks_shifts(u32 mask0) +{ + df_cfg.socket_id_shift = FIELD_GET(DF2_SOCKET_ID_SHIFT, mask0); + df_cfg.socket_id_mask = FIELD_GET(DF2_SOCKET_ID_MASK, mask0); + df_cfg.die_id_shift = FIELD_GET(DF2_DIE_ID_SHIFT, mask0); + df_cfg.die_id_mask = FIELD_GET(DF2_DIE_ID_MASK, mask0); + df_cfg.node_id_shift = df_cfg.die_id_shift; + df_cfg.node_id_mask = df_cfg.socket_id_mask | df_cfg.die_id_mask; + df_cfg.component_id_mask = ~df_cfg.node_id_mask; +} + +static void df3_get_masks_shifts(u32 mask0, u32 mask1) +{ + df_cfg.component_id_mask = FIELD_GET(DF3_COMPONENT_ID_MASK, mask0); + df_cfg.node_id_mask = FIELD_GET(DF3_NODE_ID_MASK, mask0); + + df_cfg.node_id_shift = FIELD_GET(DF3_NODE_ID_SHIFT, mask1); + df_cfg.socket_id_shift = FIELD_GET(DF3_SOCKET_ID_SHIFT, mask1); + df_cfg.socket_id_mask = FIELD_GET(DF3_SOCKET_ID_MASK, mask1); + df_cfg.die_id_mask = FIELD_GET(DF3_DIE_ID_MASK, mask1); +} + +static void df3p5_get_masks_shifts(u32 mask0, u32 mask1, u32 mask2) +{ + df_cfg.component_id_mask = FIELD_GET(DF4_COMPONENT_ID_MASK, mask0); + df_cfg.node_id_mask = FIELD_GET(DF4_NODE_ID_MASK, mask0); + + df_cfg.node_id_shift = FIELD_GET(DF3_NODE_ID_SHIFT, mask1); + df_cfg.socket_id_shift = FIELD_GET(DF4_SOCKET_ID_SHIFT, mask1); + + df_cfg.socket_id_mask = FIELD_GET(DF4_SOCKET_ID_MASK, mask2); + df_cfg.die_id_mask = FIELD_GET(DF4_DIE_ID_MASK, mask2); +} + +static void df4_get_masks_shifts(u32 mask0, u32 mask1, u32 mask2) +{ + df3p5_get_masks_shifts(mask0, mask1, mask2); + + if (!(df_cfg.flags.socket_id_shift_quirk && df_cfg.socket_id_shift == 1)) + return; + + df_cfg.socket_id_shift = 0; + df_cfg.socket_id_mask = 1; + df_cfg.die_id_shift = 0; + df_cfg.die_id_mask = 0; + df_cfg.node_id_shift = 8; + df_cfg.node_id_mask = 0x100; +} + +static int df4_get_fabric_id_mask_registers(void) +{ + u32 mask0, mask1, mask2; + + /* Read D18F4x1B0 (SystemFabricIdMask0) */ + if (df_indirect_read_broadcast(0, 4, 0x1B0, &mask0)) + return -EINVAL; + + /* Read D18F4x1B4 (SystemFabricIdMask1) */ + if (df_indirect_read_broadcast(0, 4, 0x1B4, &mask1)) + return -EINVAL; + + /* Read D18F4x1B8 (SystemFabricIdMask2) */ + if (df_indirect_read_broadcast(0, 4, 0x1B8, &mask2)) + return -EINVAL; + + df4_get_masks_shifts(mask0, mask1, mask2); + return 0; +} + +static int df4_determine_df_rev(u32 reg) +{ + df_cfg.rev = FIELD_GET(DF_MINOR_REVISION, reg) < 5 ? DF4 : DF4p5; + + /* Check for special cases or quirks based on Device/Vendor IDs.*/ + + /* Read D18F0x000 (DeviceVendorId0) */ + if (df_indirect_read_broadcast(0, 0, 0, ®)) + return -EINVAL; + + if (reg == DF_FUNC0_ID_ZEN4_SERVER) + df_cfg.flags.socket_id_shift_quirk = 1; + + if (reg == DF_FUNC0_ID_MI300) { + df_cfg.flags.heterogeneous = 1; + + if (get_addr_hash_mi300()) + return -EINVAL; + } + + return df4_get_fabric_id_mask_registers(); +} + +static int determine_df_rev_legacy(void) +{ + u32 fabric_id_mask0, fabric_id_mask1, fabric_id_mask2; + + /* + * Check for DF3.5. + * + * Component ID Mask must be non-zero. Register D18F1x150 is + * reserved pre-DF3.5, so value will be Read-as-Zero. + */ + + /* Read D18F1x150 (SystemFabricIdMask0). 
*/ + if (df_indirect_read_broadcast(0, 1, 0x150, &fabric_id_mask0)) + return -EINVAL; + + if (FIELD_GET(DF4_COMPONENT_ID_MASK, fabric_id_mask0)) { + df_cfg.rev = DF3p5; + + /* Read D18F1x154 (SystemFabricIdMask1) */ + if (df_indirect_read_broadcast(0, 1, 0x154, &fabric_id_mask1)) + return -EINVAL; + + /* Read D18F1x158 (SystemFabricIdMask2) */ + if (df_indirect_read_broadcast(0, 1, 0x158, &fabric_id_mask2)) + return -EINVAL; + + df3p5_get_masks_shifts(fabric_id_mask0, fabric_id_mask1, fabric_id_mask2); + return 0; + } + + /* + * Check for DF3. + * + * Component ID Mask must be non-zero. Field is Read-as-Zero on DF2. + */ + + /* Read D18F1x208 (SystemFabricIdMask). */ + if (df_indirect_read_broadcast(0, 1, 0x208, &fabric_id_mask0)) + return -EINVAL; + + if (FIELD_GET(DF3_COMPONENT_ID_MASK, fabric_id_mask0)) { + df_cfg.rev = DF3; + + /* Read D18F1x20C (SystemFabricIdMask1) */ + if (df_indirect_read_broadcast(0, 1, 0x20C, &fabric_id_mask1)) + return -EINVAL; + + df3_get_masks_shifts(fabric_id_mask0, fabric_id_mask1); + return 0; + } + + /* Default to DF2. */ + df_cfg.rev = DF2; + df2_get_masks_shifts(fabric_id_mask0); + return 0; +} + +static int determine_df_rev(void) +{ + u32 reg; + u8 rev; + + if (df_cfg.rev != UNKNOWN) + return 0; + + /* Read D18F0x40 (FabricBlockInstanceCount). */ + if (df_indirect_read_broadcast(0, 0, 0x40, ®)) + return -EINVAL; + + /* + * Revision fields added for DF4 and later. + * + * Major revision of '0' is found pre-DF4. Field is Read-as-Zero. + */ + rev = FIELD_GET(DF_MAJOR_REVISION, reg); + if (!rev) + return determine_df_rev_legacy(); + + /* + * Fail out for major revisions other than '4'. + * + * Explicit support should be added for newer systems to avoid issues. + */ + if (rev == 4) + return df4_determine_df_rev(reg); + + return -EINVAL; +} + +static void get_num_maps(void) +{ + switch (df_cfg.rev) { + case DF2: + case DF3: + case DF3p5: + df_cfg.num_coh_st_maps = 2; + break; + case DF4: + case DF4p5: + df_cfg.num_coh_st_maps = 4; + break; + default: + atl_debug_on_bad_df_rev(); + } +} + +static void apply_node_id_shift(void) +{ + if (df_cfg.rev == DF2) + return; + + df_cfg.die_id_shift = df_cfg.node_id_shift; + df_cfg.die_id_mask <<= df_cfg.node_id_shift; + df_cfg.socket_id_mask <<= df_cfg.node_id_shift; + df_cfg.socket_id_shift += df_cfg.node_id_shift; +} + +static void dump_df_cfg(void) +{ + pr_debug("rev=0x%x", df_cfg.rev); + + pr_debug("component_id_mask=0x%x", df_cfg.component_id_mask); + pr_debug("die_id_mask=0x%x", df_cfg.die_id_mask); + pr_debug("node_id_mask=0x%x", df_cfg.node_id_mask); + pr_debug("socket_id_mask=0x%x", df_cfg.socket_id_mask); + + pr_debug("die_id_shift=0x%x", df_cfg.die_id_shift); + pr_debug("node_id_shift=0x%x", df_cfg.node_id_shift); + pr_debug("socket_id_shift=0x%x", df_cfg.socket_id_shift); + + pr_debug("num_coh_st_maps=%u", df_cfg.num_coh_st_maps); + + pr_debug("flags.legacy_ficaa=%u", df_cfg.flags.legacy_ficaa); + pr_debug("flags.socket_id_shift_quirk=%u", df_cfg.flags.socket_id_shift_quirk); +} + +int get_df_system_info(void) +{ + if (determine_df_rev()) { + pr_warn("amd_atl: Failed to determine DF Revision"); + df_cfg.rev = UNKNOWN; + return -EINVAL; + } + + apply_node_id_shift(); + + get_num_maps(); + + dump_df_cfg(); + + return 0; +} diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c new file mode 100644 index 000000000000..59b6169093f7 --- /dev/null +++ b/drivers/ras/amd/atl/umc.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD Address Translation Library + * + * umc.c : 
Unified Memory Controller (UMC) topology helpers + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Yazen Ghannam <Yazen.Ghannam@amd.com> + */ + +#include "internal.h" + +/* + * MI300 has a fixed, model-specific mapping between a UMC instance and + * its related Data Fabric Coherent Station instance. + * + * The MCA_IPID_UMC[InstanceId] field holds a unique identifier for the + * UMC instance within a Node. Use this to find the appropriate Coherent + * Station ID. + * + * Redundant bits were removed from the map below. + */ +static const u16 umc_coh_st_map[32] = { + 0x393, 0x293, 0x193, 0x093, + 0x392, 0x292, 0x192, 0x092, + 0x391, 0x291, 0x191, 0x091, + 0x390, 0x290, 0x190, 0x090, + 0x793, 0x693, 0x593, 0x493, + 0x792, 0x692, 0x592, 0x492, + 0x791, 0x691, 0x591, 0x491, + 0x790, 0x690, 0x590, 0x490, +}; + +#define UMC_ID_MI300 GENMASK(23, 12) +static u8 get_coh_st_inst_id_mi300(struct atl_err *err) +{ + u16 umc_id = FIELD_GET(UMC_ID_MI300, err->ipid); + u8 i; + + for (i = 0; i < ARRAY_SIZE(umc_coh_st_map); i++) { + if (umc_id == umc_coh_st_map[i]) + break; + } + + WARN_ON_ONCE(i >= ARRAY_SIZE(umc_coh_st_map)); + + return i; +} + +/* XOR the bits in @val. */ +static u16 bitwise_xor_bits(u16 val) +{ + u16 tmp = 0; + u8 i; + + for (i = 0; i < 16; i++) + tmp ^= (val >> i) & 0x1; + + return tmp; +} + +struct xor_bits { + bool xor_enable; + u16 col_xor; + u32 row_xor; +}; + +#define NUM_BANK_BITS 4 + +static struct { + /* UMC::CH::AddrHashBank */ + struct xor_bits bank[NUM_BANK_BITS]; + + /* UMC::CH::AddrHashPC */ + struct xor_bits pc; + + /* UMC::CH::AddrHashPC2 */ + u8 bank_xor; +} addr_hash; + +#define MI300_UMC_CH_BASE 0x90000 +#define MI300_ADDR_HASH_BANK0 (MI300_UMC_CH_BASE + 0xC8) +#define MI300_ADDR_HASH_PC (MI300_UMC_CH_BASE + 0xE0) +#define MI300_ADDR_HASH_PC2 (MI300_UMC_CH_BASE + 0xE4) + +#define ADDR_HASH_XOR_EN BIT(0) +#define ADDR_HASH_COL_XOR GENMASK(13, 1) +#define ADDR_HASH_ROW_XOR GENMASK(31, 14) +#define ADDR_HASH_BANK_XOR GENMASK(5, 0) + +/* + * Read UMC::CH::AddrHash{Bank,PC,PC2} registers to get XOR bits used + * for hashing. Do this during module init, since the values will not + * change during run time. + * + * These registers are instantiated for each UMC across each AMD Node. + * However, they should be identically programmed due to the fixed hardware + * design of MI300 systems. So read the values from Node 0 UMC 0 and keep a + * single global structure for simplicity. + */ +int get_addr_hash_mi300(void) +{ + u32 temp; + int ret; + u8 i; + + for (i = 0; i < NUM_BANK_BITS; i++) { + ret = amd_smn_read(0, MI300_ADDR_HASH_BANK0 + (i * 4), &temp); + if (ret) + return ret; + + addr_hash.bank[i].xor_enable = FIELD_GET(ADDR_HASH_XOR_EN, temp); + addr_hash.bank[i].col_xor = FIELD_GET(ADDR_HASH_COL_XOR, temp); + addr_hash.bank[i].row_xor = FIELD_GET(ADDR_HASH_ROW_XOR, temp); + } + + ret = amd_smn_read(0, MI300_ADDR_HASH_PC, &temp); + if (ret) + return ret; + + addr_hash.pc.xor_enable = FIELD_GET(ADDR_HASH_XOR_EN, temp); + addr_hash.pc.col_xor = FIELD_GET(ADDR_HASH_COL_XOR, temp); + addr_hash.pc.row_xor = FIELD_GET(ADDR_HASH_ROW_XOR, temp); + + ret = amd_smn_read(0, MI300_ADDR_HASH_PC2, &temp); + if (ret) + return ret; + + addr_hash.bank_xor = FIELD_GET(ADDR_HASH_BANK_XOR, temp); + + return 0; +} + +/* + * MI300 systems report a DRAM address in MCA_ADDR for DRAM ECC errors. This must + * be converted to the intermediate normalized address (NA) before translating to a + * system physical address. 
+ * + * The DRAM address includes bank, row, and column. Also included are bits for + * pseudochannel (PC) and stack ID (SID). + * + * Abbreviations: (S)tack ID, (P)seudochannel, (R)ow, (B)ank, (C)olumn, (Z)ero + * + * The MCA address format is as follows: + * MCA_ADDR[27:0] = {S[1:0], P[0], R[14:0], B[3:0], C[4:0], Z[0]} + * + * The normalized address format is fixed in hardware and is as follows: + * NA[30:0] = {S[1:0], R[13:0], C4, B[1:0], B[3:2], C[3:2], P, C[1:0], Z[4:0]} + * + * Additionally, the PC and Bank bits may be hashed. This must be accounted for before + * reconstructing the normalized address. + */ +#define MI300_UMC_MCA_COL GENMASK(5, 1) +#define MI300_UMC_MCA_BANK GENMASK(9, 6) +#define MI300_UMC_MCA_ROW GENMASK(24, 10) +#define MI300_UMC_MCA_PC BIT(25) +#define MI300_UMC_MCA_SID GENMASK(27, 26) + +#define MI300_NA_COL_1_0 GENMASK(6, 5) +#define MI300_NA_PC BIT(7) +#define MI300_NA_COL_3_2 GENMASK(9, 8) +#define MI300_NA_BANK_3_2 GENMASK(11, 10) +#define MI300_NA_BANK_1_0 GENMASK(13, 12) +#define MI300_NA_COL_4 BIT(14) +#define MI300_NA_ROW GENMASK(28, 15) +#define MI300_NA_SID GENMASK(30, 29) + +static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr) +{ + u16 i, col, row, bank, pc, sid, temp; + + col = FIELD_GET(MI300_UMC_MCA_COL, addr); + bank = FIELD_GET(MI300_UMC_MCA_BANK, addr); + row = FIELD_GET(MI300_UMC_MCA_ROW, addr); + pc = FIELD_GET(MI300_UMC_MCA_PC, addr); + sid = FIELD_GET(MI300_UMC_MCA_SID, addr); + + /* Calculate hash for each Bank bit. */ + for (i = 0; i < NUM_BANK_BITS; i++) { + if (!addr_hash.bank[i].xor_enable) + continue; + + temp = bitwise_xor_bits(col & addr_hash.bank[i].col_xor); + temp ^= bitwise_xor_bits(row & addr_hash.bank[i].row_xor); + bank ^= temp << i; + } + + /* Calculate hash for PC bit. */ + if (addr_hash.pc.xor_enable) { + /* Bits SID[1:0] act as Bank[6:5] for PC hash, so apply them here. */ + bank |= sid << 5; + + temp = bitwise_xor_bits(col & addr_hash.pc.col_xor); + temp ^= bitwise_xor_bits(row & addr_hash.pc.row_xor); + temp ^= bitwise_xor_bits(bank & addr_hash.bank_xor); + pc ^= temp; + + /* Drop SID bits for the sake of debug printing later. */ + bank &= 0x1F; + } + + /* Reconstruct the normalized address starting with NA[4:0] = 0 */ + addr = 0; + + /* NA[6:5] = Column[1:0] */ + temp = col & 0x3; + addr |= FIELD_PREP(MI300_NA_COL_1_0, temp); + + /* NA[7] = PC */ + addr |= FIELD_PREP(MI300_NA_PC, pc); + + /* NA[9:8] = Column[3:2] */ + temp = (col >> 2) & 0x3; + addr |= FIELD_PREP(MI300_NA_COL_3_2, temp); + + /* NA[11:10] = Bank[3:2] */ + temp = (bank >> 2) & 0x3; + addr |= FIELD_PREP(MI300_NA_BANK_3_2, temp); + + /* NA[13:12] = Bank[1:0] */ + temp = bank & 0x3; + addr |= FIELD_PREP(MI300_NA_BANK_1_0, temp); + + /* NA[14] = Column[4] */ + temp = (col >> 4) & 0x1; + addr |= FIELD_PREP(MI300_NA_COL_4, temp); + + /* NA[28:15] = Row[13:0] */ + addr |= FIELD_PREP(MI300_NA_ROW, row); + + /* NA[30:29] = SID[1:0] */ + addr |= FIELD_PREP(MI300_NA_SID, sid); + + pr_debug("Addr=0x%016lx", addr); + pr_debug("Bank=%u Row=%u Column=%u PC=%u SID=%u", bank, row, col, pc, sid); + + return addr; +} + +/* + * When a DRAM ECC error occurs on MI300 systems, it is recommended to retire + * all memory within that DRAM row. This applies to the memory with a DRAM + * bank. + * + * To find the memory addresses, loop through permutations of the DRAM column + * bits and find the System Physical address of each. The column bits are used + * to calculate the intermediate Normalized address, so all permutations should + * be checked. 
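+ * MI300_UMC_MCA_COL covers five column bits, so MI300_NUM_COL below evaluates to
+ * BIT(5) = 32 and a single row retirement loops over 32 column permutations.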
+ * + * See amd_atl::convert_dram_to_norm_addr_mi300() for MI300 address formats. + */ +#define MI300_NUM_COL BIT(HWEIGHT(MI300_UMC_MCA_COL)) +static void retire_row_mi300(struct atl_err *a_err) +{ + unsigned long addr; + struct page *p; + u8 col; + + for (col = 0; col < MI300_NUM_COL; col++) { + a_err->addr &= ~MI300_UMC_MCA_COL; + a_err->addr |= FIELD_PREP(MI300_UMC_MCA_COL, col); + + addr = amd_convert_umc_mca_addr_to_sys_addr(a_err); + if (IS_ERR_VALUE(addr)) + continue; + + addr = PHYS_PFN(addr); + + /* + * Skip invalid or already poisoned pages to avoid unnecessary + * error messages from memory_failure(). + */ + p = pfn_to_online_page(addr); + if (!p) + continue; + + if (PageHWPoison(p)) + continue; + + memory_failure(addr, 0); + } +} + +void amd_retire_dram_row(struct atl_err *a_err) +{ + if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous) + return retire_row_mi300(a_err); +} +EXPORT_SYMBOL_GPL(amd_retire_dram_row); + +static unsigned long get_addr(unsigned long addr) +{ + if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous) + return convert_dram_to_norm_addr_mi300(addr); + + return addr; +} + +#define MCA_IPID_INST_ID_HI GENMASK_ULL(47, 44) +static u8 get_die_id(struct atl_err *err) +{ + /* + * AMD Node ID is provided in MCA_IPID[InstanceIdHi], and this + * needs to be divided by 4 to get the internal Die ID. + */ + if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous) { + u8 node_id = FIELD_GET(MCA_IPID_INST_ID_HI, err->ipid); + + return node_id >> 2; + } + + /* + * For CPUs, this is the AMD Node ID modulo the number + * of AMD Nodes per socket. + */ + return topology_amd_node_id(err->cpu) % topology_amd_nodes_per_pkg(); +} + +#define UMC_CHANNEL_NUM GENMASK(31, 20) +static u8 get_coh_st_inst_id(struct atl_err *err) +{ + if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous) + return get_coh_st_inst_id_mi300(err); + + return FIELD_GET(UMC_CHANNEL_NUM, err->ipid); +} + +unsigned long convert_umc_mca_addr_to_sys_addr(struct atl_err *err) +{ + u8 socket_id = topology_physical_package_id(err->cpu); + u8 coh_st_inst_id = get_coh_st_inst_id(err); + unsigned long addr = get_addr(err->addr); + u8 die_id = get_die_id(err); + + pr_debug("socket_id=0x%x die_id=0x%x coh_st_inst_id=0x%x addr=0x%016lx", + socket_id, die_id, coh_st_inst_id, addr); + + return norm_to_sys_addr(socket_id, die_id, coh_st_inst_id, addr); +} diff --git a/drivers/ras/amd/fmpm.c b/drivers/ras/amd/fmpm.c new file mode 100644 index 000000000000..2f4ac9591c8f --- /dev/null +++ b/drivers/ras/amd/fmpm.c @@ -0,0 +1,1013 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * FRU (Field-Replaceable Unit) Memory Poison Manager + * + * Copyright (c) 2024, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Authors: + * Naveen Krishna Chatradhi <naveenkrishna.chatradhi@amd.com> + * Muralidhara M K <muralidhara.mk@amd.com> + * Yazen Ghannam <Yazen.Ghannam@amd.com> + * + * Implementation notes, assumptions, and limitations: + * + * - FRU memory poison section and memory poison descriptor definitions are not yet + * included in the UEFI specification. So they are defined here. Afterwards, they + * may be moved to linux/cper.h, if appropriate. + * + * - Platforms based on AMD MI300 systems will be the first to use these structures. + * There are a number of assumptions made here that will need to be generalized + * to support other platforms. + * + * AMD MI300-based platform(s) assumptions: + * - Memory errors are reported through x86 MCA. + * - The entire DRAM row containing a memory error should be retired. 
+ * - There will be (1) FRU memory poison section per CPER. + * - The FRU will be the CPU package (processor socket). + * - The default number of memory poison descriptor entries should be (8). + * - The platform will use ACPI ERST for persistent storage. + * - All FRU records should be saved to persistent storage. Module init will + * fail if any FRU record is not successfully written. + * + * - Boot time memory retirement may occur later than ideal due to dependencies + * on other libraries and drivers. This leaves a gap where bad memory may be + * accessed during early boot stages. + * + * - Enough memory should be pre-allocated for each FRU record to be able to hold + * the expected number of descriptor entries. This, mostly empty, record is + * written to storage during init time. Subsequent writes to the same record + * should allow the Platform to update the stored record in-place. Otherwise, + * if the record is extended, then the Platform may need to perform costly memory + * management operations on the storage. For example, the Platform may spend time + * in Firmware copying and invalidating memory on a relatively slow SPI ROM. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/cper.h> +#include <linux/ras.h> +#include <linux/cpu.h> + +#include <acpi/apei.h> + +#include <asm/cpu_device_id.h> +#include <asm/mce.h> + +#include "../debugfs.h" + +#define INVALID_CPU UINT_MAX + +/* Validation Bits */ +#define FMP_VALID_ARCH_TYPE BIT_ULL(0) +#define FMP_VALID_ARCH BIT_ULL(1) +#define FMP_VALID_ID_TYPE BIT_ULL(2) +#define FMP_VALID_ID BIT_ULL(3) +#define FMP_VALID_LIST_ENTRIES BIT_ULL(4) +#define FMP_VALID_LIST BIT_ULL(5) + +/* FRU Architecture Types */ +#define FMP_ARCH_TYPE_X86_CPUID_1_EAX 0 + +/* FRU ID Types */ +#define FMP_ID_TYPE_X86_PPIN 0 + +/* FRU Memory Poison Section */ +struct cper_sec_fru_mem_poison { + u32 checksum; + u64 validation_bits; + u32 fru_arch_type; + u64 fru_arch; + u32 fru_id_type; + u64 fru_id; + u32 nr_entries; +} __packed; + +/* FRU Descriptor ID Types */ +#define FPD_HW_ID_TYPE_MCA_IPID 0 + +/* FRU Descriptor Address Types */ +#define FPD_ADDR_TYPE_MCA_ADDR 0 + +/* Memory Poison Descriptor */ +struct cper_fru_poison_desc { + u64 timestamp; + u32 hw_id_type; + u64 hw_id; + u32 addr_type; + u64 addr; +} __packed; + +/* Collection of headers and sections for easy pointer use. */ +struct fru_rec { + struct cper_record_header hdr; + struct cper_section_descriptor sec_desc; + struct cper_sec_fru_mem_poison fmp; + struct cper_fru_poison_desc entries[]; +} __packed; + +/* + * Pointers to the complete CPER record of each FRU. + * + * Memory allocation will include padded space for descriptor entries. + */ +static struct fru_rec **fru_records; + +/* system physical addresses array */ +static u64 *spa_entries; + +#define INVALID_SPA ~0ULL + +static struct dentry *fmpm_dfs_dir; +static struct dentry *fmpm_dfs_entries; + +#define CPER_CREATOR_FMP \ + GUID_INIT(0xcd5c2993, 0xf4b2, 0x41b2, 0xb5, 0xd4, 0xf9, 0xc3, \ + 0xa0, 0x33, 0x08, 0x75) + +#define CPER_SECTION_TYPE_FMP \ + GUID_INIT(0x5e4706c1, 0x5356, 0x48c6, 0x93, 0x0b, 0x52, 0xf2, \ + 0x12, 0x0a, 0x44, 0x58) + +/** + * DOC: max_nr_entries (byte) + * Maximum number of descriptor entries possible for each FRU. + * + * Values between '1' and '255' are valid. + * No input or '0' will default to FMPM_DEFAULT_MAX_NR_ENTRIES. 
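struct fru_rec above ends in a flexible array, so a single allocation holds the CPER record header, section descriptor, FMP section and all descriptor slots back to back; max_rec_len is simply that sum. A simplified user-space sketch of the sizing, using stand-in structs (illustration only, not the real CPER layout):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct demo_desc {			/* stand-in for cper_fru_poison_desc */
	uint64_t timestamp;
	uint64_t addr;
};

struct demo_rec {			/* stand-in for fru_rec */
	uint64_t hdr;			/* headers collapsed into one field  */
	uint32_t nr_entries;
	struct demo_desc entries[];	/* flexible array member             */
};

int main(void)
{
	unsigned int max_nr_entries = 8;	/* FMPM_DEFAULT_MAX_NR_ENTRIES */
	size_t max_rec_len = sizeof(struct demo_rec) +
			     max_nr_entries * sizeof(struct demo_desc);
	struct demo_rec *rec = calloc(1, max_rec_len);

	if (!rec)
		return 1;

	printf("record length for %u entries: %zu bytes\n",
	       max_nr_entries, max_rec_len);
	free(rec);
	return 0;
}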
+ */ +static u8 max_nr_entries; +module_param(max_nr_entries, byte, 0644); +MODULE_PARM_DESC(max_nr_entries, + "Maximum number of memory poison descriptor entries per FRU"); + +#define FMPM_DEFAULT_MAX_NR_ENTRIES 8 + +/* Maximum number of FRUs in the system. */ +#define FMPM_MAX_NR_FRU 256 +static unsigned int max_nr_fru; + +/* Total length of record including headers and list of descriptor entries. */ +static size_t max_rec_len; + +/* Total number of SPA entries across all FRUs. */ +static unsigned int spa_nr_entries; + +/* + * Protect the local records cache in fru_records and prevent concurrent + * writes to storage. This is only needed after init once notifier block + * registration is done. + * + * The majority of a record is fixed at module init and will not change + * during run time. The entries within a record will be updated as new + * errors are reported. The mutex should be held whenever the entries are + * accessed during run time. + */ +static DEFINE_MUTEX(fmpm_update_mutex); + +#define for_each_fru(i, rec) \ + for (i = 0; rec = fru_records[i], i < max_nr_fru; i++) + +static inline u32 get_fmp_len(struct fru_rec *rec) +{ + return rec->sec_desc.section_length - sizeof(struct cper_section_descriptor); +} + +static struct fru_rec *get_fru_record(u64 fru_id) +{ + struct fru_rec *rec; + unsigned int i; + + for_each_fru(i, rec) { + if (rec->fmp.fru_id == fru_id) + return rec; + } + + pr_debug("Record not found for FRU 0x%016llx\n", fru_id); + + return NULL; +} + +/* + * Sum up all bytes within the FRU Memory Poison Section including the Memory + * Poison Descriptor entries. + * + * Don't include the old checksum here. It's a u32 value, so summing each of its + * bytes will give the wrong total. + */ +static u32 do_fmp_checksum(struct cper_sec_fru_mem_poison *fmp, u32 len) +{ + u32 checksum = 0; + u8 *buf, *end; + + /* Skip old checksum. */ + buf = (u8 *)fmp + sizeof(u32); + end = buf + len; + + while (buf < end) + checksum += (u8)(*(buf++)); + + return checksum; +} + +static int update_record_on_storage(struct fru_rec *rec) +{ + u32 len, checksum; + int ret; + + /* Calculate a new checksum. */ + len = get_fmp_len(rec); + + /* Get the current total. */ + checksum = do_fmp_checksum(&rec->fmp, len); + + /* Use the complement value. */ + rec->fmp.checksum = -checksum; + + pr_debug("Writing to storage\n"); + + ret = erst_write(&rec->hdr); + if (ret) { + pr_warn("Storage update failed for FRU 0x%016llx\n", rec->fmp.fru_id); + + if (ret == -ENOSPC) + pr_warn("Not enough space on storage\n"); + } + + return ret; +} + +static bool rec_has_valid_entries(struct fru_rec *rec) +{ + if (!(rec->fmp.validation_bits & FMP_VALID_LIST_ENTRIES)) + return false; + + if (!(rec->fmp.validation_bits & FMP_VALID_LIST)) + return false; + + return true; +} + +static bool fpds_equal(struct cper_fru_poison_desc *old, struct cper_fru_poison_desc *new) +{ + /* + * Ignore timestamp field. + * The same physical error may be reported multiple times due to stuck bits, etc. + * + * Also, order the checks from most->least likely to fail to shortcut the code. 
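The checksum scheme used by do_fmp_checksum() and update_record_on_storage() above is a plain additive one: sum every section byte after the checksum field, store the two's complement, and on load the same byte sum plus the stored value must wrap to zero (checked later in fmp_is_valid()). A small user-space sketch of the round trip with a made-up payload:

#include <stdint.h>
#include <stdio.h>

static uint32_t byte_sum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;

	while (len--)
		sum += *buf++;
	return sum;
}

int main(void)
{
	uint8_t payload[16] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4 };
	uint32_t csum;

	/* Writer side: store the complement of the payload byte sum. */
	csum = -byte_sum(payload, sizeof(payload));

	/* Reader side: byte sum plus stored checksum wraps to zero. */
	printf("verify: 0x%x\n", byte_sum(payload, sizeof(payload)) + csum);
	return 0;
}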
+ */ + if (old->addr != new->addr) + return false; + + if (old->hw_id != new->hw_id) + return false; + + if (old->addr_type != new->addr_type) + return false; + + if (old->hw_id_type != new->hw_id_type) + return false; + + return true; +} + +static bool rec_has_fpd(struct fru_rec *rec, struct cper_fru_poison_desc *fpd) +{ + unsigned int i; + + for (i = 0; i < rec->fmp.nr_entries; i++) { + struct cper_fru_poison_desc *fpd_i = &rec->entries[i]; + + if (fpds_equal(fpd_i, fpd)) { + pr_debug("Found duplicate record\n"); + return true; + } + } + + return false; +} + +static void save_spa(struct fru_rec *rec, unsigned int entry, + u64 addr, u64 id, unsigned int cpu) +{ + unsigned int i, fru_idx, spa_entry; + struct atl_err a_err; + unsigned long spa; + + if (entry >= max_nr_entries) { + pr_warn_once("FRU descriptor entry %d out-of-bounds (max: %d)\n", + entry, max_nr_entries); + return; + } + + /* spa_nr_entries is always multiple of max_nr_entries */ + for (i = 0; i < spa_nr_entries; i += max_nr_entries) { + fru_idx = i / max_nr_entries; + if (fru_records[fru_idx] == rec) + break; + } + + if (i >= spa_nr_entries) { + pr_warn_once("FRU record %d not found\n", i); + return; + } + + spa_entry = i + entry; + if (spa_entry >= spa_nr_entries) { + pr_warn_once("spa_entries[] index out-of-bounds\n"); + return; + } + + memset(&a_err, 0, sizeof(struct atl_err)); + + a_err.addr = addr; + a_err.ipid = id; + a_err.cpu = cpu; + + spa = amd_convert_umc_mca_addr_to_sys_addr(&a_err); + if (IS_ERR_VALUE(spa)) { + pr_debug("Failed to get system address\n"); + return; + } + + spa_entries[spa_entry] = spa; + pr_debug("fru_idx: %u, entry: %u, spa_entry: %u, spa: 0x%016llx\n", + fru_idx, entry, spa_entry, spa_entries[spa_entry]); +} + +static void update_fru_record(struct fru_rec *rec, struct mce *m) +{ + struct cper_sec_fru_mem_poison *fmp = &rec->fmp; + struct cper_fru_poison_desc fpd, *fpd_dest; + u32 entry = 0; + + mutex_lock(&fmpm_update_mutex); + + memset(&fpd, 0, sizeof(struct cper_fru_poison_desc)); + + fpd.timestamp = m->time; + fpd.hw_id_type = FPD_HW_ID_TYPE_MCA_IPID; + fpd.hw_id = m->ipid; + fpd.addr_type = FPD_ADDR_TYPE_MCA_ADDR; + fpd.addr = m->addr; + + /* This is the first entry, so just save it. */ + if (!rec_has_valid_entries(rec)) + goto save_fpd; + + /* Ignore already recorded errors. */ + if (rec_has_fpd(rec, &fpd)) + goto out_unlock; + + if (rec->fmp.nr_entries >= max_nr_entries) { + pr_warn("Exceeded number of entries for FRU 0x%016llx\n", rec->fmp.fru_id); + goto out_unlock; + } + + entry = fmp->nr_entries; + +save_fpd: + save_spa(rec, entry, m->addr, m->ipid, m->extcpu); + fpd_dest = &rec->entries[entry]; + memcpy(fpd_dest, &fpd, sizeof(struct cper_fru_poison_desc)); + + fmp->nr_entries = entry + 1; + fmp->validation_bits |= FMP_VALID_LIST_ENTRIES; + fmp->validation_bits |= FMP_VALID_LIST; + + pr_debug("Updated FRU 0x%016llx entry #%u\n", fmp->fru_id, entry); + + update_record_on_storage(rec); + +out_unlock: + mutex_unlock(&fmpm_update_mutex); +} + +static void retire_dram_row(u64 addr, u64 id, u32 cpu) +{ + struct atl_err a_err; + + memset(&a_err, 0, sizeof(struct atl_err)); + + a_err.addr = addr; + a_err.ipid = id; + a_err.cpu = cpu; + + amd_retire_dram_row(&a_err); +} + +static int fru_handle_mem_poison(struct notifier_block *nb, unsigned long val, void *data) +{ + struct mce *m = (struct mce *)data; + struct fru_rec *rec; + + if (!mce_is_memory_error(m)) + return NOTIFY_DONE; + + retire_dram_row(m->addr, m->ipid, m->extcpu); + + /* + * An invalid FRU ID should not happen on real errors. 
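spa_entries[] above is a flat array with max_nr_entries slots per FRU, so save_spa() effectively treats it as a two-dimensional table indexed by FRU and entry. A quick sketch of that index arithmetic with made-up sizes (illustration only):

#include <stdio.h>

int main(void)
{
	unsigned int max_nr_entries = 8;	/* descriptor slots per FRU  */
	unsigned int max_nr_fru = 4;		/* made-up number of sockets */
	unsigned int spa_nr_entries = max_nr_fru * max_nr_entries;
	unsigned int fru_idx = 2, entry = 5;	/* third FRU, sixth slot     */

	/* Same arithmetic as save_spa(): base of the FRU's block plus entry. */
	unsigned int spa_entry = fru_idx * max_nr_entries + entry;

	printf("spa_entry %u of %u\n", spa_entry, spa_nr_entries);
	return 0;
}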
But it + * could happen from software error injection, etc. + */ + rec = get_fru_record(m->ppin); + if (!rec) + return NOTIFY_DONE; + + update_fru_record(rec, m); + + return NOTIFY_OK; +} + +static struct notifier_block fru_mem_poison_nb = { + .notifier_call = fru_handle_mem_poison, + .priority = MCE_PRIO_LOWEST, +}; + +static void retire_mem_fmp(struct fru_rec *rec) +{ + struct cper_sec_fru_mem_poison *fmp = &rec->fmp; + unsigned int i, cpu; + + for (i = 0; i < fmp->nr_entries; i++) { + struct cper_fru_poison_desc *fpd = &rec->entries[i]; + unsigned int err_cpu = INVALID_CPU; + + if (fpd->hw_id_type != FPD_HW_ID_TYPE_MCA_IPID) + continue; + + if (fpd->addr_type != FPD_ADDR_TYPE_MCA_ADDR) + continue; + + cpus_read_lock(); + for_each_online_cpu(cpu) { + if (topology_ppin(cpu) == fmp->fru_id) { + err_cpu = cpu; + break; + } + } + cpus_read_unlock(); + + if (err_cpu == INVALID_CPU) + continue; + + retire_dram_row(fpd->addr, fpd->hw_id, err_cpu); + save_spa(rec, i, fpd->addr, fpd->hw_id, err_cpu); + } +} + +static void retire_mem_records(void) +{ + struct fru_rec *rec; + unsigned int i; + + for_each_fru(i, rec) { + if (!rec_has_valid_entries(rec)) + continue; + + retire_mem_fmp(rec); + } +} + +/* Set the CPER Record Header and CPER Section Descriptor fields. */ +static void set_rec_fields(struct fru_rec *rec) +{ + struct cper_section_descriptor *sec_desc = &rec->sec_desc; + struct cper_record_header *hdr = &rec->hdr; + + memcpy(hdr->signature, CPER_SIG_RECORD, CPER_SIG_SIZE); + hdr->revision = CPER_RECORD_REV; + hdr->signature_end = CPER_SIG_END; + + /* + * Currently, it is assumed that there is one FRU Memory Poison + * section per CPER. But this may change for other implementations. + */ + hdr->section_count = 1; + + /* The logged errors are recoverable. Otherwise, they'd never make it here. 
*/ + hdr->error_severity = CPER_SEV_RECOVERABLE; + + hdr->validation_bits = 0; + hdr->record_length = max_rec_len; + hdr->creator_id = CPER_CREATOR_FMP; + hdr->notification_type = CPER_NOTIFY_MCE; + hdr->record_id = cper_next_record_id(); + hdr->flags = CPER_HW_ERROR_FLAGS_PREVERR; + + sec_desc->section_offset = sizeof(struct cper_record_header); + sec_desc->section_length = max_rec_len - sizeof(struct cper_record_header); + sec_desc->revision = CPER_SEC_REV; + sec_desc->validation_bits = 0; + sec_desc->flags = CPER_SEC_PRIMARY; + sec_desc->section_type = CPER_SECTION_TYPE_FMP; + sec_desc->section_severity = CPER_SEV_RECOVERABLE; +} + +static int save_new_records(void) +{ + DECLARE_BITMAP(new_records, FMPM_MAX_NR_FRU); + struct fru_rec *rec; + unsigned int i; + int ret = 0; + + for_each_fru(i, rec) { + if (rec->hdr.record_length) + continue; + + set_rec_fields(rec); + + ret = update_record_on_storage(rec); + if (ret) + goto out_clear; + + set_bit(i, new_records); + } + + return ret; + +out_clear: + for_each_fru(i, rec) { + if (!test_bit(i, new_records)) + continue; + + erst_clear(rec->hdr.record_id); + } + + return ret; +} + +/* Check that the record matches expected types for the current system.*/ +static bool fmp_is_usable(struct fru_rec *rec) +{ + struct cper_sec_fru_mem_poison *fmp = &rec->fmp; + u64 cpuid; + + pr_debug("Validation bits: 0x%016llx\n", fmp->validation_bits); + + if (!(fmp->validation_bits & FMP_VALID_ARCH_TYPE)) { + pr_debug("Arch type unknown\n"); + return false; + } + + if (fmp->fru_arch_type != FMP_ARCH_TYPE_X86_CPUID_1_EAX) { + pr_debug("Arch type not 'x86 Family/Model/Stepping'\n"); + return false; + } + + if (!(fmp->validation_bits & FMP_VALID_ARCH)) { + pr_debug("Arch value unknown\n"); + return false; + } + + cpuid = cpuid_eax(1); + if (fmp->fru_arch != cpuid) { + pr_debug("Arch value mismatch: record = 0x%016llx, system = 0x%016llx\n", + fmp->fru_arch, cpuid); + return false; + } + + if (!(fmp->validation_bits & FMP_VALID_ID_TYPE)) { + pr_debug("FRU ID type unknown\n"); + return false; + } + + if (fmp->fru_id_type != FMP_ID_TYPE_X86_PPIN) { + pr_debug("FRU ID type is not 'x86 PPIN'\n"); + return false; + } + + if (!(fmp->validation_bits & FMP_VALID_ID)) { + pr_debug("FRU ID value unknown\n"); + return false; + } + + return true; +} + +static bool fmp_is_valid(struct fru_rec *rec) +{ + struct cper_sec_fru_mem_poison *fmp = &rec->fmp; + u32 checksum, len; + + len = get_fmp_len(rec); + if (len < sizeof(struct cper_sec_fru_mem_poison)) { + pr_debug("fmp length is too small\n"); + return false; + } + + /* Checksum must sum to zero for the entire section. */ + checksum = do_fmp_checksum(fmp, len) + fmp->checksum; + if (checksum) { + pr_debug("fmp checksum failed: sum = 0x%x\n", checksum); + print_hex_dump_debug("fmp record: ", DUMP_PREFIX_NONE, 16, 1, fmp, len, false); + return false; + } + + if (!fmp_is_usable(rec)) + return false; + + return true; +} + +static struct fru_rec *get_valid_record(struct fru_rec *old) +{ + struct fru_rec *new; + + if (!fmp_is_valid(old)) { + pr_debug("Ignoring invalid record\n"); + return NULL; + } + + new = get_fru_record(old->fmp.fru_id); + if (!new) + pr_debug("Ignoring record for absent FRU\n"); + + return new; +} + +/* + * Fetch saved records from persistent storage. + * + * For each found record: + * - If it was not created by this module, then ignore it. + * - If it is valid, then copy its data to the local cache. + * - If it is not valid, then erase it. 
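All the length fields in set_rec_fields() above derive from max_rec_len: the section starts right after the record header, the section length is everything that remains, and get_fmp_len() strips the section descriptor again to get the bytes covered by the checksum. Worked through with stand-in structure sizes (illustration only; the real sizes come from the CPER structures):

#include <stdio.h>

int main(void)
{
	/* Stand-in sizes, not the real CPER structure sizes. */
	unsigned int hdr_len = 128, sec_desc_len = 72, fmp_len = 40, desc_len = 32;
	unsigned int max_nr_entries = 8;

	unsigned int max_rec_len = hdr_len + sec_desc_len + fmp_len +
				   max_nr_entries * desc_len;
	unsigned int section_offset = hdr_len;
	unsigned int section_length = max_rec_len - hdr_len;
	unsigned int checksummed_len = section_length - sec_desc_len;	/* get_fmp_len() */

	printf("record %u, section @%u len %u, checksummed %u\n",
	       max_rec_len, section_offset, section_length, checksummed_len);
	return 0;
}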
+ */ +static int get_saved_records(void) +{ + struct fru_rec *old, *new; + u64 record_id; + int ret, pos; + ssize_t len; + + /* + * Assume saved records match current max size. + * + * However, this may not be true depending on module parameters. + */ + old = kmalloc(max_rec_len, GFP_KERNEL); + if (!old) { + ret = -ENOMEM; + goto out; + } + + ret = erst_get_record_id_begin(&pos); + if (ret < 0) + goto out_end; + + while (!erst_get_record_id_next(&pos, &record_id)) { + if (record_id == APEI_ERST_INVALID_RECORD_ID) + goto out_end; + /* + * Make sure to clear temporary buffer between reads to avoid + * leftover data from records of various sizes. + */ + memset(old, 0, max_rec_len); + + len = erst_read_record(record_id, &old->hdr, max_rec_len, + sizeof(struct fru_rec), &CPER_CREATOR_FMP); + if (len < 0) + continue; + + if (len > max_rec_len) { + pr_debug("Found record larger than max_rec_len\n"); + continue; + } + + new = get_valid_record(old); + if (!new) + erst_clear(record_id); + + /* Restore the record */ + memcpy(new, old, len); + } + +out_end: + erst_get_record_id_end(); + kfree(old); +out: + return ret; +} + +static void set_fmp_fields(struct fru_rec *rec, unsigned int cpu) +{ + struct cper_sec_fru_mem_poison *fmp = &rec->fmp; + + fmp->fru_arch_type = FMP_ARCH_TYPE_X86_CPUID_1_EAX; + fmp->validation_bits |= FMP_VALID_ARCH_TYPE; + + /* Assume all CPUs in the system have the same value for now. */ + fmp->fru_arch = cpuid_eax(1); + fmp->validation_bits |= FMP_VALID_ARCH; + + fmp->fru_id_type = FMP_ID_TYPE_X86_PPIN; + fmp->validation_bits |= FMP_VALID_ID_TYPE; + + fmp->fru_id = topology_ppin(cpu); + fmp->validation_bits |= FMP_VALID_ID; +} + +static int init_fmps(void) +{ + struct fru_rec *rec; + unsigned int i, cpu; + int ret = 0; + + for_each_fru(i, rec) { + unsigned int fru_cpu = INVALID_CPU; + + cpus_read_lock(); + for_each_online_cpu(cpu) { + if (topology_physical_package_id(cpu) == i) { + fru_cpu = cpu; + break; + } + } + cpus_read_unlock(); + + if (fru_cpu == INVALID_CPU) { + pr_debug("Failed to find matching CPU for FRU #%u\n", i); + ret = -ENODEV; + break; + } + + set_fmp_fields(rec, fru_cpu); + } + + return ret; +} + +static int get_system_info(void) +{ + /* Only load on MI300A systems for now. */ + if (!(boot_cpu_data.x86_model >= 0x90 && + boot_cpu_data.x86_model <= 0x9f)) + return -ENODEV; + + if (!cpu_feature_enabled(X86_FEATURE_AMD_PPIN)) { + pr_debug("PPIN feature not available\n"); + return -ENODEV; + } + + /* Use CPU socket as FRU for MI300 systems. 
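One sharp edge in the restore loop of get_saved_records() above: when get_valid_record() returns NULL the stale record is erased, but execution still falls through to memcpy() with a NULL destination. A hedged sketch of the same loop body with an explicit skip for that case (illustration only, reusing the driver's own helpers):

	while (!erst_get_record_id_next(&pos, &record_id)) {
		if (record_id == APEI_ERST_INVALID_RECORD_ID)
			goto out_end;

		memset(old, 0, max_rec_len);

		len = erst_read_record(record_id, &old->hdr, max_rec_len,
				       sizeof(struct fru_rec), &CPER_CREATOR_FMP);
		if (len < 0)
			continue;

		if (len > max_rec_len) {
			pr_debug("Found record larger than max_rec_len\n");
			continue;
		}

		new = get_valid_record(old);
		if (!new) {
			erst_clear(record_id);
			continue;	/* nothing to restore into */
		}

		/* Restore the record */
		memcpy(new, old, len);
	}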
*/ + max_nr_fru = topology_max_packages(); + if (!max_nr_fru) + return -ENODEV; + + if (max_nr_fru > FMPM_MAX_NR_FRU) { + pr_warn("Too many FRUs to manage: found: %u, max: %u\n", + max_nr_fru, FMPM_MAX_NR_FRU); + return -ENODEV; + } + + if (!max_nr_entries) + max_nr_entries = FMPM_DEFAULT_MAX_NR_ENTRIES; + + spa_nr_entries = max_nr_fru * max_nr_entries; + + max_rec_len = sizeof(struct fru_rec); + max_rec_len += sizeof(struct cper_fru_poison_desc) * max_nr_entries; + + pr_info("max FRUs: %u, max entries: %u, max record length: %lu\n", + max_nr_fru, max_nr_entries, max_rec_len); + + return 0; +} + +static void free_records(void) +{ + struct fru_rec *rec; + int i; + + for_each_fru(i, rec) + kfree(rec); + + kfree(fru_records); + kfree(spa_entries); +} + +static int allocate_records(void) +{ + int i, ret = 0; + + fru_records = kcalloc(max_nr_fru, sizeof(struct fru_rec *), GFP_KERNEL); + if (!fru_records) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < max_nr_fru; i++) { + fru_records[i] = kzalloc(max_rec_len, GFP_KERNEL); + if (!fru_records[i]) { + ret = -ENOMEM; + goto out_free; + } + } + + spa_entries = kcalloc(spa_nr_entries, sizeof(u64), GFP_KERNEL); + if (!spa_entries) { + ret = -ENOMEM; + goto out_free; + } + + for (i = 0; i < spa_nr_entries; i++) + spa_entries[i] = INVALID_SPA; + + return ret; + +out_free: + while (--i >= 0) + kfree(fru_records[i]); + + kfree(fru_records); +out: + return ret; +} + +static void *fmpm_start(struct seq_file *f, loff_t *pos) +{ + if (*pos >= (spa_nr_entries + 1)) + return NULL; + return pos; +} + +static void *fmpm_next(struct seq_file *f, void *data, loff_t *pos) +{ + if (++(*pos) >= (spa_nr_entries + 1)) + return NULL; + return pos; +} + +static void fmpm_stop(struct seq_file *f, void *data) +{ +} + +#define SHORT_WIDTH 8 +#define U64_WIDTH 18 +#define TIMESTAMP_WIDTH 19 +#define LONG_WIDTH 24 +#define U64_PAD (LONG_WIDTH - U64_WIDTH) +#define TS_PAD (LONG_WIDTH - TIMESTAMP_WIDTH) +static int fmpm_show(struct seq_file *f, void *data) +{ + unsigned int fru_idx, entry, spa_entry, line; + struct cper_fru_poison_desc *fpd; + struct fru_rec *rec; + + line = *(loff_t *)data; + if (line == 0) { + seq_printf(f, "%-*s", SHORT_WIDTH, "fru_idx"); + seq_printf(f, "%-*s", LONG_WIDTH, "fru_id"); + seq_printf(f, "%-*s", SHORT_WIDTH, "entry"); + seq_printf(f, "%-*s", LONG_WIDTH, "timestamp"); + seq_printf(f, "%-*s", LONG_WIDTH, "hw_id"); + seq_printf(f, "%-*s", LONG_WIDTH, "addr"); + seq_printf(f, "%-*s", LONG_WIDTH, "spa"); + goto out_newline; + } + + spa_entry = line - 1; + fru_idx = spa_entry / max_nr_entries; + entry = spa_entry % max_nr_entries; + + rec = fru_records[fru_idx]; + if (!rec) + goto out; + + seq_printf(f, "%-*u", SHORT_WIDTH, fru_idx); + seq_printf(f, "0x%016llx%-*s", rec->fmp.fru_id, U64_PAD, ""); + seq_printf(f, "%-*u", SHORT_WIDTH, entry); + + mutex_lock(&fmpm_update_mutex); + + if (entry >= rec->fmp.nr_entries) { + seq_printf(f, "%-*s", LONG_WIDTH, "*"); + seq_printf(f, "%-*s", LONG_WIDTH, "*"); + seq_printf(f, "%-*s", LONG_WIDTH, "*"); + seq_printf(f, "%-*s", LONG_WIDTH, "*"); + goto out_unlock; + } + + fpd = &rec->entries[entry]; + + seq_printf(f, "%ptT%-*s", &fpd->timestamp, TS_PAD, ""); + seq_printf(f, "0x%016llx%-*s", fpd->hw_id, U64_PAD, ""); + seq_printf(f, "0x%016llx%-*s", fpd->addr, U64_PAD, ""); + + if (spa_entries[spa_entry] == INVALID_SPA) + seq_printf(f, "%-*s", LONG_WIDTH, "*"); + else + seq_printf(f, "0x%016llx%-*s", spa_entries[spa_entry], U64_PAD, ""); + +out_unlock: + mutex_unlock(&fmpm_update_mutex); +out_newline: + 
seq_putc(f, '\n'); +out: + return 0; +} + +static const struct seq_operations fmpm_seq_ops = { + .start = fmpm_start, + .next = fmpm_next, + .stop = fmpm_stop, + .show = fmpm_show, +}; + +static int fmpm_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &fmpm_seq_ops); +} + +static const struct file_operations fmpm_fops = { + .open = fmpm_open, + .release = seq_release, + .read = seq_read, + .llseek = seq_lseek, +}; + +static void setup_debugfs(void) +{ + struct dentry *dfs = ras_get_debugfs_root(); + + if (!dfs) + return; + + fmpm_dfs_dir = debugfs_create_dir("fmpm", dfs); + if (!fmpm_dfs_dir) + return; + + fmpm_dfs_entries = debugfs_create_file("entries", 0400, fmpm_dfs_dir, NULL, &fmpm_fops); + if (!fmpm_dfs_entries) + debugfs_remove(fmpm_dfs_dir); +} + +static const struct x86_cpu_id fmpm_cpuids[] = { + X86_MATCH_VENDOR_FAM(AMD, 0x19, NULL), + { } +}; +MODULE_DEVICE_TABLE(x86cpu, fmpm_cpuids); + +static int __init fru_mem_poison_init(void) +{ + int ret; + + if (!x86_match_cpu(fmpm_cpuids)) { + ret = -ENODEV; + goto out; + } + + if (erst_disable) { + pr_debug("ERST not available\n"); + ret = -ENODEV; + goto out; + } + + ret = get_system_info(); + if (ret) + goto out; + + ret = allocate_records(); + if (ret) + goto out; + + ret = init_fmps(); + if (ret) + goto out_free; + + ret = get_saved_records(); + if (ret) + goto out_free; + + ret = save_new_records(); + if (ret) + goto out_free; + + setup_debugfs(); + + retire_mem_records(); + + mce_register_decode_chain(&fru_mem_poison_nb); + + pr_info("FRU Memory Poison Manager initialized\n"); + return 0; + +out_free: + free_records(); +out: + return ret; +} + +static void __exit fru_mem_poison_exit(void) +{ + mce_unregister_decode_chain(&fru_mem_poison_nb); + debugfs_remove(fmpm_dfs_dir); + free_records(); +} + +module_init(fru_mem_poison_init); +module_exit(fru_mem_poison_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("FRU Memory Poison Manager"); diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index 321af498ee11..e440b15fbabc 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c @@ -480,9 +480,15 @@ DEFINE_SHOW_ATTRIBUTE(array); static int __init create_debugfs_nodes(void) { - struct dentry *d, *pfn, *decay, *count, *array; + struct dentry *d, *pfn, *decay, *count, *array, *dfs; - d = debugfs_create_dir("cec", ras_debugfs_dir); + dfs = ras_get_debugfs_root(); + if (!dfs) { + pr_warn("Error getting RAS debugfs root!\n"); + return -1; + } + + d = debugfs_create_dir("cec", dfs); if (!d) { pr_warn("Error creating cec debugfs node!\n"); return -1; diff --git a/drivers/ras/debugfs.c b/drivers/ras/debugfs.c index ffb973c328e3..42afd3de68b2 100644 --- a/drivers/ras/debugfs.c +++ b/drivers/ras/debugfs.c @@ -3,10 +3,16 @@ #include <linux/ras.h> #include "debugfs.h" -struct dentry *ras_debugfs_dir; +static struct dentry *ras_debugfs_dir; static atomic_t trace_count = ATOMIC_INIT(0); +struct dentry *ras_get_debugfs_root(void) +{ + return ras_debugfs_dir; +} +EXPORT_SYMBOL_GPL(ras_get_debugfs_root); + int ras_userspace_consumers(void) { return atomic_read(&trace_count); diff --git a/drivers/ras/debugfs.h b/drivers/ras/debugfs.h index c07443b462ad..4749ccdeeba1 100644 --- a/drivers/ras/debugfs.h +++ b/drivers/ras/debugfs.h @@ -4,6 +4,6 @@ #include <linux/debugfs.h> -extern struct dentry *ras_debugfs_dir; +struct dentry *ras_get_debugfs_root(void); #endif /* __RAS_DEBUGFS_H__ */ diff --git a/drivers/ras/ras.c b/drivers/ras/ras.c index 95540ea8dd9d..a6e4792a1b2e 100644 --- a/drivers/ras/ras.c +++ b/drivers/ras/ras.c @@ -10,6 
+10,37 @@ #include <linux/ras.h> #include <linux/uuid.h> +#if IS_ENABLED(CONFIG_AMD_ATL) +/* + * Once set, this function pointer should never be unset. + * + * The library module will set this pointer if it successfully loads. The module + * should not be unloaded except for testing and debug purposes. + */ +static unsigned long (*amd_atl_umc_na_to_spa)(struct atl_err *err); + +void amd_atl_register_decoder(unsigned long (*f)(struct atl_err *)) +{ + amd_atl_umc_na_to_spa = f; +} +EXPORT_SYMBOL_GPL(amd_atl_register_decoder); + +void amd_atl_unregister_decoder(void) +{ + amd_atl_umc_na_to_spa = NULL; +} +EXPORT_SYMBOL_GPL(amd_atl_unregister_decoder); + +unsigned long amd_convert_umc_mca_addr_to_sys_addr(struct atl_err *err) +{ + if (!amd_atl_umc_na_to_spa) + return -EINVAL; + + return amd_atl_umc_na_to_spa(err); +} +EXPORT_SYMBOL_GPL(amd_convert_umc_mca_addr_to_sys_addr); +#endif /* CONFIG_AMD_ATL */ + #define CREATE_TRACE_POINTS #define TRACE_INCLUDE_PATH ../../include/ras #include <ras/ras_event.h> diff --git a/drivers/regulator/max5970-regulator.c b/drivers/regulator/max5970-regulator.c index bc88a40a88d4..8bbcd983a74a 100644 --- a/drivers/regulator/max5970-regulator.c +++ b/drivers/regulator/max5970-regulator.c @@ -29,8 +29,8 @@ struct max5970_regulator { }; enum max597x_regulator_id { - MAX597X_SW0, - MAX597X_SW1, + MAX597X_sw0, + MAX597X_sw1, }; static int max5970_read_adc(struct regmap *regmap, int reg, long *val) @@ -378,8 +378,8 @@ static int max597x_dt_parse(struct device_node *np, } static const struct regulator_desc regulators[] = { - MAX597X_SWITCH(SW0, MAX5970_REG_CHXEN, 0, "vss1"), - MAX597X_SWITCH(SW1, MAX5970_REG_CHXEN, 1, "vss2"), + MAX597X_SWITCH(sw0, MAX5970_REG_CHXEN, 0, "vss1"), + MAX597X_SWITCH(sw1, MAX5970_REG_CHXEN, 1, "vss2"), }; static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg, @@ -392,7 +392,7 @@ static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg, return ret; if (*val) - return regmap_write(map, reg, *val); + return regmap_write(map, reg, 0); return 0; } diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c index 698c420e0869..60cfcd741c2a 100644 --- a/drivers/regulator/pwm-regulator.c +++ b/drivers/regulator/pwm-regulator.c @@ -157,7 +157,17 @@ static int pwm_regulator_get_voltage(struct regulator_dev *rdev) pwm_get_state(drvdata->pwm, &pstate); + if (!pstate.enabled) { + if (pstate.polarity == PWM_POLARITY_INVERSED) + pstate.duty_cycle = pstate.period; + else + pstate.duty_cycle = 0; + } + voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit); + if (voltage < min(max_uV_duty, min_uV_duty) || + voltage > max(max_uV_duty, min_uV_duty)) + return -ENOTRECOVERABLE; /* * The dutycycle for min_uV might be greater than the one for max_uV. @@ -313,6 +323,32 @@ static int pwm_regulator_init_continuous(struct platform_device *pdev, return 0; } +static int pwm_regulator_init_boot_on(struct platform_device *pdev, + struct pwm_regulator_data *drvdata, + const struct regulator_init_data *init_data) +{ + struct pwm_state pstate; + + if (!init_data->constraints.boot_on || drvdata->enb_gpio) + return 0; + + pwm_get_state(drvdata->pwm, &pstate); + if (pstate.enabled) + return 0; + + /* + * Update the duty cycle so the output does not change + * when the regulator core enables the regulator (and + * thus the PWM channel). 
+ */ + if (pstate.polarity == PWM_POLARITY_INVERSED) + pstate.duty_cycle = pstate.period; + else + pstate.duty_cycle = 0; + + return pwm_apply_might_sleep(drvdata->pwm, &pstate); +} + static int pwm_regulator_probe(struct platform_device *pdev) { const struct regulator_init_data *init_data; @@ -372,6 +408,13 @@ static int pwm_regulator_probe(struct platform_device *pdev) if (ret) return ret; + ret = pwm_regulator_init_boot_on(pdev, drvdata, init_data); + if (ret) { + dev_err(&pdev->dev, "Failed to apply boot_on settings: %d\n", + ret); + return ret; + } + regulator = devm_regulator_register(&pdev->dev, &drvdata->desc, &config); if (IS_ERR(regulator)) { diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index e374fa6e5f28..d89ae7f16d7a 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -1017,14 +1017,14 @@ static const struct regulator_desc rk805_reg[] = { }; static const struct linear_range rk806_buck_voltage_ranges[] = { - REGULATOR_LINEAR_RANGE(500000, 0, 160, 6250), /* 500mV ~ 1500mV */ - REGULATOR_LINEAR_RANGE(1500000, 161, 237, 25000), /* 1500mV ~ 3400mV */ - REGULATOR_LINEAR_RANGE(3400000, 238, 255, 0), + REGULATOR_LINEAR_RANGE(500000, 0, 159, 6250), /* 500mV ~ 1500mV */ + REGULATOR_LINEAR_RANGE(1500000, 160, 235, 25000), /* 1500mV ~ 3400mV */ + REGULATOR_LINEAR_RANGE(3400000, 236, 255, 0), }; static const struct linear_range rk806_ldo_voltage_ranges[] = { - REGULATOR_LINEAR_RANGE(500000, 0, 232, 12500), /* 500mV ~ 3400mV */ - REGULATOR_LINEAR_RANGE(3400000, 233, 255, 0), /* 500mV ~ 3400mV */ + REGULATOR_LINEAR_RANGE(500000, 0, 231, 12500), /* 500mV ~ 3400mV */ + REGULATOR_LINEAR_RANGE(3400000, 232, 255, 0), }; static const struct regulator_desc rk806_reg[] = { diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c index f48214e2c3b4..04133510e5af 100644 --- a/drivers/regulator/ti-abb-regulator.c +++ b/drivers/regulator/ti-abb-regulator.c @@ -726,9 +726,25 @@ static int ti_abb_probe(struct platform_device *pdev) return PTR_ERR(abb->setup_reg); } - abb->int_base = devm_platform_ioremap_resource_byname(pdev, "int-address"); - if (IS_ERR(abb->int_base)) - return PTR_ERR(abb->int_base); + pname = "int-address"; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname); + if (!res) { + dev_err(dev, "Missing '%s' IO resource\n", pname); + return -ENODEV; + } + /* + * The MPU interrupt status register (PRM_IRQSTATUS_MPU) is + * shared between regulator-abb-{ivahd,dspeve,gpu} driver + * instances. Therefore use devm_ioremap() rather than + * devm_platform_ioremap_resource_byname() to avoid busy + * resource region conflicts. 
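Both pwm-regulator hunks above encode the same assumption: a PWM channel that is not enabled behaves like a 0 % duty cycle for normal polarity and like a 100 % duty cycle (duty_cycle == period) for inverted polarity. A stand-alone sketch of that mapping with stand-in types (illustration only, not the kernel PWM API):

#include <stdio.h>

enum polarity { NORMAL, INVERSED };	/* stand-in for enum pwm_polarity */

struct pwm_state_demo {			/* stand-in for struct pwm_state  */
	unsigned int period;
	unsigned int duty_cycle;
	enum polarity polarity;
	int enabled;
};

/* Duty cycle that describes the pin level of a disabled PWM. */
static void fixup_disabled(struct pwm_state_demo *st)
{
	if (st->enabled)
		return;
	st->duty_cycle = (st->polarity == INVERSED) ? st->period : 0;
}

int main(void)
{
	struct pwm_state_demo st = { .period = 1000, .polarity = INVERSED };

	fixup_disabled(&st);
	/* Relative duty in percent, as pwm_get_relative_duty_cycle() would report. */
	printf("relative duty: %u%%\n", st.duty_cycle * 100 / st.period);
	return 0;
}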
+ */ + abb->int_base = devm_ioremap(dev, res->start, + resource_size(res)); + if (!abb->int_base) { + dev_err(dev, "Unable to map '%s'\n", pname); + return -ENOMEM; + } /* Map Optional resources */ pname = "efuse-address"; diff --git a/drivers/rtc/lib_test.c b/drivers/rtc/lib_test.c index d5caf36c56cd..225c859d6da5 100644 --- a/drivers/rtc/lib_test.c +++ b/drivers/rtc/lib_test.c @@ -54,7 +54,7 @@ static void rtc_time64_to_tm_test_date_range(struct kunit *test) days = div_s64(secs, 86400); - #define FAIL_MSG "%d/%02d/%02d (%2d) : %ld", \ + #define FAIL_MSG "%d/%02d/%02d (%2d) : %lld", \ year, month, mday, yday, days KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 7327e81352e9..cead018c3f06 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -8,9 +8,6 @@ * Copyright IBM Corp. 1999, 2009 */ -#define KMSG_COMPONENT "dasd" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - #include <linux/kmod.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -30,9 +27,6 @@ #include <asm/itcw.h> #include <asm/diag.h> -/* This is ugly... */ -#define PRINTK_HEADER "dasd:" - #include "dasd_int.h" /* * SECTION: Constant definitions to be used within this file @@ -313,39 +307,57 @@ static int dasd_state_basic_to_known(struct dasd_device *device) */ static int dasd_state_basic_to_ready(struct dasd_device *device) { - int rc; - struct dasd_block *block; - struct gendisk *disk; + struct dasd_block *block = device->block; + struct queue_limits lim; + int rc = 0; - rc = 0; - block = device->block; /* make disk known with correct capacity */ - if (block) { - if (block->base->discipline->do_analysis != NULL) - rc = block->base->discipline->do_analysis(block); - if (rc) { - if (rc != -EAGAIN) { - device->state = DASD_STATE_UNFMT; - disk = device->block->gdp; - kobject_uevent(&disk_to_dev(disk)->kobj, - KOBJ_CHANGE); - goto out; - } - return rc; - } - if (device->discipline->setup_blk_queue) - device->discipline->setup_blk_queue(block); - set_capacity(block->gdp, - block->blocks << block->s2b_shift); + if (!block) { device->state = DASD_STATE_READY; - rc = dasd_scan_partitions(block); - if (rc) { - device->state = DASD_STATE_BASIC; + goto out; + } + + if (block->base->discipline->do_analysis != NULL) + rc = block->base->discipline->do_analysis(block); + if (rc) { + if (rc == -EAGAIN) return rc; - } - } else { - device->state = DASD_STATE_READY; + device->state = DASD_STATE_UNFMT; + kobject_uevent(&disk_to_dev(device->block->gdp)->kobj, + KOBJ_CHANGE); + goto out; + } + + lim = queue_limits_start_update(block->gdp->queue); + lim.max_dev_sectors = device->discipline->max_sectors(block); + lim.max_hw_sectors = lim.max_dev_sectors; + lim.logical_block_size = block->bp_block; + + if (device->discipline->has_discard) { + unsigned int max_bytes; + + lim.discard_granularity = block->bp_block; + + /* Calculate max_discard_sectors and make it PAGE aligned */ + max_bytes = USHRT_MAX * block->bp_block; + max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE); + + lim.max_hw_discard_sectors = max_bytes / block->bp_block; + lim.max_write_zeroes_sectors = lim.max_hw_discard_sectors; } + rc = queue_limits_commit_update(block->gdp->queue, &lim); + if (rc) + return rc; + + set_capacity(block->gdp, block->blocks << block->s2b_shift); + device->state = DASD_STATE_READY; + + rc = dasd_scan_partitions(block); + if (rc) { + device->state = DASD_STATE_BASIC; + return rc; + } + out: if (device->discipline->basic_to_ready) rc = 
device->discipline->basic_to_ready(device); @@ -412,7 +424,7 @@ dasd_state_ready_to_online(struct dasd_device * device) KOBJ_CHANGE); return 0; } - disk_uevent(device->block->bdev_handle->bdev->bd_disk, + disk_uevent(file_bdev(device->block->bdev_file)->bd_disk, KOBJ_CHANGE); } return 0; @@ -433,7 +445,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device) device->state = DASD_STATE_READY; if (device->block && !(device->features & DASD_FEATURE_USERAW)) - disk_uevent(device->block->bdev_handle->bdev->bd_disk, + disk_uevent(file_bdev(device->block->bdev_file)->bd_disk, KOBJ_CHANGE); return 0; } @@ -1301,7 +1313,6 @@ int dasd_term_IO(struct dasd_ccw_req *cqr) { struct dasd_device *device; int retries, rc; - char errorstring[ERRORLENGTH]; /* Check the cqr */ rc = dasd_check_cqr(cqr); @@ -1340,10 +1351,8 @@ int dasd_term_IO(struct dasd_ccw_req *cqr) rc = 0; break; default: - /* internal error 10 - unknown rc*/ - snprintf(errorstring, ERRORLENGTH, "10 %d", rc); - dev_err(&device->cdev->dev, "An error occurred in the " - "DASD device driver, reason=%s\n", errorstring); + dev_err(&device->cdev->dev, + "Unexpected error during request termination %d\n", rc); BUG(); break; } @@ -1362,7 +1371,6 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) { struct dasd_device *device; int rc; - char errorstring[ERRORLENGTH]; /* Check the cqr */ rc = dasd_check_cqr(cqr); @@ -1382,10 +1390,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) return -EPERM; } if (cqr->retries < 0) { - /* internal error 14 - start_IO run out of retries */ - sprintf(errorstring, "14 %p", cqr); - dev_err(&device->cdev->dev, "An error occurred in the DASD " - "device driver, reason=%s\n", errorstring); + dev_err(&device->cdev->dev, + "Start I/O ran out of retries\n"); cqr->status = DASD_CQR_ERROR; return -EIO; } @@ -1463,11 +1469,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) "not accessible"); break; default: - /* internal error 11 - unknown rc */ - snprintf(errorstring, ERRORLENGTH, "11 %d", rc); dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", errorstring); + "Unexpected error during request start %d", rc); BUG(); break; } @@ -1904,8 +1907,6 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device, static void __dasd_process_cqr(struct dasd_device *device, struct dasd_ccw_req *cqr) { - char errorstring[ERRORLENGTH]; - switch (cqr->status) { case DASD_CQR_SUCCESS: cqr->status = DASD_CQR_DONE; @@ -1917,11 +1918,8 @@ static void __dasd_process_cqr(struct dasd_device *device, cqr->status = DASD_CQR_TERMINATED; break; default: - /* internal error 12 - wrong cqr status*/ - snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", errorstring); + "Unexpected CQR status %02x", cqr->status); BUG(); } if (cqr->callback) @@ -1986,16 +1984,14 @@ static void __dasd_device_check_expire(struct dasd_device *device) if (device->discipline->term_IO(cqr) != 0) { /* Hmpf, try again in 5 sec */ dev_err(&device->cdev->dev, - "cqr %p timed out (%lus) but cannot be " - "ended, retrying in 5 s\n", - cqr, (cqr->expires/HZ)); + "CQR timed out (%lus) but cannot be ended, retrying in 5s\n", + (cqr->expires / HZ)); cqr->expires += 5*HZ; dasd_device_set_timer(device, 5*HZ); } else { dev_err(&device->cdev->dev, - "cqr %p timed out (%lus), %i retries " - "remaining\n", cqr, (cqr->expires/HZ), - cqr->retries); + "CQR timed out (%lus), %i retries remaining\n", + (cqr->expires / HZ), 
cqr->retries); } __dasd_device_check_autoquiesce_timeout(device, cqr); } @@ -2116,8 +2112,7 @@ int dasd_flush_device_queue(struct dasd_device *device) if (rc) { /* unable to terminate requeust */ dev_err(&device->cdev->dev, - "Flushing the DASD request queue " - "failed for request %p\n", cqr); + "Flushing the DASD request queue failed\n"); /* stop flush processing */ goto finished; } @@ -2633,8 +2628,7 @@ static int __dasd_cancel_req(struct dasd_ccw_req *cqr) rc = device->discipline->term_IO(cqr); if (rc) { dev_err(&device->cdev->dev, - "Cancelling request %p failed with rc=%d\n", - cqr, rc); + "Cancelling request failed with rc=%d\n", rc); } else { cqr->stopclk = get_tod_clock(); } @@ -3402,8 +3396,7 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie) ret = ccw_device_set_online(cdev); if (ret) - pr_warn("%s: Setting the DASD online failed with rc=%d\n", - dev_name(&cdev->dev), ret); + dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret); } /* @@ -3490,8 +3483,11 @@ int dasd_generic_set_online(struct ccw_device *cdev, { struct dasd_discipline *discipline; struct dasd_device *device; + struct device *dev; int rc; + dev = &cdev->dev; + /* first online clears initial online feature flag */ dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); device = dasd_create_device(cdev); @@ -3504,11 +3500,10 @@ int dasd_generic_set_online(struct ccw_device *cdev, /* Try to load the required module. */ rc = request_module(DASD_DIAG_MOD); if (rc) { - pr_warn("%s Setting the DASD online failed " - "because the required module %s " - "could not be loaded (rc=%d)\n", - dev_name(&cdev->dev), DASD_DIAG_MOD, - rc); + dev_warn(dev, "Setting the DASD online failed " + "because the required module %s " + "could not be loaded (rc=%d)\n", + DASD_DIAG_MOD, rc); dasd_delete_device(device); return -ENODEV; } @@ -3516,8 +3511,7 @@ int dasd_generic_set_online(struct ccw_device *cdev, /* Module init could have failed, so check again here after * request_module(). 
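The discard setup in the reworked dasd_state_basic_to_ready() above caps a single discard at USHRT_MAX blocks, rounds the byte count down to a page boundary, and converts back to block-sized units. Worked through for a 4 KiB block size (a minimal sketch, assuming a 4 KiB PAGE_SIZE; not part of the patch):

#include <stdio.h>
#include <limits.h>

#define DEMO_PAGE_SIZE		4096u
#define DEMO_ALIGN_DOWN(x, a)	((x) / (a) * (a))

int main(void)
{
	unsigned int bp_block = 4096;				/* device block size */
	unsigned int max_bytes = USHRT_MAX * bp_block;		/* 65535 blocks      */

	max_bytes = DEMO_ALIGN_DOWN(max_bytes, DEMO_PAGE_SIZE);

	printf("max discard: %u bytes, %u blocks\n",
	       max_bytes, max_bytes / bp_block);
	return 0;
}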
*/ if (!dasd_diag_discipline_pointer) { - pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", - dev_name(&cdev->dev)); + dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n"); dasd_delete_device(device); return -ENODEV; } @@ -3527,37 +3521,33 @@ int dasd_generic_set_online(struct ccw_device *cdev, dasd_delete_device(device); return -EINVAL; } + device->base_discipline = base_discipline; if (!try_module_get(discipline->owner)) { - module_put(base_discipline->owner); dasd_delete_device(device); return -EINVAL; } - device->base_discipline = base_discipline; device->discipline = discipline; /* check_device will allocate block device if necessary */ rc = discipline->check_device(device); if (rc) { - pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", - dev_name(&cdev->dev), discipline->name, rc); - module_put(discipline->owner); - module_put(base_discipline->owner); + dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n", + discipline->name, rc); dasd_delete_device(device); return rc; } dasd_set_target_state(device, DASD_STATE_ONLINE); if (device->state <= DASD_STATE_KNOWN) { - pr_warn("%s Setting the DASD online failed because of a missing discipline\n", - dev_name(&cdev->dev)); + dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n"); rc = -ENODEV; dasd_set_target_state(device, DASD_STATE_NEW); if (device->block) dasd_free_block(device->block); dasd_delete_device(device); - } else - pr_debug("dasd_generic device %s found\n", - dev_name(&cdev->dev)); + } else { + dev_dbg(dev, "dasd_generic device found\n"); + } wait_event(dasd_init_waitq, _wait_for_device(device)); @@ -3568,10 +3558,13 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online); int dasd_generic_set_offline(struct ccw_device *cdev) { + int max_count, open_count, rc; struct dasd_device *device; struct dasd_block *block; - int max_count, open_count, rc; unsigned long flags; + struct device *dev; + + dev = &cdev->dev; rc = 0; spin_lock_irqsave(get_ccwdev_lock(cdev), flags); @@ -3588,15 +3581,14 @@ int dasd_generic_set_offline(struct ccw_device *cdev) * in the other openers. */ if (device->block) { - max_count = device->block->bdev_handle ? 0 : -1; + max_count = device->block->bdev_file ? 
0 : -1; open_count = atomic_read(&device->block->open_count); if (open_count > max_count) { if (open_count > 0) - pr_warn("%s: The DASD cannot be set offline with open count %i\n", - dev_name(&cdev->dev), open_count); + dev_warn(dev, "The DASD cannot be set offline with open count %i\n", + open_count); else - pr_warn("%s: The DASD cannot be set offline while it is in use\n", - dev_name(&cdev->dev)); + dev_warn(dev, "The DASD cannot be set offline while it is in use\n"); rc = -EBUSY; goto out_err; } @@ -3634,8 +3626,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev) * so sync bdev first and then wait for our queues to become * empty */ - if (device->block && device->block->bdev_handle) - bdev_mark_dead(device->block->bdev_handle->bdev, false); + if (device->block && device->block->bdev_file) + bdev_mark_dead(file_bdev(device->block->bdev_file), false); dasd_schedule_device_bh(device); rc = wait_event_interruptible(shutdown_waitq, _wait_for_empty_queues(device)); @@ -3956,8 +3948,8 @@ static int dasd_handle_autoquiesce(struct dasd_device *device, if (dasd_eer_enabled(device)) dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE); - pr_info("%s: The DASD has been put in the quiesce state\n", - dev_name(&device->cdev->dev)); + dev_info(&device->cdev->dev, + "The DASD has been put in the quiesce state\n"); dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE); if (device->features & DASD_FEATURE_REQUEUEQUIESCE) @@ -3977,10 +3969,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, NULL); if (IS_ERR(cqr)) { - /* internal error 13 - Allocating the RDC request failed*/ - dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", "13"); + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", + "Could not allocate RDC request"); return cqr; } diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 89957bb7244d..459b7f8ac883 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -7,13 +7,9 @@ * */ -#define KMSG_COMPONENT "dasd-eckd" - #include <linux/timer.h> #include <asm/idals.h> -#define PRINTK_HEADER "dasd_erp(3990): " - #include "dasd_int.h" #include "dasd_eckd.h" @@ -398,7 +394,6 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) struct dasd_device *device = erp->startdev; char msg_format = (sense[7] & 0xF0); char msg_no = (sense[7] & 0x0F); - char errorstring[ERRORLENGTH]; switch (msg_format) { case 0x00: /* Format 0 - Program or System Checks */ @@ -1004,12 +999,9 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) } break; - default: /* unknown message format - should not happen - internal error 03 - unknown message format */ - snprintf(errorstring, ERRORLENGTH, "03 %x02", msg_format); + default: dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", errorstring); + "Unknown message format %02x", msg_format); break; } /* end switch message format */ @@ -1056,11 +1048,9 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense) set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags); erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); } else { - /* fatal error - set status to FAILED - internal error 09 - Command Reject */ if (!test_bit(DASD_CQR_SUPPRESS_CR, &erp->flags)) dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, reason=09\n"); + "An I/O command request was rejected\n"); erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); } @@ -1128,13 +1118,7 @@ 
dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense) erp->function = dasd_3990_erp_equip_check; if (sense[1] & SNS1_WRITE_INHIBITED) { - dev_info(&device->cdev->dev, - "Write inhibited path encountered\n"); - - /* vary path offline - internal error 04 - Path should be varied off-line.*/ - dev_err(&device->cdev->dev, "An error occurred in the DASD " - "device driver, reason=%s\n", "04"); + dev_err(&device->cdev->dev, "Write inhibited path encountered\n"); erp = dasd_3990_erp_action_1(erp); @@ -1285,11 +1269,7 @@ dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense) erp = dasd_3990_erp_action_4(erp, sense); } else { - /* internal error 06 - The track format is not valid*/ - dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", "06"); - + dev_err(&device->cdev->dev, "Track format is not valid\n"); erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); } @@ -1663,9 +1643,8 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) sizeof(struct LO_eckd_data), device); if (IS_ERR(erp)) { - /* internal error 01 - Unable to allocate ERP */ - dev_err(&device->cdev->dev, "An error occurred in the DASD " - "device driver, reason=%s\n", "01"); + DBF_DEV_EVENT(DBF_ERR, device, "%s", + "Unable to allocate ERP request (1B 32)"); return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED); } @@ -1807,10 +1786,8 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense) cpa = previous_erp->irb.scsw.cmd.cpa; if (cpa == 0) { - /* internal error 02 - - Unable to determine address of the CCW to be restarted */ - dev_err(&device->cdev->dev, "An error occurred in the DASD " - "device driver, reason=%s\n", "02"); + dev_err(&device->cdev->dev, + "Unable to determine address of to be restarted CCW\n"); previous_erp->status = DASD_CQR_FAILED; @@ -2009,15 +1986,9 @@ dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense) { if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) { - - /* set to suspended duplex state then restart - internal error 05 - Set device to suspended duplex state - should be done */ struct dasd_device *device = erp->startdev; dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", "05"); - + "Compound configuration error occurred\n"); } erp->function = dasd_3990_erp_compound_config; @@ -2153,10 +2124,9 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense) erp = dasd_3990_erp_int_req(erp); break; - case 0x0F: /* length mismatch during update write command - internal error 08 - update write command error*/ - dev_err(&device->cdev->dev, "An error occurred in the " - "DASD device driver, reason=%s\n", "08"); + case 0x0F: + dev_err(&device->cdev->dev, + "Update write command error occurred\n"); erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); break; @@ -2165,12 +2135,9 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense) erp = dasd_3990_erp_action_10_32(erp, sense); break; - case 0x15: /* next track outside defined extend - internal error 07 - The next track is not - within the defined storage extent */ + case 0x15: dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", "07"); + "Track outside defined extent error occurred\n"); erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); break; @@ -2663,7 +2630,7 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp) * necessary */ dev_err(&device->cdev->dev, - "ERP %p has run out of retries and failed\n", erp); + "ERP %px has run 
out of retries and failed\n", erp); erp->status = DASD_CQR_FAILED; } @@ -2704,8 +2671,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head, while (erp_done != erp) { if (erp_done == NULL) /* end of chain reached */ - panic(PRINTK_HEADER "Programming error in ERP! The " - "original request was lost\n"); + panic("Programming error in ERP! The original request was lost\n"); /* remove the request from the device queue */ list_del(&erp_done->blocklist); @@ -2786,11 +2752,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) "ERP chain at BEGINNING of ERP-ACTION\n"); for (temp_erp = cqr; temp_erp != NULL; temp_erp = temp_erp->refers) { - dev_err(&device->cdev->dev, - "ERP %p (%02x) refers to %p\n", - temp_erp, temp_erp->status, - temp_erp->refers); + "ERP %px (%02x) refers to %px\n", + temp_erp, temp_erp->status, temp_erp->refers); } } @@ -2837,11 +2801,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) "ERP chain at END of ERP-ACTION\n"); for (temp_erp = erp; temp_erp != NULL; temp_erp = temp_erp->refers) { - dev_err(&device->cdev->dev, - "ERP %p (%02x) refers to %p\n", - temp_erp, temp_erp->status, - temp_erp->refers); + "ERP %px (%02x) refers to %px\n", + temp_erp, temp_erp->status, temp_erp->refers); } } diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index c9740ae88d1a..e84cd5436556 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -6,20 +6,12 @@ * Author(s): Stefan Weinhuber <wein@de.ibm.com> */ -#define KMSG_COMPONENT "dasd-eckd" - #include <linux/list.h> #include <linux/slab.h> #include <asm/ebcdic.h> #include "dasd_int.h" #include "dasd_eckd.h" -#ifdef PRINTK_HEADER -#undef PRINTK_HEADER -#endif /* PRINTK_HEADER */ -#define PRINTK_HEADER "dasd(eckd):" - - /* * General concept of alias management: * - PAV and DASD alias management is specific to the eckd discipline. diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index c4e36650c426..0316c20823ee 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -13,8 +13,6 @@ * */ -#define KMSG_COMPONENT "dasd" - #include <linux/ctype.h> #include <linux/init.h> #include <linux/module.h> @@ -24,8 +22,6 @@ #include <linux/uaccess.h> #include <asm/ipl.h> -/* This is ugly... */ -#define PRINTK_HEADER "dasd_devmap:" #define DASD_MAX_PARAMS 256 #include "dasd_int.h" @@ -1114,7 +1110,7 @@ dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf) use_diag = (devmap->features & DASD_FEATURE_USEDIAG) != 0; else use_diag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USEDIAG) != 0; - return sprintf(buf, use_diag ? "1\n" : "0\n"); + return sysfs_emit(buf, use_diag ? "1\n" : "0\n"); } static ssize_t @@ -1163,7 +1159,7 @@ dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf) use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0; else use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0; - return sprintf(buf, use_raw ? "1\n" : "0\n"); + return sysfs_emit(buf, use_raw ? 
"1\n" : "0\n"); } static ssize_t @@ -1259,7 +1255,7 @@ dasd_access_show(struct device *dev, struct device_attribute *attr, if (count < 0) return count; - return sprintf(buf, "%d\n", count); + return sysfs_emit(buf, "%d\n", count); } static DEVICE_ATTR(host_access_count, 0444, dasd_access_show, NULL); @@ -1338,19 +1334,19 @@ static ssize_t dasd_alias_show(struct device *dev, device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) - return sprintf(buf, "0\n"); + return sysfs_emit(buf, "0\n"); if (device->discipline && device->discipline->get_uid && !device->discipline->get_uid(device, &uid)) { if (uid.type == UA_BASE_PAV_ALIAS || uid.type == UA_HYPER_PAV_ALIAS) { dasd_put_device(device); - return sprintf(buf, "1\n"); + return sysfs_emit(buf, "1\n"); } } dasd_put_device(device); - return sprintf(buf, "0\n"); + return sysfs_emit(buf, "0\n"); } static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL); @@ -1412,15 +1408,9 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) break; } - if (strlen(uid.vduit) > 0) - snprintf(uid_string, sizeof(uid_string), - "%s.%s.%04x.%s.%s", - uid.vendor, uid.serial, uid.ssid, ua_string, - uid.vduit); - else - snprintf(uid_string, sizeof(uid_string), - "%s.%s.%04x.%s", - uid.vendor, uid.serial, uid.ssid, ua_string); + snprintf(uid_string, sizeof(uid_string), "%s.%s.%04x.%s%s%s", + uid.vendor, uid.serial, uid.ssid, ua_string, + uid.vduit[0] ? "." : "", uid.vduit); } dasd_put_device(device); @@ -1862,7 +1852,7 @@ static ssize_t dasd_pm_show(struct device *dev, device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) - return sprintf(buf, "0\n"); + return sysfs_emit(buf, "0\n"); opm = dasd_path_get_opm(device); nppm = dasd_path_get_nppm(device); @@ -1872,8 +1862,8 @@ static ssize_t dasd_pm_show(struct device *dev, ifccpm = dasd_path_get_ifccpm(device); dasd_put_device(device); - return sprintf(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm, - cablepm, cuirpm, hpfpm, ifccpm); + return sysfs_emit(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm, + cablepm, cuirpm, hpfpm, ifccpm); } static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL); diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 2e4e555b37c3..ea4b1d01bb76 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -8,8 +8,6 @@ * */ -#define KMSG_COMPONENT "dasd" - #include <linux/kernel_stat.h> #include <linux/stddef.h> #include <linux/kernel.h> @@ -31,8 +29,6 @@ #include "dasd_int.h" #include "dasd_diag.h" -#define PRINTK_HEADER "dasd(diag):" - MODULE_LICENSE("GPL"); /* The maximum number of blocks per request (max_blocks) is dependent on the @@ -621,25 +617,9 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, "dump sense not available for DIAG data"); } -/* - * Initialize block layer request queue. 
- */ -static void dasd_diag_setup_blk_queue(struct dasd_block *block) +static unsigned int dasd_diag_max_sectors(struct dasd_block *block) { - unsigned int logical_block_size = block->bp_block; - struct request_queue *q = block->gdp->queue; - int max; - - max = DIAG_MAX_BLOCKS << block->s2b_shift; - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - q->limits.max_dev_sectors = max; - blk_queue_logical_block_size(q, logical_block_size); - blk_queue_max_hw_sectors(q, max); - blk_queue_max_segments(q, USHRT_MAX); - /* With page sized segments each segment can be translated into one idaw/tidaw */ - blk_queue_max_segment_size(q, PAGE_SIZE); - blk_queue_segment_boundary(q, PAGE_SIZE - 1); - blk_queue_dma_alignment(q, PAGE_SIZE - 1); + return DIAG_MAX_BLOCKS << block->s2b_shift; } static int dasd_diag_pe_handler(struct dasd_device *device, @@ -652,10 +632,10 @@ static struct dasd_discipline dasd_diag_discipline = { .owner = THIS_MODULE, .name = "DIAG", .ebcname = "DIAG", + .max_sectors = dasd_diag_max_sectors, .check_device = dasd_diag_check_device, .pe_handler = dasd_diag_pe_handler, .fill_geometry = dasd_diag_fill_geometry, - .setup_blk_queue = dasd_diag_setup_blk_queue, .start_IO = dasd_start_diag, .term_IO = dasd_diag_term_IO, .handle_terminated_request = dasd_diag_handle_terminated_request, diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index bd89b032968a..373c1a86c33e 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -10,8 +10,6 @@ * Author.........: Nigel Hislop <hislop_nigel@emc.com> */ -#define KMSG_COMPONENT "dasd-eckd" - #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -37,11 +35,6 @@ #include "dasd_int.h" #include "dasd_eckd.h" -#ifdef PRINTK_HEADER -#undef PRINTK_HEADER -#endif /* PRINTK_HEADER */ -#define PRINTK_HEADER "dasd(eckd):" - /* * raw track access always map to 64k in memory * so it maps to 16 blocks of 4k per track @@ -1072,22 +1065,14 @@ static void dasd_eckd_read_fc_security(struct dasd_device *device) } } -static void dasd_eckd_get_uid_string(struct dasd_conf *conf, - char *print_uid) +static void dasd_eckd_get_uid_string(struct dasd_conf *conf, char *print_uid) { struct dasd_uid uid; create_uid(conf, &uid); - if (strlen(uid.vduit) > 0) - snprintf(print_uid, DASD_UID_STRLEN, - "%s.%s.%04x.%02x.%s", - uid.vendor, uid.serial, uid.ssid, - uid.real_unit_addr, uid.vduit); - else - snprintf(print_uid, DASD_UID_STRLEN, - "%s.%s.%04x.%02x", - uid.vendor, uid.serial, uid.ssid, - uid.real_unit_addr); + snprintf(print_uid, DASD_UID_STRLEN, "%s.%s.%04x.%02x%s%s", + uid.vendor, uid.serial, uid.ssid, uid.real_unit_addr, + uid.vduit[0] ? "." : "", uid.vduit); } static int dasd_eckd_check_cabling(struct dasd_device *device, @@ -5529,15 +5514,15 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) * and return number of printed chars. 
*/ static void -dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) +dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from, + struct ccw1 *to, char *page) { int len, count; char *datap; len = 0; while (from <= to) { - len += sprintf(page + len, PRINTK_HEADER - " CCW %p: %08X %08X DAT:", + len += sprintf(page + len, "CCW %px: %08X %08X DAT:", from, ((int *) from)[0], ((int *) from)[1]); /* get pointer to data (consider IDALs) */ @@ -5560,7 +5545,7 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) from++; } if (len > 0) - printk(KERN_ERR "%s", page); + dev_err(&device->cdev->dev, "%s", page); } static void @@ -5591,9 +5576,12 @@ dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb, static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, struct dasd_ccw_req *req, struct irb *irb) { - char *page; struct ccw1 *first, *last, *fail, *from, *to; + struct device *dev; int len, sl, sct; + char *page; + + dev = &device->cdev->dev; page = (char *) get_zeroed_page(GFP_ATOMIC); if (page == NULL) { @@ -5602,24 +5590,18 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, return; } /* dump the sense data */ - len = sprintf(page, PRINTK_HEADER - " I/O status report for device %s:\n", - dev_name(&device->cdev->dev)); - len += sprintf(page + len, PRINTK_HEADER - " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " - "CS:%02X RC:%d\n", + len = sprintf(page, "I/O status report:\n"); + len += sprintf(page + len, + "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X CS:%02X RC:%d\n", req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), req ? req->intrc : 0); - len += sprintf(page + len, PRINTK_HEADER - " device %s: Failing CCW: %p\n", - dev_name(&device->cdev->dev), + len += sprintf(page + len, "Failing CCW: %px\n", phys_to_virt(irb->scsw.cmd.cpa)); if (irb->esw.esw0.erw.cons) { for (sl = 0; sl < 4; sl++) { - len += sprintf(page + len, PRINTK_HEADER - " Sense(hex) %2d-%2d:", + len += sprintf(page + len, "Sense(hex) %2d-%2d:", (8 * sl), ((8 * sl) + 7)); for (sct = 0; sct < 8; sct++) { @@ -5631,23 +5613,20 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, if (irb->ecw[27] & DASD_SENSE_BIT_0) { /* 24 Byte Sense Data */ - sprintf(page + len, PRINTK_HEADER - " 24 Byte: %x MSG %x, " - "%s MSGb to SYSOP\n", + sprintf(page + len, + "24 Byte: %x MSG %x, %s MSGb to SYSOP\n", irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, irb->ecw[1] & 0x10 ? 
"" : "no"); } else { /* 32 Byte Sense Data */ - sprintf(page + len, PRINTK_HEADER - " 32 Byte: Format: %x " - "Exception class %x\n", + sprintf(page + len, + "32 Byte: Format: %x Exception class %x\n", irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); } } else { - sprintf(page + len, PRINTK_HEADER - " SORRY - NO VALID SENSE AVAILABLE\n"); + sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n"); } - printk(KERN_ERR "%s", page); + dev_err(dev, "%s", page); if (req) { /* req == NULL for unsolicited interrupts */ @@ -5656,8 +5635,8 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, first = req->cpaddr; for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); to = min(first + 6, last); - printk(KERN_ERR PRINTK_HEADER " Related CP in req: %p\n", req); - dasd_eckd_dump_ccw_range(first, to, page); + dev_err(dev, "Related CP in req: %px\n", req); + dasd_eckd_dump_ccw_range(device, first, to, page); /* print failing CCW area (maximum 4) */ /* scsw->cda is either valid or zero */ @@ -5665,19 +5644,19 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, fail = phys_to_virt(irb->scsw.cmd.cpa); /* failing CCW */ if (from < fail - 2) { from = fail - 2; /* there is a gap - print header */ - printk(KERN_ERR PRINTK_HEADER "......\n"); + dev_err(dev, "......\n"); } to = min(fail + 1, last); - dasd_eckd_dump_ccw_range(from, to, page + len); + dasd_eckd_dump_ccw_range(device, from, to, page + len); /* print last CCWs (maximum 2) */ len = 0; from = max(from, ++to); if (from < last - 1) { from = last - 1; /* there is a gap - print header */ - printk(KERN_ERR PRINTK_HEADER "......\n"); + dev_err(dev, "......\n"); } - dasd_eckd_dump_ccw_range(from, last, page + len); + dasd_eckd_dump_ccw_range(device, from, last, page + len); } free_page((unsigned long) page); } @@ -5701,11 +5680,9 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, return; } /* dump the sense data */ - len = sprintf(page, PRINTK_HEADER - " I/O status report for device %s:\n", - dev_name(&device->cdev->dev)); - len += sprintf(page + len, PRINTK_HEADER - " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " + len = sprintf(page, "I/O status report:\n"); + len += sprintf(page + len, + "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " "CS:%02X fcxs:%02X schxs:%02X RC:%d\n", req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), @@ -5713,9 +5690,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, irb->scsw.tm.fcxs, (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq, req ? 
req->intrc : 0); - len += sprintf(page + len, PRINTK_HEADER - " device %s: Failing TCW: %p\n", - dev_name(&device->cdev->dev), + len += sprintf(page + len, "Failing TCW: %px\n", phys_to_virt(irb->scsw.tm.tcw)); tsb = NULL; @@ -5724,47 +5699,37 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw)); if (tsb) { - len += sprintf(page + len, PRINTK_HEADER - " tsb->length %d\n", tsb->length); - len += sprintf(page + len, PRINTK_HEADER - " tsb->flags %x\n", tsb->flags); - len += sprintf(page + len, PRINTK_HEADER - " tsb->dcw_offset %d\n", tsb->dcw_offset); - len += sprintf(page + len, PRINTK_HEADER - " tsb->count %d\n", tsb->count); + len += sprintf(page + len, "tsb->length %d\n", tsb->length); + len += sprintf(page + len, "tsb->flags %x\n", tsb->flags); + len += sprintf(page + len, "tsb->dcw_offset %d\n", tsb->dcw_offset); + len += sprintf(page + len, "tsb->count %d\n", tsb->count); residual = tsb->count - 28; - len += sprintf(page + len, PRINTK_HEADER - " residual %d\n", residual); + len += sprintf(page + len, "residual %d\n", residual); switch (tsb->flags & 0x07) { case 1: /* tsa_iostat */ - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.iostat.dev_time %d\n", + len += sprintf(page + len, "tsb->tsa.iostat.dev_time %d\n", tsb->tsa.iostat.dev_time); - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.iostat.def_time %d\n", + len += sprintf(page + len, "tsb->tsa.iostat.def_time %d\n", tsb->tsa.iostat.def_time); - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.iostat.queue_time %d\n", + len += sprintf(page + len, "tsb->tsa.iostat.queue_time %d\n", tsb->tsa.iostat.queue_time); - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.iostat.dev_busy_time %d\n", + len += sprintf(page + len, "tsb->tsa.iostat.dev_busy_time %d\n", tsb->tsa.iostat.dev_busy_time); - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.iostat.dev_act_time %d\n", + len += sprintf(page + len, "tsb->tsa.iostat.dev_act_time %d\n", tsb->tsa.iostat.dev_act_time); sense = tsb->tsa.iostat.sense; break; case 2: /* ts_ddpc */ - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); + len += sprintf(page + len, "tsb->tsa.ddpc.rc %d\n", + tsb->tsa.ddpc.rc); for (sl = 0; sl < 2; sl++) { - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.ddpc.rcq %2d-%2d: ", + len += sprintf(page + len, + "tsb->tsa.ddpc.rcq %2d-%2d: ", (8 * sl), ((8 * sl) + 7)); rcq = tsb->tsa.ddpc.rcq; for (sct = 0; sct < 8; sct++) { - len += sprintf(page + len, " %02x", + len += sprintf(page + len, "%02x", rcq[8 * sl + sct]); } len += sprintf(page + len, "\n"); @@ -5772,15 +5737,15 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, sense = tsb->tsa.ddpc.sense; break; case 3: /* tsa_intrg */ - len += sprintf(page + len, PRINTK_HEADER - " tsb->tsa.intrg.: not supported yet\n"); + len += sprintf(page + len, + "tsb->tsa.intrg.: not supported yet\n"); break; } if (sense) { for (sl = 0; sl < 4; sl++) { - len += sprintf(page + len, PRINTK_HEADER - " Sense(hex) %2d-%2d:", + len += sprintf(page + len, + "Sense(hex) %2d-%2d:", (8 * sl), ((8 * sl) + 7)); for (sct = 0; sct < 8; sct++) { len += sprintf(page + len, " %02x", @@ -5791,27 +5756,23 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, if (sense[27] & DASD_SENSE_BIT_0) { /* 24 Byte Sense Data */ - sprintf(page + len, PRINTK_HEADER - " 24 Byte: %x MSG %x, " - "%s MSGb to SYSOP\n", + sprintf(page + len, + "24 Byte: %x MSG %x, %s MSGb to SYSOP\n", sense[7] >> 4, sense[7] & 
0x0f, sense[1] & 0x10 ? "" : "no"); } else { /* 32 Byte Sense Data */ - sprintf(page + len, PRINTK_HEADER - " 32 Byte: Format: %x " - "Exception class %x\n", + sprintf(page + len, + "32 Byte: Format: %x Exception class %x\n", sense[6] & 0x0f, sense[22] >> 4); } } else { - sprintf(page + len, PRINTK_HEADER - " SORRY - NO VALID SENSE AVAILABLE\n"); + sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n"); } } else { - sprintf(page + len, PRINTK_HEADER - " SORRY - NO TSB DATA AVAILABLE\n"); + sprintf(page + len, "SORRY - NO TSB DATA AVAILABLE\n"); } - printk(KERN_ERR "%s", page); + dev_err(&device->cdev->dev, "%s", page); free_page((unsigned long) page); } @@ -6865,17 +6826,9 @@ static void dasd_eckd_handle_hpf_error(struct dasd_device *device, dasd_schedule_requeue(device); } -/* - * Initialize block layer request queue. - */ -static void dasd_eckd_setup_blk_queue(struct dasd_block *block) +static unsigned int dasd_eckd_max_sectors(struct dasd_block *block) { - unsigned int logical_block_size = block->bp_block; - struct request_queue *q = block->gdp->queue; - struct dasd_device *device = block->base; - int max; - - if (device->features & DASD_FEATURE_USERAW) { + if (block->base->features & DASD_FEATURE_USERAW) { /* * the max_blocks value for raw_track access is 256 * it is higher than the native ECKD value because we @@ -6883,19 +6836,10 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block) * so the max_hw_sectors are * 2048 x 512B = 1024kB = 16 tracks */ - max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift; - } else { - max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift; + return DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift; } - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - q->limits.max_dev_sectors = max; - blk_queue_logical_block_size(q, logical_block_size); - blk_queue_max_hw_sectors(q, max); - blk_queue_max_segments(q, USHRT_MAX); - /* With page sized segments each segment can be translated into one idaw/tidaw */ - blk_queue_max_segment_size(q, PAGE_SIZE); - blk_queue_segment_boundary(q, PAGE_SIZE - 1); - blk_queue_dma_alignment(q, PAGE_SIZE - 1); + + return DASD_ECKD_MAX_BLOCKS << block->s2b_shift; } static struct ccw_driver dasd_eckd_driver = { @@ -6927,7 +6871,7 @@ static struct dasd_discipline dasd_eckd_discipline = { .basic_to_ready = dasd_eckd_basic_to_ready, .online_to_ready = dasd_eckd_online_to_ready, .basic_to_known = dasd_eckd_basic_to_known, - .setup_blk_queue = dasd_eckd_setup_blk_queue, + .max_sectors = dasd_eckd_max_sectors, .fill_geometry = dasd_eckd_fill_geometry, .start_IO = dasd_start_IO, .term_IO = dasd_term_IO, diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index c956de711cf7..5064a616e041 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -7,8 +7,6 @@ * Author(s): Stefan Weinhuber <wein@de.ibm.com> */ -#define KMSG_COMPONENT "dasd-eckd" - #include <linux/init.h> #include <linux/fs.h> #include <linux/kernel.h> @@ -28,11 +26,6 @@ #include "dasd_int.h" #include "dasd_eckd.h" -#ifdef PRINTK_HEADER -#undef PRINTK_HEADER -#endif /* PRINTK_HEADER */ -#define PRINTK_HEADER "dasd(eer):" - /* * SECTION: the internal buffer */ diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index c07e6e713518..4c0d3a704513 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c @@ -9,8 +9,6 @@ * */ -#define KMSG_COMPONENT "dasd" - #include <linux/ctype.h> #include <linux/init.h> @@ -18,9 +16,6 @@ #include <asm/ebcdic.h> #include <linux/uaccess.h> -/* This is ugly... 
*/ -#define PRINTK_HEADER "dasd_erp:" - #include "dasd_int.h" struct dasd_ccw_req * @@ -170,12 +165,12 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) device = cqr->startdev; if (cqr->intrc == -ETIMEDOUT) { dev_err(&device->cdev->dev, - "A timeout error occurred for cqr %p\n", cqr); + "A timeout error occurred for cqr %px\n", cqr); return; } if (cqr->intrc == -ENOLINK) { dev_err(&device->cdev->dev, - "A transport error occurred for cqr %p\n", cqr); + "A transport error occurred for cqr %px\n", cqr); return; } /* dump sense data */ diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index c06fa2b27120..bcbb2f8e91fe 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -25,11 +25,6 @@ #include "dasd_int.h" #include "dasd_fba.h" -#ifdef PRINTK_HEADER -#undef PRINTK_HEADER -#endif /* PRINTK_HEADER */ -#define PRINTK_HEADER "dasd(fba):" - #define FBA_DEFAULT_RETRIES 32 #define DASD_FBA_CCW_WRITE 0x41 @@ -660,30 +655,27 @@ static void dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, struct irb *irb) { - char *page; struct ccw1 *act, *end, *last; int len, sl, sct, count; + struct device *dev; + char *page; + + dev = &device->cdev->dev; page = (char *) get_zeroed_page(GFP_ATOMIC); if (page == NULL) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", - "No memory to dump sense data"); + "No memory to dump sense data"); return; } - len = sprintf(page, PRINTK_HEADER - " I/O status report for device %s:\n", - dev_name(&device->cdev->dev)); - len += sprintf(page + len, PRINTK_HEADER - " in req: %p CS: 0x%02X DS: 0x%02X\n", req, - irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); - len += sprintf(page + len, PRINTK_HEADER - " device %s: Failing CCW: %p\n", - dev_name(&device->cdev->dev), + len = sprintf(page, "I/O status report:\n"); + len += sprintf(page + len, "in req: %px CS: 0x%02X DS: 0x%02X\n", + req, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); + len += sprintf(page + len, "Failing CCW: %px\n", (void *) (addr_t) irb->scsw.cmd.cpa); if (irb->esw.esw0.erw.cons) { for (sl = 0; sl < 4; sl++) { - len += sprintf(page + len, PRINTK_HEADER - " Sense(hex) %2d-%2d:", + len += sprintf(page + len, "Sense(hex) %2d-%2d:", (8 * sl), ((8 * sl) + 7)); for (sct = 0; sct < 8; sct++) { @@ -693,20 +685,18 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, len += sprintf(page + len, "\n"); } } else { - len += sprintf(page + len, PRINTK_HEADER - " SORRY - NO VALID SENSE AVAILABLE\n"); + len += sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n"); } - printk(KERN_ERR "%s", page); + dev_err(dev, "%s", page); /* dump the Channel Program */ /* print first CCWs (maximum 8) */ act = req->cpaddr; - for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); + for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); end = min(act + 8, last); - len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req); + len = sprintf(page, "Related CP in req: %px\n", req); while (act <= end) { - len += sprintf(page + len, PRINTK_HEADER - " CCW %p: %08X %08X DAT:", + len += sprintf(page + len, "CCW %px: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; count += sizeof(int)) @@ -716,19 +706,17 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, len += sprintf(page + len, "\n"); act++; } - printk(KERN_ERR "%s", page); - + dev_err(dev, "%s", page); /* print failing CCW area */ len = 0; if (act < ((struct ccw1 *)(addr_t) 
irb->scsw.cmd.cpa) - 2) { act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2; - len += sprintf(page + len, PRINTK_HEADER "......\n"); + len += sprintf(page + len, "......\n"); } end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last); while (act <= end) { - len += sprintf(page + len, PRINTK_HEADER - " CCW %p: %08X %08X DAT:", + len += sprintf(page + len, "CCW %px: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; count += sizeof(int)) @@ -742,11 +730,10 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, /* print last CCWs */ if (act < last - 2) { act = last - 2; - len += sprintf(page + len, PRINTK_HEADER "......\n"); + len += sprintf(page + len, "......\n"); } while (act <= last) { - len += sprintf(page + len, PRINTK_HEADER - " CCW %p: %08X %08X DAT:", + len += sprintf(page + len, "CCW %px: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; count += sizeof(int)) @@ -757,39 +744,13 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, act++; } if (len > 0) - printk(KERN_ERR "%s", page); + dev_err(dev, "%s", page); free_page((unsigned long) page); } -/* - * Initialize block layer request queue. - */ -static void dasd_fba_setup_blk_queue(struct dasd_block *block) +static unsigned int dasd_fba_max_sectors(struct dasd_block *block) { - unsigned int logical_block_size = block->bp_block; - struct request_queue *q = block->gdp->queue; - unsigned int max_bytes, max_discard_sectors; - int max; - - max = DASD_FBA_MAX_BLOCKS << block->s2b_shift; - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - q->limits.max_dev_sectors = max; - blk_queue_logical_block_size(q, logical_block_size); - blk_queue_max_hw_sectors(q, max); - blk_queue_max_segments(q, USHRT_MAX); - /* With page sized segments each segment can be translated into one idaw/tidaw */ - blk_queue_max_segment_size(q, PAGE_SIZE); - blk_queue_segment_boundary(q, PAGE_SIZE - 1); - - q->limits.discard_granularity = logical_block_size; - - /* Calculate max_discard_sectors and make it PAGE aligned */ - max_bytes = USHRT_MAX * logical_block_size; - max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE); - max_discard_sectors = max_bytes / logical_block_size; - - blk_queue_max_discard_sectors(q, max_discard_sectors); - blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); + return DASD_FBA_MAX_BLOCKS << block->s2b_shift; } static int dasd_fba_pe_handler(struct dasd_device *device, @@ -802,10 +763,11 @@ static struct dasd_discipline dasd_fba_discipline = { .owner = THIS_MODULE, .name = "FBA ", .ebcname = "FBA ", + .has_discard = true, .check_device = dasd_fba_check_characteristics, .do_analysis = dasd_fba_do_analysis, .pe_handler = dasd_fba_pe_handler, - .setup_blk_queue = dasd_fba_setup_blk_queue, + .max_sectors = dasd_fba_max_sectors, .fill_geometry = dasd_fba_fill_geometry, .start_IO = dasd_start_IO, .term_IO = dasd_term_IO, diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index 55e3abe94cde..4533dd055ca8 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -11,8 +11,6 @@ * */ -#define KMSG_COMPONENT "dasd" - #include <linux/interrupt.h> #include <linux/major.h> #include <linux/fs.h> @@ -20,9 +18,6 @@ #include <linux/uaccess.h> -/* This is ugly... 
*/ -#define PRINTK_HEADER "dasd_gendisk:" - #include "dasd_int.h" static unsigned int queue_depth = 32; @@ -39,6 +34,16 @@ MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD d */ int dasd_gendisk_alloc(struct dasd_block *block) { + struct queue_limits lim = { + /* + * With page sized segments, each segment can be translated into + * one idaw/tidaw. + */ + .max_segment_size = PAGE_SIZE, + .seg_boundary_mask = PAGE_SIZE - 1, + .dma_alignment = PAGE_SIZE - 1, + .max_segments = USHRT_MAX, + }; struct gendisk *gdp; struct dasd_device *base; int len, rc; @@ -58,11 +63,12 @@ int dasd_gendisk_alloc(struct dasd_block *block) if (rc) return rc; - gdp = blk_mq_alloc_disk(&block->tag_set, block); + gdp = blk_mq_alloc_disk(&block->tag_set, &lim, block); if (IS_ERR(gdp)) { blk_mq_free_tag_set(&block->tag_set); return PTR_ERR(gdp); } + blk_queue_flag_set(QUEUE_FLAG_NONROT, gdp->queue); /* Initialize gendisk structure. */ gdp->major = DASD_MAJOR; @@ -127,15 +133,15 @@ void dasd_gendisk_free(struct dasd_block *block) */ int dasd_scan_partitions(struct dasd_block *block) { - struct bdev_handle *bdev_handle; + struct file *bdev_file; int rc; - bdev_handle = bdev_open_by_dev(disk_devt(block->gdp), BLK_OPEN_READ, + bdev_file = bdev_file_open_by_dev(disk_devt(block->gdp), BLK_OPEN_READ, NULL, NULL); - if (IS_ERR(bdev_handle)) { + if (IS_ERR(bdev_file)) { DBF_DEV_EVENT(DBF_ERR, block->base, "scan partitions error, blkdev_get returned %ld", - PTR_ERR(bdev_handle)); + PTR_ERR(bdev_file)); return -ENODEV; } @@ -147,15 +153,15 @@ int dasd_scan_partitions(struct dasd_block *block) "scan partitions error, rc %d", rc); /* - * Since the matching bdev_release() call to the - * bdev_open_by_path() in this function is not called before + * Since the matching fput() call to the + * bdev_file_open_by_path() in this function is not called before * dasd_destroy_partitions the offline open_count limit needs to be - * increased from 0 to 1. This is done by setting device->bdev_handle + * increased from 0 to 1. This is done by setting device->bdev_file * (see dasd_generic_set_offline). As long as the partition detection * is running no offline should be allowed. That is why the assignment - * to block->bdev_handle is done AFTER the BLKRRPART ioctl. + * to block->bdev_file is done AFTER the BLKRRPART ioctl. */ - block->bdev_handle = bdev_handle; + block->bdev_file = bdev_file; return 0; } @@ -165,21 +171,21 @@ int dasd_scan_partitions(struct dasd_block *block) */ void dasd_destroy_partitions(struct dasd_block *block) { - struct bdev_handle *bdev_handle; + struct file *bdev_file; /* - * Get the bdev_handle pointer from the device structure and clear - * device->bdev_handle to lower the offline open_count limit again. + * Get the bdev_file pointer from the device structure and clear + * device->bdev_file to lower the offline open_count limit again. */ - bdev_handle = block->bdev_handle; - block->bdev_handle = NULL; + bdev_file = block->bdev_file; + block->bdev_file = NULL; - mutex_lock(&bdev_handle->bdev->bd_disk->open_mutex); - bdev_disk_changed(bdev_handle->bdev->bd_disk, true); - mutex_unlock(&bdev_handle->bdev->bd_disk->open_mutex); + mutex_lock(&file_bdev(bdev_file)->bd_disk->open_mutex); + bdev_disk_changed(file_bdev(bdev_file)->bd_disk, true); + mutex_unlock(&file_bdev(bdev_file)->bd_disk->open_mutex); /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. 
*/ - bdev_release(bdev_handle); + fput(bdev_file); } int dasd_gendisk_init(void) diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 1b1b8a41c4d4..e5f40536b425 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -113,9 +113,6 @@ do { \ __dev_id.ssid, __dev_id.devno, d_data); \ } while (0) -/* limit size for an errorstring */ -#define ERRORLENGTH 30 - /* definition of dbf debug levels */ #define DBF_EMERG 0 /* system is unusable */ #define DBF_ALERT 1 /* action must be taken immediately */ @@ -126,32 +123,6 @@ do { \ #define DBF_INFO 6 /* informational */ #define DBF_DEBUG 6 /* debug-level messages */ -/* messages to be written via klogd and dbf */ -#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\ -do { \ - printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \ - dev_name(&d_device->cdev->dev), d_args); \ - DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \ -} while(0) - -#define MESSAGE(d_loglevel,d_string,d_args...)\ -do { \ - printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \ - DBF_EVENT(DBF_ALERT, d_string, d_args); \ -} while(0) - -/* messages to be written via klogd only */ -#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\ -do { \ - printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \ - dev_name(&d_device->cdev->dev), d_args); \ -} while(0) - -#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\ -do { \ - printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \ -} while(0) - /* Macro to calculate number of blocks per page */ #define BLOCKS_PER_PAGE(blksize) (PAGE_SIZE / blksize) @@ -322,6 +293,7 @@ struct dasd_discipline { struct module *owner; char ebcname[8]; /* a name used for tagging and printks */ char name[8]; /* a name used for tagging and printks */ + bool has_discard; struct list_head list; /* used for list of disciplines */ @@ -360,10 +332,7 @@ struct dasd_discipline { int (*online_to_ready) (struct dasd_device *); int (*basic_to_known)(struct dasd_device *); - /* - * Initialize block layer request queue. - */ - void (*setup_blk_queue)(struct dasd_block *); + unsigned int (*max_sectors)(struct dasd_block *); /* (struct dasd_device *); * Device operation functions. build_cp creates a ccw chain for * a block device request, start_io starts the request and @@ -650,7 +619,7 @@ struct dasd_block { struct gendisk *gdp; spinlock_t request_queue_lock; struct blk_mq_tag_set tag_set; - struct bdev_handle *bdev_handle; + struct file *bdev_file; atomic_t open_count; unsigned long blocks; /* size of volume in blocks */ diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 61b9675e2a67..7e0ed7032f76 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -10,8 +10,6 @@ * i/o controls for the dasd driver. */ -#define KMSG_COMPONENT "dasd" - #include <linux/interrupt.h> #include <linux/compat.h> #include <linux/major.h> @@ -24,12 +22,8 @@ #include <linux/uaccess.h> #include <linux/dasd_mod.h> -/* This is ugly... */ -#define PRINTK_HEADER "dasd_ioctl:" - #include "dasd_int.h" - static int dasd_ioctl_api_version(void __user *argp) { @@ -537,7 +531,7 @@ static int __dasd_ioctl_information(struct dasd_block *block, * This must be hidden from user-space. 
*/ dasd_info->open_count = atomic_read(&block->open_count); - if (!block->bdev_handle) + if (!block->bdev_file) dasd_info->open_count++; /* diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 62a859ea67f8..0faaa437d9be 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c @@ -11,8 +11,6 @@ * */ -#define KMSG_COMPONENT "dasd" - #include <linux/ctype.h> #include <linux/slab.h> #include <linux/string.h> @@ -23,9 +21,6 @@ #include <asm/debug.h> #include <linux/uaccess.h> -/* This is ugly... */ -#define PRINTK_HEADER "dasd_proc:" - #include "dasd_int.h" static struct proc_dir_entry *dasd_proc_root_entry = NULL; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 4b7ecd4fd431..9c8f529b827c 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -546,6 +546,9 @@ static const struct attribute_group *dcssblk_dev_attr_groups[] = { static ssize_t dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct queue_limits lim = { + .logical_block_size = 4096, + }; int rc, i, j, num_of_segments; struct dcssblk_dev_info *dev_info; struct segment_info *seg_info, *temp; @@ -629,9 +632,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->dev.release = dcssblk_release_segment; dev_info->dev.groups = dcssblk_dev_attr_groups; INIT_LIST_HEAD(&dev_info->lh); - dev_info->gd = blk_alloc_disk(NUMA_NO_NODE); - if (dev_info->gd == NULL) { - rc = -ENOMEM; + dev_info->gd = blk_alloc_disk(&lim, NUMA_NO_NODE); + if (IS_ERR(dev_info->gd)) { + rc = PTR_ERR(dev_info->gd); goto seg_list_del; } dev_info->gd->major = dcssblk_major; @@ -639,7 +642,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->gd->fops = &dcssblk_devops; dev_info->gd->private_data = dev_info; dev_info->gd->flags |= GENHD_FL_NO_PART; - blk_queue_logical_block_size(dev_info->gd->queue, 4096); blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue); seg_byte_size = (dev_info->end - dev_info->start + 1); diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index ade95e91b3c8..9f6fdd0daa74 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -435,10 +435,17 @@ static const struct blk_mq_ops scm_mq_ops = { int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) { - unsigned int devindex, nr_max_blk; + struct queue_limits lim = { + .logical_block_size = 1 << 12, + }; + unsigned int devindex; struct request_queue *rq; int len, ret; + lim.max_segments = min(scmdev->nr_max_block, + (unsigned int) (PAGE_SIZE / sizeof(struct aidaw))); + lim.max_hw_sectors = lim.max_segments << 3; /* 8 * 512 = blk_size */ + devindex = atomic_inc_return(&nr_devices) - 1; /* scma..scmz + scmaa..scmzz */ if (devindex > 701) { @@ -462,18 +469,12 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) if (ret) goto out; - bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, scmdev); + bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, &lim, scmdev); if (IS_ERR(bdev->gendisk)) { ret = PTR_ERR(bdev->gendisk); goto out_tag; } rq = bdev->rq = bdev->gendisk->queue; - nr_max_blk = min(scmdev->nr_max_block, - (unsigned int) (PAGE_SIZE / sizeof(struct aidaw))); - - blk_queue_logical_block_size(rq, 1 << 12); - blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */ - blk_queue_max_segments(rq, nr_max_blk); blk_queue_flag_set(QUEUE_FLAG_NONROT, rq); 
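The dasd_genhd.c, dcssblk.c and scm_blk.c hunks above all follow the same conversion: instead of tuning the request queue after allocation with blk_queue_logical_block_size(), blk_queue_max_hw_sectors() and friends, the limits are collected in a struct queue_limits and handed to blk_mq_alloc_disk()/blk_alloc_disk() up front. A minimal sketch of that pattern follows; the foo driver name and the concrete limit values are illustrative only and not taken from this patch.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static struct gendisk *foo_alloc_disk(struct blk_mq_tag_set *set, void *data)
{
	/* limits are declared before allocation instead of applied afterwards */
	struct queue_limits lim = {
		.logical_block_size	= 4096,
		.max_hw_sectors		= 256,		/* example value */
		.max_segments		= USHRT_MAX,
		.max_segment_size	= PAGE_SIZE,	/* one idaw/tidaw per segment */
		.seg_boundary_mask	= PAGE_SIZE - 1,
		.dma_alignment		= PAGE_SIZE - 1,
	};
	struct gendisk *gd;

	gd = blk_mq_alloc_disk(set, &lim, data);
	if (IS_ERR(gd))
		return gd;	/* caller checks with IS_ERR(), not NULL */

	/* queue flags are not part of queue_limits and are still set here */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, gd->queue);
	return gd;
}

Note the error-handling change that comes with the new allocators: blk_alloc_disk() now returns ERR_PTR() values rather than NULL, which is why the dcssblk hunk switches its check to IS_ERR()/PTR_ERR().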
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq); diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 82efdd20ad01..1d17a83569ce 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -195,7 +195,7 @@ static void free_chan_prog(struct ccw1 *cpa) struct ccw1 *ptr = cpa; while (ptr->cda) { - kfree((void *)(addr_t) ptr->cda); + kfree(phys_to_virt(ptr->cda)); ptr++; } kfree(cpa); @@ -237,7 +237,7 @@ static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count, free_chan_prog(cpa); return ERR_PTR(-ENOMEM); } - cpa[i].cda = (u32)(addr_t) kbuf; + cpa[i].cda = (u32)virt_to_phys(kbuf); if (copy_from_user(kbuf, ubuf, reclen)) { free_chan_prog(cpa); return ERR_PTR(-EFAULT); diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index bc3be0330f1d..0969fa01df58 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -29,7 +29,6 @@ #include <asm/irqflags.h> #include <asm/checksum.h> #include <asm/os_info.h> -#include <asm/switch_to.h> #include <asm/maccess.h> #include "sclp.h" diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index aa3292e57e38..6eb8bcd948dc 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -31,7 +31,7 @@ * to devices that use multiple subchannels. */ -static struct bus_type ccwgroup_bus_type; +static const struct bus_type ccwgroup_bus_type; static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) { @@ -465,7 +465,7 @@ static void ccwgroup_shutdown(struct device *dev) gdrv->shutdown(gdev); } -static struct bus_type ccwgroup_bus_type = { +static const struct bus_type ccwgroup_bus_type = { .name = "ccwgroup", .dev_groups = ccwgroup_dev_groups, .remove = ccwgroup_remove, diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 64ed55c3aed6..3d88899dff7c 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -1091,8 +1091,8 @@ int __init chsc_init(void) { int ret; - sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + sei_page = (void *)get_zeroed_page(GFP_KERNEL); + chsc_page = (void *)get_zeroed_page(GFP_KERNEL); if (!sei_page || !chsc_page) { ret = -ENOMEM; goto out_err; diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index 902237d0baef..e6c800653f98 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -293,7 +293,7 @@ static int chsc_ioctl_start(void __user *user_area) if (!css_general_characteristics.dynio) /* It makes no sense to try. 
*/ return -EOPNOTSUPP; - chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL); + chsc_area = (void *)get_zeroed_page(GFP_KERNEL); if (!chsc_area) return -ENOMEM; request = kzalloc(sizeof(*request), GFP_KERNEL); @@ -341,7 +341,7 @@ static int chsc_ioctl_on_close_set(void __user *user_area) ret = -ENOMEM; goto out_unlock; } - on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL); + on_close_chsc_area = (void *)get_zeroed_page(GFP_KERNEL); if (!on_close_chsc_area) { ret = -ENOMEM; goto out_free_request; @@ -393,7 +393,7 @@ static int chsc_ioctl_start_sync(void __user *user_area) struct chsc_sync_area *chsc_area; int ret, ccode; - chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + chsc_area = (void *)get_zeroed_page(GFP_KERNEL); if (!chsc_area) return -ENOMEM; if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) { @@ -439,7 +439,7 @@ static int chsc_ioctl_info_channel_path(void __user *user_cd) u8 data[PAGE_SIZE - 20]; } __attribute__ ((packed)) *scpcd_area; - scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + scpcd_area = (void *)get_zeroed_page(GFP_KERNEL); if (!scpcd_area) return -ENOMEM; cd = kzalloc(sizeof(*cd), GFP_KERNEL); @@ -501,7 +501,7 @@ static int chsc_ioctl_info_cu(void __user *user_cd) u8 data[PAGE_SIZE - 20]; } __attribute__ ((packed)) *scucd_area; - scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + scucd_area = (void *)get_zeroed_page(GFP_KERNEL); if (!scucd_area) return -ENOMEM; cd = kzalloc(sizeof(*cd), GFP_KERNEL); @@ -564,7 +564,7 @@ static int chsc_ioctl_info_sch_cu(void __user *user_cud) u8 data[PAGE_SIZE - 20]; } __attribute__ ((packed)) *sscud_area; - sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + sscud_area = (void *)get_zeroed_page(GFP_KERNEL); if (!sscud_area) return -ENOMEM; cud = kzalloc(sizeof(*cud), GFP_KERNEL); @@ -626,7 +626,7 @@ static int chsc_ioctl_conf_info(void __user *user_ci) u8 data[PAGE_SIZE - 20]; } __attribute__ ((packed)) *sci_area; - sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + sci_area = (void *)get_zeroed_page(GFP_KERNEL); if (!sci_area) return -ENOMEM; ci = kzalloc(sizeof(*ci), GFP_KERNEL); @@ -697,7 +697,7 @@ static int chsc_ioctl_conf_comp_list(void __user *user_ccl) u32 res; } __attribute__ ((packed)) *cssids_parm; - sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + sccl_area = (void *)get_zeroed_page(GFP_KERNEL); if (!sccl_area) return -ENOMEM; ccl = kzalloc(sizeof(*ccl), GFP_KERNEL); @@ -757,7 +757,7 @@ static int chsc_ioctl_chpd(void __user *user_chpd) int ret; chpd = kzalloc(sizeof(*chpd), GFP_KERNEL); - scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + scpd_area = (void *)get_zeroed_page(GFP_KERNEL); if (!scpd_area || !chpd) { ret = -ENOMEM; goto out_free; @@ -797,7 +797,7 @@ static int chsc_ioctl_dcal(void __user *user_dcal) u8 data[PAGE_SIZE - 36]; } __attribute__ ((packed)) *sdcal_area; - sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + sdcal_area = (void *)get_zeroed_page(GFP_KERNEL); if (!sdcal_area) return -ENOMEM; dcal = kzalloc(sizeof(*dcal), GFP_KERNEL); diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 5584aa46c94e..f80dc18e2a76 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -169,7 +169,8 @@ static inline void cmf_activate(void *area, unsigned int onoff) " lgr 2,%[mbo]\n" " schm\n" : - : [r1] "d" ((unsigned long)onoff), [mbo] "d" (area) + : [r1] "d" ((unsigned long)onoff), + [mbo] "d" (virt_to_phys(area)) : "1", "2"); } @@ -501,8 +502,7 @@ static int alloc_cmb(struct 
ccw_device *cdev) WARN_ON(!list_empty(&cmb_area.list)); spin_unlock(&cmb_area.lock); - mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA, - get_order(size)); + mem = (void *)__get_free_pages(GFP_KERNEL, get_order(size)); spin_lock(&cmb_area.lock); if (cmb_area.mem) { diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 28a88ed2c3aa..094431a62ad5 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -39,7 +39,7 @@ int max_ssid; #define MAX_CSS_IDX 0 struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1]; -static struct bus_type css_bus_type; +static const struct bus_type css_bus_type; int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) @@ -1409,7 +1409,7 @@ static int css_uevent(const struct device *dev, struct kobj_uevent_env *env) return ret; } -static struct bus_type css_bus_type = { +static const struct bus_type css_bus_type = { .name = "css", .match = css_bus_match, .probe = css_probe, diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 0cfb179e1bcb..f95d12345d98 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -49,7 +49,7 @@ static const unsigned long recovery_delay[] = { 3, 30, 300 }; static atomic_t ccw_device_init_count = ATOMIC_INIT(0); static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq); -static struct bus_type ccw_bus_type; +static const struct bus_type ccw_bus_type; /******************* bus type handling ***********************/ @@ -1776,7 +1776,7 @@ static void ccw_device_shutdown(struct device *dev) __disable_cmf(cdev); } -static struct bus_type ccw_bus_type = { +static const struct bus_type ccw_bus_type = { .name = "ccw", .match = ccw_bus_match, .uevent = ccw_uevent, diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index c533d1dadc6b..a5dba3829769 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -202,7 +202,8 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, return -EINVAL; if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; - if (cdev->private->state == DEV_STATE_VERIFY) { + if (cdev->private->state == DEV_STATE_VERIFY || + cdev->private->flags.doverify) { /* Remember to fake irb when finished. 
*/ if (!cdev->private->flags.fake_irb) { cdev->private->flags.fake_irb = FAKE_CMD_IRB; @@ -214,8 +215,7 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, } if (cdev->private->state != DEV_STATE_ONLINE || ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) && - !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) || - cdev->private->flags.doverify) + !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS))) return -EBUSY; ret = cio_set_options (sch, flags); if (ret) diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c index 6b21ba68c1fe..c7894d61306d 100644 --- a/drivers/s390/cio/scm.c +++ b/drivers/s390/cio/scm.c @@ -42,7 +42,7 @@ static int scmdev_uevent(const struct device *dev, struct kobj_uevent_env *env) return add_uevent_var(env, "MODALIAS=scm:scmdev"); } -static struct bus_type scm_bus_type = { +static const struct bus_type scm_bus_type = { .name = "scm", .probe = scmdev_probe, .remove = scmdev_remove, @@ -228,7 +228,7 @@ int scm_update_information(void) size_t num; int ret; - scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); + scm_info = (void *)__get_free_page(GFP_KERNEL); if (!scm_info) return -ENOMEM; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index f46dd6abacd7..cce0bafd4c92 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -38,6 +38,7 @@ #include <linux/debugfs.h> #include <linux/ctype.h> #include <linux/module.h> +#include <asm/uv.h> #include "ap_bus.h" #include "ap_debug.h" @@ -83,14 +84,11 @@ EXPORT_SYMBOL(ap_perms); DEFINE_MUTEX(ap_perms_mutex); EXPORT_SYMBOL(ap_perms_mutex); -/* # of bus scans since init */ -static atomic64_t ap_scan_bus_count; - /* # of bindings complete since init */ static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0); -/* completion for initial APQN bindings complete */ -static DECLARE_COMPLETION(ap_init_apqn_bindings_complete); +/* completion for APQN bindings complete */ +static DECLARE_COMPLETION(ap_apqn_bindings_complete); static struct ap_config_info *ap_qci_info; static struct ap_config_info *ap_qci_info_old; @@ -101,12 +99,16 @@ static struct ap_config_info *ap_qci_info_old; debug_info_t *ap_dbf_info; /* - * Workqueue timer for bus rescan. + * AP bus rescan related things. */ -static struct timer_list ap_config_timer; -static int ap_config_time = AP_CONFIG_TIME; -static void ap_scan_bus(struct work_struct *); -static DECLARE_WORK(ap_scan_work, ap_scan_bus); +static bool ap_scan_bus(void); +static bool ap_scan_bus_result; /* result of last ap_scan_bus() */ +static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */ +static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */ +static int ap_scan_bus_time = AP_CONFIG_TIME; +static struct timer_list ap_scan_bus_timer; +static void ap_scan_bus_wq_callback(struct work_struct *); +static DECLARE_WORK(ap_scan_bus_work, ap_scan_bus_wq_callback); /* * Tasklet & timer for AP request polling and interrupts @@ -135,7 +137,7 @@ static int ap_max_domain_id = 15; /* Maximum adapter id, if not given via qci */ static int ap_max_adapter_id = 63; -static struct bus_type ap_bus_type; +static const struct bus_type ap_bus_type; /* Adapter interrupt definitions */ static void ap_interrupt_handler(struct airq_struct *airq, @@ -753,7 +755,7 @@ static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound) } /* - * After initial ap bus scan do check if all existing APQNs are + * After ap bus scan do check if all existing APQNs are * bound to device drivers. 
*/ static void ap_check_bindings_complete(void) @@ -763,9 +765,9 @@ static void ap_check_bindings_complete(void) if (atomic64_read(&ap_scan_bus_count) >= 1) { ap_calc_bound_apqns(&apqns, &bound); if (bound == apqns) { - if (!completion_done(&ap_init_apqn_bindings_complete)) { - complete_all(&ap_init_apqn_bindings_complete); - AP_DBF_INFO("%s complete\n", __func__); + if (!completion_done(&ap_apqn_bindings_complete)) { + complete_all(&ap_apqn_bindings_complete); + pr_debug("%s all apqn bindings complete\n", __func__); } ap_send_bindings_complete_uevent(); } @@ -782,27 +784,29 @@ static void ap_check_bindings_complete(void) * -ETIME is returned. On failures negative return values are * returned to the caller. */ -int ap_wait_init_apqn_bindings_complete(unsigned long timeout) +int ap_wait_apqn_bindings_complete(unsigned long timeout) { + int rc = 0; long l; - if (completion_done(&ap_init_apqn_bindings_complete)) + if (completion_done(&ap_apqn_bindings_complete)) return 0; if (timeout) l = wait_for_completion_interruptible_timeout( - &ap_init_apqn_bindings_complete, timeout); + &ap_apqn_bindings_complete, timeout); else l = wait_for_completion_interruptible( - &ap_init_apqn_bindings_complete); + &ap_apqn_bindings_complete); if (l < 0) - return l == -ERESTARTSYS ? -EINTR : l; + rc = l == -ERESTARTSYS ? -EINTR : l; else if (l == 0 && timeout) - return -ETIME; + rc = -ETIME; - return 0; + pr_debug("%s rc=%d\n", __func__, rc); + return rc; } -EXPORT_SYMBOL(ap_wait_init_apqn_bindings_complete); +EXPORT_SYMBOL(ap_wait_apqn_bindings_complete); static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data) { @@ -826,8 +830,8 @@ static int __ap_revise_reserved(struct device *dev, void *dummy) drvres = to_ap_drv(dev->driver)->flags & AP_DRIVER_FLAG_DEFAULT; if (!!devres != !!drvres) { - AP_DBF_DBG("%s reprobing queue=%02x.%04x\n", - __func__, card, queue); + pr_debug("%s reprobing queue=%02x.%04x\n", + __func__, card, queue); rc = device_reprobe(dev); if (rc) AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n", @@ -939,8 +943,6 @@ static int ap_device_probe(struct device *dev) if (is_queue_dev(dev)) hash_del(&to_ap_queue(dev)->hnode); spin_unlock_bh(&ap_queues_lock); - } else { - ap_check_bindings_complete(); } out: @@ -1012,16 +1014,47 @@ void ap_driver_unregister(struct ap_driver *ap_drv) } EXPORT_SYMBOL(ap_driver_unregister); -void ap_bus_force_rescan(void) +/* + * Enforce a synchronous AP bus rescan. + * Returns true if the bus scan finds a change in the AP configuration + * and AP devices have been added or deleted when this function returns. + */ +bool ap_bus_force_rescan(void) { + unsigned long scan_counter = atomic64_read(&ap_scan_bus_count); + bool rc = false; + + pr_debug(">%s scan counter=%lu\n", __func__, scan_counter); + /* Only trigger AP bus scans after the initial scan is done */ - if (atomic64_read(&ap_scan_bus_count) <= 0) - return; + if (scan_counter <= 0) + goto out; + + /* Try to acquire the AP scan bus mutex */ + if (mutex_trylock(&ap_scan_bus_mutex)) { + /* mutex acquired, run the AP bus scan */ + ap_scan_bus_result = ap_scan_bus(); + rc = ap_scan_bus_result; + mutex_unlock(&ap_scan_bus_mutex); + goto out; + } + + /* + * Mutex acquire failed. So there is currently another task + * already running the AP bus scan. Then let's simple wait + * for the lock which means the other task has finished and + * stored the result in ap_scan_bus_result. 
+ */ + if (mutex_lock_interruptible(&ap_scan_bus_mutex)) { + /* some error occurred, ignore and go out */ + goto out; + } + rc = ap_scan_bus_result; + mutex_unlock(&ap_scan_bus_mutex); - /* processing a asynchronous bus rescan */ - del_timer(&ap_config_timer); - queue_work(system_long_wq, &ap_scan_work); - flush_work(&ap_scan_work); +out: + pr_debug("%s rc=%d\n", __func__, rc); + return rc; } EXPORT_SYMBOL(ap_bus_force_rescan); @@ -1030,7 +1063,7 @@ EXPORT_SYMBOL(ap_bus_force_rescan); */ void ap_bus_cfg_chg(void) { - AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__); + pr_debug("%s config change, forcing bus rescan\n", __func__); ap_bus_force_rescan(); } @@ -1250,7 +1283,7 @@ static BUS_ATTR_RO(ap_interrupts); static ssize_t config_time_show(const struct bus_type *bus, char *buf) { - return sysfs_emit(buf, "%d\n", ap_config_time); + return sysfs_emit(buf, "%d\n", ap_scan_bus_time); } static ssize_t config_time_store(const struct bus_type *bus, @@ -1260,8 +1293,8 @@ static ssize_t config_time_store(const struct bus_type *bus, if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120) return -EINVAL; - ap_config_time = time; - mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); + ap_scan_bus_time = time; + mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ); return count; } @@ -1603,7 +1636,7 @@ static struct attribute *ap_bus_attrs[] = { }; ATTRIBUTE_GROUPS(ap_bus); -static struct bus_type ap_bus_type = { +static const struct bus_type ap_bus_type = { .name = "ap", .bus_groups = ap_bus_groups, .match = &ap_bus_match, @@ -1888,8 +1921,8 @@ static inline void ap_scan_domains(struct ap_card *ac) aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED; } spin_unlock_bh(&aq->lock); - AP_DBF_DBG("%s(%d,%d) queue dev checkstop on\n", - __func__, ac->id, dom); + pr_debug("%s(%d,%d) queue dev checkstop on\n", + __func__, ac->id, dom); /* 'receive' pending messages with -EAGAIN */ ap_flush_queue(aq); goto put_dev_and_continue; @@ -1899,8 +1932,8 @@ static inline void ap_scan_domains(struct ap_card *ac) if (aq->dev_state > AP_DEV_STATE_UNINITIATED) _ap_queue_init_state(aq); spin_unlock_bh(&aq->lock); - AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n", - __func__, ac->id, dom); + pr_debug("%s(%d,%d) queue dev checkstop off\n", + __func__, ac->id, dom); goto put_dev_and_continue; } /* config state change */ @@ -1912,8 +1945,8 @@ static inline void ap_scan_domains(struct ap_card *ac) aq->last_err_rc = AP_RESPONSE_DECONFIGURED; } spin_unlock_bh(&aq->lock); - AP_DBF_DBG("%s(%d,%d) queue dev config off\n", - __func__, ac->id, dom); + pr_debug("%s(%d,%d) queue dev config off\n", + __func__, ac->id, dom); ap_send_config_uevent(&aq->ap_dev, aq->config); /* 'receive' pending messages with -EAGAIN */ ap_flush_queue(aq); @@ -1924,8 +1957,8 @@ static inline void ap_scan_domains(struct ap_card *ac) if (aq->dev_state > AP_DEV_STATE_UNINITIATED) _ap_queue_init_state(aq); spin_unlock_bh(&aq->lock); - AP_DBF_DBG("%s(%d,%d) queue dev config on\n", - __func__, ac->id, dom); + pr_debug("%s(%d,%d) queue dev config on\n", + __func__, ac->id, dom); ap_send_config_uevent(&aq->ap_dev, aq->config); goto put_dev_and_continue; } @@ -1997,8 +2030,8 @@ static inline void ap_scan_adapter(int ap) ap_scan_rm_card_dev_and_queue_devs(ac); put_device(dev); } else { - AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n", - __func__, ap); + pr_debug("%s(%d) no type info (no APQN found), ignored\n", + __func__, ap); } return; } @@ -2010,8 +2043,8 @@ static inline void ap_scan_adapter(int ap) 
ap_scan_rm_card_dev_and_queue_devs(ac); put_device(dev); } else { - AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n", - __func__, ap); + pr_debug("%s(%d) no valid type (0) info, ignored\n", + __func__, ap); } return; } @@ -2135,23 +2168,80 @@ static bool ap_get_configuration(void) sizeof(struct ap_config_info)) != 0; } +/* + * ap_config_has_new_aps - Check current against old qci info if + * new adapters have appeared. Returns true if at least one new + * adapter in the apm mask is showing up. Existing adapters or + * receding adapters are not counted. + */ +static bool ap_config_has_new_aps(void) +{ + + unsigned long m[BITS_TO_LONGS(AP_DEVICES)]; + + if (!ap_qci_info) + return false; + + bitmap_andnot(m, (unsigned long *)ap_qci_info->apm, + (unsigned long *)ap_qci_info_old->apm, AP_DEVICES); + if (!bitmap_empty(m, AP_DEVICES)) + return true; + + return false; +} + +/* + * ap_config_has_new_doms - Check current against old qci info if + * new (usage) domains have appeared. Returns true if at least one + * new domain in the aqm mask is showing up. Existing domains or + * receding domains are not counted. + */ +static bool ap_config_has_new_doms(void) +{ + unsigned long m[BITS_TO_LONGS(AP_DOMAINS)]; + + if (!ap_qci_info) + return false; + + bitmap_andnot(m, (unsigned long *)ap_qci_info->aqm, + (unsigned long *)ap_qci_info_old->aqm, AP_DOMAINS); + if (!bitmap_empty(m, AP_DOMAINS)) + return true; + + return false; +} + /** * ap_scan_bus(): Scan the AP bus for new devices - * Runs periodically, workqueue timer (ap_config_time) - * @unused: Unused pointer. + * Always run under mutex ap_scan_bus_mutex protection + * which needs to get locked/unlocked by the caller! + * Returns true if any config change has been detected + * during the scan, otherwise false. */ -static void ap_scan_bus(struct work_struct *unused) +static bool ap_scan_bus(void) { - int ap, config_changed = 0; + bool config_changed; + int ap; + + pr_debug(">%s\n", __func__); - /* config change notify */ + /* (re-)fetch configuration via QCI */ config_changed = ap_get_configuration(); - if (config_changed) + if (config_changed) { + if (ap_config_has_new_aps() || ap_config_has_new_doms()) { + /* + * Appearance of new adapters and/or domains need to + * build new ap devices which need to get bound to an + * device driver. Thus reset the APQN bindings complete + * completion. 
+ */ + reinit_completion(&ap_apqn_bindings_complete); + } + /* post a config change notify */ notify_config_changed(); + } ap_select_domain(); - AP_DBF_DBG("%s running\n", __func__); - /* loop over all possible adapters */ for (ap = 0; ap <= ap_max_adapter_id; ap++) ap_scan_adapter(ap); @@ -2174,23 +2264,56 @@ static void ap_scan_bus(struct work_struct *unused) } if (atomic64_inc_return(&ap_scan_bus_count) == 1) { - AP_DBF_DBG("%s init scan complete\n", __func__); + pr_debug("%s init scan complete\n", __func__); ap_send_init_scan_done_uevent(); - ap_check_bindings_complete(); } - mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); + ap_check_bindings_complete(); + + mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ); + + pr_debug("<%s config_changed=%d\n", __func__, config_changed); + + return config_changed; } -static void ap_config_timeout(struct timer_list *unused) +/* + * Callback for the ap_scan_bus_timer + * Runs periodically, workqueue timer (ap_scan_bus_time) + */ +static void ap_scan_bus_timer_callback(struct timer_list *unused) { - queue_work(system_long_wq, &ap_scan_work); + /* + * schedule work into the system long wq which when + * the work is finally executed, calls the AP bus scan. + */ + queue_work(system_long_wq, &ap_scan_bus_work); +} + +/* + * Callback for the ap_scan_bus_work + */ +static void ap_scan_bus_wq_callback(struct work_struct *unused) +{ + /* + * Try to invoke an ap_scan_bus(). If the mutex acquisition + * fails there is currently another task already running the + * AP scan bus and there is no need to wait and re-trigger the + * scan again. Please note at the end of the scan bus function + * the AP scan bus timer is re-armed which triggers then the + * ap_scan_bus_timer_callback which enqueues a work into the + * system_long_wq which invokes this function here again. + */ + if (mutex_trylock(&ap_scan_bus_mutex)) { + ap_scan_bus_result = ap_scan_bus(); + mutex_unlock(&ap_scan_bus_mutex); + } } static int __init ap_debug_init(void) { ap_dbf_info = debug_register("ap", 2, 1, - DBF_MAX_SPRINTF_ARGS * sizeof(long)); + AP_DBF_MAX_SPRINTF_ARGS * sizeof(long)); debug_register_view(ap_dbf_info, &debug_sprintf_view); debug_set_level(ap_dbf_info, DBF_ERR); @@ -2274,7 +2397,7 @@ static int __init ap_module_init(void) ap_root_device->bus = &ap_bus_type; /* Setup the AP bus rescan timer. */ - timer_setup(&ap_config_timer, ap_config_timeout, 0); + timer_setup(&ap_scan_bus_timer, ap_scan_bus_timer_callback, 0); /* * Setup the high resolution poll timer. @@ -2292,7 +2415,7 @@ static int __init ap_module_init(void) goto out_work; } - queue_work(system_long_wq, &ap_scan_work); + queue_work(system_long_wq, &ap_scan_bus_work); return 0; diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 98814839ef30..59c7ed49aa02 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -266,7 +266,7 @@ int ap_sb_available(void); bool ap_is_se_guest(void); void ap_wait(enum ap_sm_wait wait); void ap_request_timeout(struct timer_list *t); -void ap_bus_force_rescan(void); +bool ap_bus_force_rescan(void); int ap_test_config_usage_domain(unsigned int domain); int ap_test_config_ctrl_domain(unsigned int domain); @@ -352,8 +352,12 @@ int ap_parse_mask_str(const char *str, * the return value is 0. If the timeout (in jiffies) hits instead * -ETIME is returned. On failures negative return values are * returned to the caller. + * It may be that the AP bus scan finds new devices. 
Then the + * condition that all APQNs are bound to their device drivers + * is reset to false and this call again blocks until either all + * APQNs are bound to a device driver or the timeout hits again. */ -int ap_wait_init_apqn_bindings_complete(unsigned long timeout); +int ap_wait_apqn_bindings_complete(unsigned long timeout); void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg); void ap_send_online_uevent(struct ap_device *ap_dev, int online); diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h index c083ce88a9a6..2f66271b8564 100644 --- a/drivers/s390/crypto/ap_debug.h +++ b/drivers/s390/crypto/ap_debug.h @@ -16,7 +16,7 @@ #define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO) #define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO) -#define DBF_MAX_SPRINTF_ARGS 6 +#define AP_DBF_MAX_SPRINTF_ARGS 6 #define AP_DBF(...) \ debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__) @@ -26,8 +26,6 @@ debug_sprintf_event(ap_dbf_info, DBF_WARN, ##__VA_ARGS__) #define AP_DBF_INFO(...) \ debug_sprintf_event(ap_dbf_info, DBF_INFO, ##__VA_ARGS__) -#define AP_DBF_DBG(...) \ - debug_sprintf_event(ap_dbf_info, DBF_DEBUG, ##__VA_ARGS__) extern debug_info_t *ap_dbf_info; diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 682595443145..6e4e8d324a6d 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -136,6 +136,8 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq) switch (status.response_code) { case AP_RESPONSE_NORMAL: + print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1, + aq->reply->msg, aq->reply->len, false); aq->queue_count = max_t(int, 0, aq->queue_count - 1); if (!status.queue_empty && !aq->queue_count) aq->queue_count++; @@ -169,6 +171,9 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq) aq->queue_count = 0; list_splice_init(&aq->pendingq, &aq->requestq); aq->requestq_count += aq->pendingq_count; + pr_debug("%s queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n", + __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid), + aq->pendingq_count, aq->requestq_count); aq->pendingq_count = 0; break; default: @@ -243,6 +248,8 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq) /* Start the next request on the queue. 
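Throughout the series the DBF_DEBUG-level entries are replaced with pr_debug() and print_hex_dump_debug(), so fine-grained tracing is gated by dynamic debug rather than the s390 debug feature level. A small usage sketch (the buffer and its length are made up for illustration):

#include <linux/printk.h>
#include <linux/types.h>

static void example_trace_reply(const void *msg, size_t len)
{
	/* Compiled out unless DEBUG or dynamic debug enables this site */
	pr_debug("%s received %zu reply bytes\n", __func__, len);

	/* Dump the payload: 16 bytes per row, 1-byte groups, no ASCII column */
	print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     msg, len, false);
}

With CONFIG_DYNAMIC_DEBUG both call sites can be switched on at runtime through /sys/kernel/debug/dynamic_debug/control, which keeps the new apreq/aprpl hex dumps in ap_queue.c cheap while they are disabled.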
*/ ap_msg = list_entry(aq->requestq.next, struct ap_message, list); + print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS, 16, 1, + ap_msg->msg, ap_msg->len, false); status = __ap_send(qid, ap_msg->psmid, ap_msg->msg, ap_msg->len, ap_msg->flags & AP_MSG_FLAG_SPECIAL); @@ -446,9 +453,9 @@ static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq) case AP_BS_Q_USABLE: /* association is through */ aq->sm_state = AP_SM_STATE_IDLE; - AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n", - __func__, AP_QID_CARD(aq->qid), - AP_QID_QUEUE(aq->qid), aq->assoc_idx); + pr_debug("%s queue 0x%02x.%04x associated with %u\n", + __func__, AP_QID_CARD(aq->qid), + AP_QID_QUEUE(aq->qid), aq->assoc_idx); return AP_SM_WAIT_NONE; case AP_BS_Q_USABLE_NO_SECURE_KEY: /* association still pending */ @@ -690,9 +697,9 @@ static ssize_t ap_functions_show(struct device *dev, status = ap_test_queue(aq->qid, 1, &hwinfo); if (status.response_code > AP_RESPONSE_BUSY) { - AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n", - __func__, status.response_code, - AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n", + __func__, status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return -EIO; } @@ -846,9 +853,9 @@ static ssize_t se_bind_show(struct device *dev, status = ap_test_queue(aq->qid, 1, &hwinfo); if (status.response_code > AP_RESPONSE_BUSY) { - AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n", - __func__, status.response_code, - AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n", + __func__, status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return -EIO; } @@ -974,9 +981,9 @@ static ssize_t se_associate_show(struct device *dev, status = ap_test_queue(aq->qid, 1, &hwinfo); if (status.response_code > AP_RESPONSE_BUSY) { - AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n", - __func__, status.response_code, - AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n", + __func__, status.response_code, + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); return -EIO; } diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 6cfb6b2340c9..dccf664a3d95 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -42,24 +42,23 @@ MODULE_DESCRIPTION("s390 protected key interface"); * debug feature data and functions */ -static debug_info_t *debug_info; +static debug_info_t *pkey_dbf_info; -#define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__) -#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__) -#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__) -#define DEBUG_ERR(...) debug_sprintf_event(debug_info, 3, ##__VA_ARGS__) +#define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__) +#define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__) +#define PKEY_DBF_ERR(...) 
debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__) static void __init pkey_debug_init(void) { /* 5 arguments per dbf entry (including the format string ptr) */ - debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long)); - debug_register_view(debug_info, &debug_sprintf_view); - debug_set_level(debug_info, 3); + pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long)); + debug_register_view(pkey_dbf_info, &debug_sprintf_view); + debug_set_level(pkey_dbf_info, 3); } static void __exit pkey_debug_exit(void) { - debug_unregister(debug_info); + debug_unregister(pkey_dbf_info); } /* inside view of a protected key token (only type 0x00 version 0x01) */ @@ -163,14 +162,14 @@ static int pkey_clr2protkey(u32 keytype, const u8 *clrkey, fc = CPACF_PCKMO_ENC_ECC_ED448_KEY; break; default: - DEBUG_ERR("%s unknown/unsupported keytype %u\n", - __func__, keytype); + PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", + __func__, keytype); return -EINVAL; } if (*protkeylen < keysize + AES_WK_VP_SIZE) { - DEBUG_ERR("%s prot key buffer size too small: %u < %d\n", - __func__, *protkeylen, keysize + AES_WK_VP_SIZE); + PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n", + __func__, *protkeylen, keysize + AES_WK_VP_SIZE); return -EINVAL; } @@ -182,7 +181,7 @@ static int pkey_clr2protkey(u32 keytype, const u8 *clrkey, } /* check for the pckmo subfunction we need now */ if (!cpacf_test_func(&pckmo_functions, fc)) { - DEBUG_ERR("%s pckmo functions not available\n", __func__); + PKEY_DBF_ERR("%s pckmo functions not available\n", __func__); return -ENODEV; } @@ -244,7 +243,7 @@ static int pkey_skey2pkey(const u8 *key, u8 *protkey, } if (rc) - DEBUG_DBG("%s failed rc=%d\n", __func__, rc); + pr_debug("%s failed rc=%d\n", __func__, rc); return rc; } @@ -283,7 +282,7 @@ static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen, out: kfree(apqns); if (rc) - DEBUG_DBG("%s failed rc=%d\n", __func__, rc); + pr_debug("%s failed rc=%d\n", __func__, rc); return rc; } @@ -294,33 +293,36 @@ static int pkey_ep11key2pkey(const u8 *key, size_t keylen, u8 *protkey, u32 *protkeylen, u32 *protkeytype) { u32 nr_apqns, *apqns = NULL; + int i, j, rc = -ENODEV; u16 card, dom; - int i, rc; zcrypt_wait_api_operational(); - /* build a list of apqns suitable for this key */ - rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, - ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4, - ep11_kb_wkvp(key, keylen)); - if (rc) - goto out; + /* try two times in case of failure */ + for (i = 0; i < 2 && rc; i++) { - /* go through the list of apqns and try to derive an pkey */ - for (rc = -ENODEV, i = 0; i < nr_apqns; i++) { - card = apqns[i] >> 16; - dom = apqns[i] & 0xFFFF; - rc = ep11_kblob2protkey(card, dom, key, keylen, - protkey, protkeylen, protkeytype); - if (rc == 0) - break; + /* build a list of apqns suitable for this key */ + rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, + ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4, + ep11_kb_wkvp(key, keylen)); + if (rc) + continue; /* retry findcard on failure */ + + /* go through the list of apqns and try to derive an pkey */ + for (rc = -ENODEV, j = 0; j < nr_apqns && rc; j++) { + card = apqns[j] >> 16; + dom = apqns[j] & 0xFFFF; + rc = ep11_kblob2protkey(card, dom, key, keylen, + protkey, protkeylen, protkeytype); + } + + kfree(apqns); } -out: - kfree(apqns); if (rc) - DEBUG_DBG("%s failed rc=%d\n", __func__, rc); + pr_debug("%s failed rc=%d\n", __func__, rc); + return rc; } @@ -336,7 +338,7 @@ static int pkey_verifykey(const struct pkey_seckey *seckey, int rc; /* check the secure key for valid AES secure key */ - rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *)seckey, 0); + rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, (u8 *)seckey, 0); if (rc) goto out; if (pattributes) @@ -351,7 +353,7 @@ static int pkey_verifykey(const struct pkey_seckey *seckey, if (rc > 0) { /* key mkvp matches to old master key mkvp */ - DEBUG_DBG("%s secure key has old mkvp\n", __func__); + pr_debug("%s secure key has old mkvp\n", __func__); if (pattributes) *pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP; rc = 0; @@ -363,7 +365,7 @@ static int pkey_verifykey(const struct pkey_seckey *seckey, *pdomain = domain; out: - DEBUG_DBG("%s rc=%d\n", __func__, rc); + pr_debug("%s rc=%d\n", __func__, rc); return rc; } @@ -379,8 +381,8 @@ static int pkey_genprotkey(u32 keytype, u8 *protkey, keysize = pkey_keytype_aes_to_size(keytype); if (!keysize) { - DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__, - keytype); + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", __func__, + keytype); return -EINVAL; } @@ -428,13 +430,13 @@ static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen, fc = CPACF_KMC_PAES_256; break; default: - DEBUG_ERR("%s unknown/unsupported keytype %u\n", __func__, - protkeytype); + PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__, + protkeytype); return -EINVAL; } if (protkeylen != pkeylen) { - DEBUG_ERR("%s invalid protected key size %u for keytype %u\n", - __func__, protkeylen, protkeytype); + PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n", + __func__, protkeylen, protkeytype); return -EINVAL; } @@ -446,7 +448,7 @@ static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen, k = cpacf_kmc(fc | CPACF_ENCRYPT, ¶m, null_msg, dest_buf, sizeof(null_msg)); if (k != sizeof(null_msg)) { - DEBUG_ERR("%s protected key is not valid\n", __func__); + PKEY_DBF_ERR("%s protected key is not valid\n", __func__); return -EKEYREJECTED; } @@ -464,13 +466,13 @@ static int nonccatokaes2pkey(const struct clearkeytoken *t, keysize = pkey_keytype_aes_to_size(t->keytype); if (!keysize) { - DEBUG_ERR("%s unknown/unsupported keytype %u\n", - __func__, t->keytype); + PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", + __func__, t->keytype); return -EINVAL; } if (t->len != keysize) { - DEBUG_ERR("%s non clear key aes token: invalid key len %u\n", - __func__, t->len); + PKEY_DBF_ERR("%s non clear key aes token: invalid key len %u\n", + __func__, t->len); return -EINVAL; } @@ -505,7 +507,7 @@ try_via_ep11: goto out; failure: - DEBUG_ERR("%s unable to build protected key from clear", __func__); + PKEY_DBF_ERR("%s unable to build protected key from clear", __func__); out: kfree(tmpbuf); @@ -536,14 +538,14 @@ static int nonccatokecc2pkey(const struct clearkeytoken *t, keylen = 64; break; default: - DEBUG_ERR("%s unknown/unsupported keytype %u\n", - __func__, t->keytype); + PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", + __func__, 
t->keytype); return -EINVAL; } if (t->len != keylen) { - DEBUG_ERR("%s non clear key ecc token: invalid key len %u\n", - __func__, t->len); + PKEY_DBF_ERR("%s non clear key ecc token: invalid key len %u\n", + __func__, t->len); return -EINVAL; } @@ -551,8 +553,8 @@ static int nonccatokecc2pkey(const struct clearkeytoken *t, rc = pkey_clr2protkey(t->keytype, t->clearkey, protkey, protkeylen, protkeytype); if (rc) { - DEBUG_ERR("%s unable to build protected key from clear", - __func__); + PKEY_DBF_ERR("%s unable to build protected key from clear", + __func__); } return rc; @@ -604,15 +606,15 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, protkeylen, protkeytype); break; default: - DEBUG_ERR("%s unknown/unsupported non cca clear key type %u\n", - __func__, t->keytype); + PKEY_DBF_ERR("%s unknown/unsupported non cca clear key type %u\n", + __func__, t->keytype); return -EINVAL; } break; } case TOKVER_EP11_AES: { /* check ep11 key for exportable as protected key */ - rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1); + rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1); if (rc) goto out; rc = pkey_ep11key2pkey(key, keylen, @@ -621,15 +623,16 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, } case TOKVER_EP11_AES_WITH_HEADER: /* check ep11 key with header for exportable as protected key */ - rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1); + rc = ep11_check_aes_key_with_hdr(pkey_dbf_info, + 3, key, keylen, 1); if (rc) goto out; rc = pkey_ep11key2pkey(key, keylen, protkey, protkeylen, protkeytype); break; default: - DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n", - __func__, hdr->version); + PKEY_DBF_ERR("%s unknown/unsupported non-CCA token version %d\n", + __func__, hdr->version); } out: @@ -654,8 +657,8 @@ static int pkey_ccainttok2pkey(const u8 *key, u32 keylen, return -EINVAL; break; default: - DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n", - __func__, hdr->version); + PKEY_DBF_ERR("%s unknown/unsupported CCA internal token version %d\n", + __func__, hdr->version); return -EINVAL; } @@ -672,7 +675,7 @@ int pkey_keyblob2pkey(const u8 *key, u32 keylen, int rc; if (keylen < sizeof(struct keytoken_header)) { - DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen); + PKEY_DBF_ERR("%s invalid keylen %d\n", __func__, keylen); return -EINVAL; } @@ -686,12 +689,12 @@ int pkey_keyblob2pkey(const u8 *key, u32 keylen, protkey, protkeylen, protkeytype); break; default: - DEBUG_ERR("%s unknown/unsupported blob type %d\n", - __func__, hdr->type); + PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n", + __func__, hdr->type); return -EINVAL; } - DEBUG_DBG("%s rc=%d\n", __func__, rc); + pr_debug("%s rc=%d\n", __func__, rc); return rc; } EXPORT_SYMBOL(pkey_keyblob2pkey); @@ -839,7 +842,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, hdr->version == TOKVER_CCA_AES) { struct secaeskeytoken *t = (struct secaeskeytoken *)key; - rc = cca_check_secaeskeytoken(debug_info, 3, key, 0); + rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0); if (rc) goto out; if (ktype) @@ -869,7 +872,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, hdr->version == TOKVER_CCA_VLSC) { struct cipherkeytoken *t = (struct cipherkeytoken *)key; - rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1); + rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1); if (rc) goto out; if (ktype) @@ -907,7 +910,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, struct ep11keyblob *kb = (struct ep11keyblob *)key; 
int api; - rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1); + rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1); if (rc) goto out; if (ktype) @@ -933,8 +936,8 @@ static int pkey_verifykey2(const u8 *key, size_t keylen, struct ep11kblob_header *kh = (struct ep11kblob_header *)key; int api; - rc = ep11_check_aes_key_with_hdr(debug_info, 3, - key, keylen, 1); + rc = ep11_check_aes_key_with_hdr(pkey_dbf_info, + 3, key, keylen, 1); if (rc) goto out; if (ktype) @@ -981,25 +984,27 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, if (hdr->version == TOKVER_CCA_AES) { if (keylen != sizeof(struct secaeskeytoken)) return -EINVAL; - if (cca_check_secaeskeytoken(debug_info, 3, key, 0)) + if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0)) return -EINVAL; } else if (hdr->version == TOKVER_CCA_VLSC) { if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) return -EINVAL; - if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1)) + if (cca_check_secaescipherkey(pkey_dbf_info, + 3, key, 0, 1)) return -EINVAL; } else { - DEBUG_ERR("%s unknown CCA internal token version %d\n", - __func__, hdr->version); + PKEY_DBF_ERR("%s unknown CCA internal token version %d\n", + __func__, hdr->version); return -EINVAL; } } else if (hdr->type == TOKTYPE_NON_CCA) { if (hdr->version == TOKVER_EP11_AES) { - if (ep11_check_aes_key(debug_info, 3, key, keylen, 1)) + if (ep11_check_aes_key(pkey_dbf_info, + 3, key, keylen, 1)) return -EINVAL; } else if (hdr->version == TOKVER_EP11_AES_WITH_HEADER) { - if (ep11_check_aes_key_with_hdr(debug_info, 3, - key, keylen, 1)) + if (ep11_check_aes_key_with_hdr(pkey_dbf_info, + 3, key, keylen, 1)) return -EINVAL; } else { return pkey_nonccatok2pkey(key, keylen, @@ -1007,8 +1012,8 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, protkeytype); } } else { - DEBUG_ERR("%s unknown/unsupported blob type %d\n", - __func__, hdr->type); + PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n", + __func__, hdr->type); return -EINVAL; } @@ -1234,50 +1239,53 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, hdr->version == TOKVER_EP11_AES_WITH_HEADER && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { /* EP11 AES key blob with header */ - if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1)) + if (ep11_check_aes_key_with_hdr(pkey_dbf_info, + 3, key, keylen, 1)) return -EINVAL; } else if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_ECC_WITH_HEADER && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { /* EP11 ECC key blob with header */ - if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1)) + if (ep11_check_ecc_key_with_hdr(pkey_dbf_info, + 3, key, keylen, 1)) return -EINVAL; } else if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_AES && is_ep11_keyblob(key)) { /* EP11 AES key blob with header in session field */ - if (ep11_check_aes_key(debug_info, 3, key, keylen, 1)) + if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1)) return -EINVAL; } else if (hdr->type == TOKTYPE_CCA_INTERNAL) { if (hdr->version == TOKVER_CCA_AES) { /* CCA AES data key */ if (keylen != sizeof(struct secaeskeytoken)) return -EINVAL; - if (cca_check_secaeskeytoken(debug_info, 3, key, 0)) + if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0)) return -EINVAL; } else if (hdr->version == TOKVER_CCA_VLSC) { /* CCA AES cipher key */ if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) return -EINVAL; - if (cca_check_secaescipherkey(debug_info, 3, key, 0, 
1)) + if (cca_check_secaescipherkey(pkey_dbf_info, + 3, key, 0, 1)) return -EINVAL; } else { - DEBUG_ERR("%s unknown CCA internal token version %d\n", - __func__, hdr->version); + PKEY_DBF_ERR("%s unknown CCA internal token version %d\n", + __func__, hdr->version); return -EINVAL; } } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { /* CCA ECC (private) key */ if (keylen < sizeof(struct eccprivkeytoken)) return -EINVAL; - if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1)) + if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1)) return -EINVAL; } else if (hdr->type == TOKTYPE_NON_CCA) { return pkey_nonccatok2pkey(key, keylen, protkey, protkeylen, protkeytype); } else { - DEBUG_ERR("%s unknown/unsupported blob type %d\n", - __func__, hdr->type); + PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n", + __func__, hdr->type); return -EINVAL; } @@ -1350,7 +1358,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, return -EFAULT; rc = cca_genseckey(kgs.cardnr, kgs.domain, kgs.keytype, kgs.seckey.seckey); - DEBUG_DBG("%s cca_genseckey()=%d\n", __func__, rc); + pr_debug("%s cca_genseckey()=%d\n", __func__, rc); if (rc) break; if (copy_to_user(ugs, &kgs, sizeof(kgs))) @@ -1365,7 +1373,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, return -EFAULT; rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype, kcs.clrkey.clrkey, kcs.seckey.seckey); - DEBUG_DBG("%s cca_clr2seckey()=%d\n", __func__, rc); + pr_debug("%s cca_clr2seckey()=%d\n", __func__, rc); if (rc) break; if (copy_to_user(ucs, &kcs, sizeof(kcs))) @@ -1383,7 +1391,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, rc = cca_sec2protkey(ksp.cardnr, ksp.domain, ksp.seckey.seckey, ksp.protkey.protkey, &ksp.protkey.len, &ksp.protkey.type); - DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc); + pr_debug("%s cca_sec2protkey()=%d\n", __func__, rc); if (rc) break; if (copy_to_user(usp, &ksp, sizeof(ksp))) @@ -1400,7 +1408,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, rc = pkey_clr2protkey(kcp.keytype, kcp.clrkey.clrkey, kcp.protkey.protkey, &kcp.protkey.len, &kcp.protkey.type); - DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc); + pr_debug("%s pkey_clr2protkey()=%d\n", __func__, rc); if (rc) break; if (copy_to_user(ucp, &kcp, sizeof(kcp))) @@ -1416,7 +1424,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, return -EFAULT; rc = cca_findcard(kfc.seckey.seckey, &kfc.cardnr, &kfc.domain, 1); - DEBUG_DBG("%s cca_findcard()=%d\n", __func__, rc); + pr_debug("%s cca_findcard()=%d\n", __func__, rc); if (rc < 0) break; if (copy_to_user(ufc, &kfc, sizeof(kfc))) @@ -1432,7 +1440,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ksp.protkey.len = sizeof(ksp.protkey.protkey); rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey, &ksp.protkey.len, &ksp.protkey.type); - DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc); + pr_debug("%s pkey_skey2pkey()=%d\n", __func__, rc); if (rc) break; if (copy_to_user(usp, &ksp, sizeof(ksp))) @@ -1447,7 +1455,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, return -EFAULT; rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain, &kvk.keysize, &kvk.attributes); - DEBUG_DBG("%s pkey_verifykey()=%d\n", __func__, rc); + pr_debug("%s pkey_verifykey()=%d\n", __func__, rc); if (rc) break; if (copy_to_user(uvk, &kvk, sizeof(kvk))) @@ -1463,7 +1471,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, 
kgp.protkey.len = sizeof(kgp.protkey.protkey); rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey, &kgp.protkey.len, &kgp.protkey.type); - DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc); + pr_debug("%s pkey_genprotkey()=%d\n", __func__, rc); if (rc) break; if (copy_to_user(ugp, &kgp, sizeof(kgp))) @@ -1478,7 +1486,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, return -EFAULT; rc = pkey_verifyprotkey(kvp.protkey.protkey, kvp.protkey.len, kvp.protkey.type); - DEBUG_DBG("%s pkey_verifyprotkey()=%d\n", __func__, rc); + pr_debug("%s pkey_verifyprotkey()=%d\n", __func__, rc); break; } case PKEY_KBLOB2PROTK: { @@ -1494,7 +1502,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, ktp.protkey.len = sizeof(ktp.protkey.protkey); rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey, &ktp.protkey.len, &ktp.protkey.type); - DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc); + pr_debug("%s pkey_keyblob2pkey()=%d\n", __func__, rc); memzero_explicit(kkey, ktp.keylen); kfree(kkey); if (rc) @@ -1523,7 +1531,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, rc = pkey_genseckey2(apqns, kgs.apqn_entries, kgs.type, kgs.size, kgs.keygenflags, kkey, &klen); - DEBUG_DBG("%s pkey_genseckey2()=%d\n", __func__, rc); + pr_debug("%s pkey_genseckey2()=%d\n", __func__, rc); kfree(apqns); if (rc) { kfree(kkey); @@ -1565,7 +1573,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, rc = pkey_clr2seckey2(apqns, kcs.apqn_entries, kcs.type, kcs.size, kcs.keygenflags, kcs.clrkey.clrkey, kkey, &klen); - DEBUG_DBG("%s pkey_clr2seckey2()=%d\n", __func__, rc); + pr_debug("%s pkey_clr2seckey2()=%d\n", __func__, rc); kfree(apqns); if (rc) { kfree(kkey); @@ -1601,7 +1609,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, rc = pkey_verifykey2(kkey, kvk.keylen, &kvk.cardnr, &kvk.domain, &kvk.type, &kvk.size, &kvk.flags); - DEBUG_DBG("%s pkey_verifykey2()=%d\n", __func__, rc); + pr_debug("%s pkey_verifykey2()=%d\n", __func__, rc); kfree(kkey); if (rc) break; @@ -1630,7 +1638,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, kkey, ktp.keylen, ktp.protkey.protkey, &ktp.protkey.len, &ktp.protkey.type); - DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc); + pr_debug("%s pkey_keyblob2pkey2()=%d\n", __func__, rc); kfree(apqns); memzero_explicit(kkey, ktp.keylen); kfree(kkey); @@ -1664,7 +1672,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, } rc = pkey_apqns4key(kkey, kak.keylen, kak.flags, apqns, &nr_apqns); - DEBUG_DBG("%s pkey_apqns4key()=%d\n", __func__, rc); + pr_debug("%s pkey_apqns4key()=%d\n", __func__, rc); kfree(kkey); if (rc && rc != -ENOSPC) { kfree(apqns); @@ -1707,7 +1715,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, } rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp, kat.flags, apqns, &nr_apqns); - DEBUG_DBG("%s pkey_apqns4keytype()=%d\n", __func__, rc); + pr_debug("%s pkey_apqns4keytype()=%d\n", __func__, rc); if (rc && rc != -ENOSPC) { kfree(apqns); break; @@ -1757,7 +1765,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries, kkey, ktp.keylen, protkey, &protkeylen, &ktp.pkeytype); - DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc); + pr_debug("%s pkey_keyblob2pkey3()=%d\n", __func__, rc); kfree(apqns); memzero_explicit(kkey, ktp.keylen); kfree(kkey); diff --git a/drivers/s390/crypto/vfio_ap_drv.c 
b/drivers/s390/crypto/vfio_ap_drv.c index a5ab03e42ff1..4aeb3e1213c7 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -60,7 +60,7 @@ static void vfio_ap_matrix_dev_release(struct device *dev) kfree(matrix_dev); } -static struct bus_type matrix_bus = { +static const struct bus_type matrix_bus = { .name = "matrix", }; diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 983b3b16196c..fc169bc61593 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -659,6 +659,21 @@ static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev) AP_DOMAINS); } +static bool _queue_passable(struct vfio_ap_queue *q) +{ + if (!q) + return false; + + switch (q->reset_status.response_code) { + case AP_RESPONSE_NORMAL: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + return true; + default: + return false; + } +} + /* * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev * to ensure no queue devices are passed through to @@ -687,7 +702,6 @@ static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev, unsigned long apid, apqi, apqn; DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES); DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS); - struct vfio_ap_queue *q; bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES); bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS); @@ -716,8 +730,7 @@ static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev, * hardware device. */ apqn = AP_MKQID(apid, apqi); - q = vfio_ap_mdev_get_queue(matrix_mdev, apqn); - if (!q || q->reset_status.response_code) { + if (!_queue_passable(vfio_ap_mdev_get_queue(matrix_mdev, apqn))) { clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); /* @@ -1691,6 +1704,7 @@ static int apq_status_check(int apqn, struct ap_queue_status *status) switch (status->response_code) { case AP_RESPONSE_NORMAL: case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: return 0; case AP_RESPONSE_RESET_IN_PROGRESS: case AP_RESPONSE_BUSY: @@ -1747,14 +1761,6 @@ static void apq_reset_check(struct work_struct *reset_work) memcpy(&q->reset_status, &status, sizeof(status)); continue; } - /* - * When an AP adapter is deconfigured, the - * associated queues are reset, so let's set the - * status response code to 0 so the queue may be - * passed through (i.e., not filtered) - */ - if (status.response_code == AP_RESPONSE_DECONFIGURED) - q->reset_status.response_code = 0; if (q->saved_isc != VFIO_AP_ISC_INVALID) vfio_ap_free_aqic_resources(q); break; @@ -1781,12 +1787,7 @@ static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q) queue_work(system_long_wq, &q->reset_work); break; case AP_RESPONSE_DECONFIGURED: - /* - * When an AP adapter is deconfigured, the associated - * queues are reset, so let's set the status response code to 0 - * so the queue may be passed through (i.e., not filtered). 
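With _queue_passable() above, the pass-through decision lives in one place instead of being made by patching reset_status.response_code to 0 wherever AP_RESPONSE_DECONFIGURED shows up. A condensed restatement of the resulting logic, pulling the predicate and its filter-site use from the hunks above together for readability (not new code):

/* A queue may be handed to the guest only in a benign terminal state. */
static bool _queue_passable(struct vfio_ap_queue *q)
{
	if (!q)
		return false;

	switch (q->reset_status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
		return true;
	default:
		return false;
	}
}

	/* filter site: drop the adapter if one of its queues is not passable */
	if (!_queue_passable(vfio_ap_mdev_get_queue(matrix_mdev, apqn)))
		clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);

Treating AP_RESPONSE_CHECKSTOPPED like AP_RESPONSE_DECONFIGURED is the behavioural change: a checkstopped queue is no longer filtered out of the guest's matrix, and apq_status_check() accepts it as a completed reset.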
- */ - q->reset_status.response_code = 0; + case AP_RESPONSE_CHECKSTOPPED: vfio_ap_free_aqic_resources(q); break; default: diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 74200f54dfff..02c503f16bc2 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -12,6 +12,9 @@ * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com> */ +#define KMSG_COMPONENT "zcrypt" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -57,10 +60,6 @@ DEFINE_SPINLOCK(zcrypt_list_lock); LIST_HEAD(zcrypt_card_list); static atomic_t zcrypt_open_count = ATOMIC_INIT(0); -static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); - -atomic_t zcrypt_rescan_req = ATOMIC_INIT(0); -EXPORT_SYMBOL(zcrypt_rescan_req); static LIST_HEAD(zcrypt_ops_list); @@ -69,20 +68,15 @@ debug_info_t *zcrypt_dbf_info; /* * Process a rescan of the transport layer. - * - * Returns 1, if the rescan has been processed, otherwise 0. + * Runs a synchronous AP bus rescan. + * Returns true if something has changed (for example the + * bus scan has found and build up new devices) and it is + * worth to do a retry. Otherwise false is returned meaning + * no changes on the AP bus level. */ -static inline int zcrypt_process_rescan(void) -{ - if (atomic_read(&zcrypt_rescan_req)) { - atomic_set(&zcrypt_rescan_req, 0); - atomic_inc(&zcrypt_rescan_count); - ap_bus_force_rescan(); - ZCRYPT_DBF_INFO("%s rescan count=%07d\n", __func__, - atomic_inc_return(&zcrypt_rescan_count)); - return 1; - } - return 0; +static inline bool zcrypt_process_rescan(void) +{ + return ap_bus_force_rescan(); } void zcrypt_msgtype_register(struct zcrypt_ops *zops) @@ -715,8 +709,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); if (!pref_zq) { - ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", - __func__); + pr_debug("%s no matching queue found => ENODEV\n", __func__); rc = -ENODEV; goto out; } @@ -820,8 +813,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); if (!pref_zq) { - ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", - __func__); + pr_debug("%s no matching queue found => ENODEV\n", __func__); rc = -ENODEV; goto out; } @@ -865,6 +857,8 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); if (rc) goto out; + print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1, + ap_msg.msg, ap_msg.len, false); tdom = *domain; if (perms != &ap_perms && tdom < AP_DOMAINS) { @@ -940,8 +934,8 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); if (!pref_zq) { - ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n", - __func__, xcrb->user_defined, *domain); + pr_debug("%s no match for address %02x.%04x => ENODEV\n", + __func__, xcrb->user_defined, *domain); rc = -ENODEV; goto out; } @@ -952,6 +946,10 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, *domain = AP_QID_QUEUE(qid); rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg); + if (!rc) { + print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1, + ap_msg.msg, ap_msg.len, false); + } spin_lock(&zcrypt_list_lock); zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); @@ -970,7 +968,26 @@ out: long zcrypt_send_cprb(struct ica_xcRB *xcrb) { - return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb); + struct 
zcrypt_track tr; + int rc; + + memset(&tr, 0, sizeof(tr)); + + do { + rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) + do { + rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) + rc = -EIO; + if (rc) + pr_debug("%s rc=%d\n", __func__, rc); + + return rc; } EXPORT_SYMBOL(zcrypt_send_cprb); @@ -1045,6 +1062,8 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); if (rc) goto out_free; + print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1, + ap_msg.msg, ap_msg.len, false); if (perms != &ap_perms && domain < AUTOSEL_DOM) { if (ap_msg.flags & AP_MSG_FLAG_ADMIN) { @@ -1113,15 +1132,15 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, if (!pref_zq) { if (targets && target_num == 1) { - ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n", - __func__, (int)targets->ap_id, - (int)targets->dom_id); + pr_debug("%s no match for address %02x.%04x => ENODEV\n", + __func__, (int)targets->ap_id, + (int)targets->dom_id); } else if (targets) { - ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n", - __func__, (int)target_num); + pr_debug("%s no match for %d target addrs => ENODEV\n", + __func__, (int)target_num); } else { - ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n", - __func__); + pr_debug("%s no match for address ff.ffff => ENODEV\n", + __func__); } rc = -ENODEV; goto out_free; @@ -1129,6 +1148,10 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, qid = pref_zq->queue->qid; rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg); + if (!rc) { + print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1, + ap_msg.msg, ap_msg.len, false); + } spin_lock(&zcrypt_list_lock); zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); @@ -1149,7 +1172,26 @@ out: long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) { - return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb); + struct zcrypt_track tr; + int rc; + + memset(&tr, 0, sizeof(tr)); + + do { + rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) + do { + rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) + rc = -EIO; + if (rc) + pr_debug("%s rc=%d\n", __func__, rc); + + return rc; } EXPORT_SYMBOL(zcrypt_send_ep11_cprb); @@ -1199,8 +1241,7 @@ static long zcrypt_rng(char *buffer) spin_unlock(&zcrypt_list_lock); if (!pref_zq) { - ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", - __func__); + pr_debug("%s no matching queue found => ENODEV\n", __func__); rc = -ENODEV; goto out; } @@ -1431,20 +1472,17 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg) do { rc = zcrypt_rsa_modexpo(perms, &tr, &mex); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && 
(zcrypt_process_rescan())) + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) do { rc = zcrypt_rsa_modexpo(perms, &tr, &mex); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; if (rc) { - ZCRYPT_DBF_DBG("ioctl ICARSAMODEXPO rc=%d\n", rc); + pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc); return rc; } return put_user(mex.outputdatalength, &umex->outputdatalength); @@ -1463,20 +1501,17 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg) do { rc = zcrypt_rsa_crt(perms, &tr, &crt); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && (zcrypt_process_rescan())) + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) do { rc = zcrypt_rsa_crt(perms, &tr, &crt); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; if (rc) { - ZCRYPT_DBF_DBG("ioctl ICARSACRT rc=%d\n", rc); + pr_debug("ioctl ICARSACRT rc=%d\n", rc); return rc; } return put_user(crt.outputdatalength, &ucrt->outputdatalength); @@ -1495,21 +1530,18 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) do { rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && (zcrypt_process_rescan())) + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) do { rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; if (rc) - ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n", - rc, xcrb.status); + pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n", + rc, xcrb.status); if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) return -EFAULT; return rc; @@ -1528,20 +1560,17 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg) do { rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && (zcrypt_process_rescan())) + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) do { rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + } while (rc == -EAGAIN && 
++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; if (rc) - ZCRYPT_DBF_DBG("ioctl ZSENDEP11CPRB rc=%d\n", rc); + pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc); if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) return -EFAULT; return rc; @@ -1670,7 +1699,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, } /* unknown ioctl number */ default: - ZCRYPT_DBF_DBG("unknown ioctl 0x%08x\n", cmd); + pr_debug("unknown ioctl 0x%08x\n", cmd); return -ENOIOCTLCMD; } } @@ -1708,16 +1737,13 @@ static long trans_modexpo32(struct ap_perms *perms, struct file *filp, mex64.n_modulus = compat_ptr(mex32.n_modulus); do { rc = zcrypt_rsa_modexpo(perms, &tr, &mex64); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && (zcrypt_process_rescan())) + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) do { rc = zcrypt_rsa_modexpo(perms, &tr, &mex64); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; if (rc) @@ -1761,16 +1787,13 @@ static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp, crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); do { rc = zcrypt_rsa_crt(perms, &tr, &crt64); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && (zcrypt_process_rescan())) + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) do { rc = zcrypt_rsa_crt(perms, &tr, &crt64); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; if (rc) @@ -1833,16 +1856,13 @@ static long trans_xcrb32(struct ap_perms *perms, struct file *filp, xcrb64.status = xcrb32.status; do { rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && (zcrypt_process_rescan())) + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); + + /* on ENODEV failure: retry once again after a requested rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) do { rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); - if (rc == -EAGAIN) - tr.again_counter++; - } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); + } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length; @@ -1914,8 +1934,8 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) */ if (zcrypt_rng_buffer_index == 0) { rc = zcrypt_rng((char *)zcrypt_rng_buffer); - /* on failure: retry once again after a requested rescan */ - if ((rc == -ENODEV) && 
(zcrypt_process_rescan())) + /* on ENODEV failure: retry once again after an AP bus rescan */ + if (rc == -ENODEV && zcrypt_process_rescan()) rc = zcrypt_rng((char *)zcrypt_rng_buffer); if (rc < 0) return -EIO; @@ -1977,7 +1997,7 @@ void zcrypt_rng_device_remove(void) * an asynchronous job. This function waits until these initial jobs * are done and so the zcrypt api should be ready to serve crypto * requests - if there are resources available. The function uses an - * internal timeout of 60s. The very first caller will either wait for + * internal timeout of 30s. The very first caller will either wait for * ap bus bindings complete or the timeout happens. This state will be * remembered for further callers which will only be blocked until a * decision is made (timeout or bindings complete). @@ -1996,8 +2016,8 @@ int zcrypt_wait_api_operational(void) switch (zcrypt_wait_api_state) { case 0: /* initial state, invoke wait for the ap bus complete */ - rc = ap_wait_init_apqn_bindings_complete( - msecs_to_jiffies(60 * 1000)); + rc = ap_wait_apqn_bindings_complete( + msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS)); switch (rc) { case 0: /* ap bus bindings are complete */ @@ -2014,8 +2034,8 @@ int zcrypt_wait_api_operational(void) break; default: /* other failure */ - ZCRYPT_DBF_DBG("%s ap_wait_init_apqn_bindings_complete()=%d\n", - __func__, rc); + pr_debug("%s ap_wait_init_apqn_bindings_complete()=%d\n", + __func__, rc); break; } break; @@ -2038,7 +2058,7 @@ EXPORT_SYMBOL(zcrypt_wait_api_operational); int __init zcrypt_debug_init(void) { zcrypt_dbf_info = debug_register("zcrypt", 2, 1, - DBF_MAX_SPRINTF_ARGS * sizeof(long)); + ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long)); debug_register_view(zcrypt_dbf_info, &debug_sprintf_view); debug_set_level(zcrypt_dbf_info, DBF_ERR); diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index de659954c8f7..4ed481df57ca 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -38,6 +38,15 @@ */ #define ZCRYPT_RNG_BUFFER_SIZE 4096 +/** + * The zcrypt_wait_api_operational() function waits this + * amount in milliseconds for ap_wait_aqpn_bindings_complete(). + * Also on a cprb send failure with ENODEV the send functions + * trigger an ap bus rescan and wait this time in milliseconds + * for ap_wait_aqpn_bindings_complete() before resending. + */ +#define ZCRYPT_WAIT_BINDINGS_COMPLETE_MS 30000 + /* * Identifier for Crypto Request Performance Index */ diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index 263fe182648b..0a3a678ffc7e 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -23,11 +23,6 @@ #include "zcrypt_msgtype6.h" #include "zcrypt_ccamisc.h" -#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__) -#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__) -#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__) -#define DEBUG_ERR(...) 
ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__) - /* Size of parameter block used for all cca requests/replies */ #define PARMBSIZE 512 @@ -367,8 +362,8 @@ int cca_genseckey(u16 cardnr, u16 domain, memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8); break; default: - DEBUG_ERR("%s unknown/unsupported keybitsize %d\n", - __func__, keybitsize); + ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); rc = -EINVAL; goto out; } @@ -386,15 +381,15 @@ int cca_genseckey(u16 cardnr, u16 domain, /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ rc = zcrypt_send_cprb(&xcrb); if (rc) { - DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR("%s secure key generate failure, card response %d/%d\n", - __func__, + ZCRYPT_DBF_ERR("%s secure key generate failure, card response %d/%d\n", + __func__, (int)prepcblk->ccp_rtcode, (int)prepcblk->ccp_rscode); rc = -EIO; @@ -411,8 +406,8 @@ int cca_genseckey(u16 cardnr, u16 domain, - sizeof(prepparm->lv3.keyblock.toklen) - sizeof(prepparm->lv3.keyblock.tokattr); if (seckeysize != SECKEYBLOBSIZE) { - DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n", - __func__, seckeysize, SECKEYBLOBSIZE); + ZCRYPT_DBF_ERR("%s secure token size mismatch %d != %d bytes\n", + __func__, seckeysize, SECKEYBLOBSIZE); rc = -EIO; goto out; } @@ -505,8 +500,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, keysize = 32; break; default: - DEBUG_ERR("%s unknown/unsupported keybitsize %d\n", - __func__, keybitsize); + ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); rc = -EINVAL; goto out; } @@ -524,17 +519,17 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ rc = zcrypt_send_cprb(&xcrb); if (rc) { - DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR("%s clear key import failure, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_ERR("%s clear key import failure, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); rc = -EIO; goto out; } @@ -549,8 +544,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, - sizeof(prepparm->lv3.keyblock.toklen) - sizeof(prepparm->lv3.keyblock.tokattr); if (seckeysize != SECKEYBLOBSIZE) { - DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n", - __func__, seckeysize, SECKEYBLOBSIZE); + ZCRYPT_DBF_ERR("%s secure token size mismatch %d != %d bytes\n", + __func__, seckeysize, SECKEYBLOBSIZE); rc = -EIO; goto out; } @@ -651,17 +646,17 @@ int cca_sec2protkey(u16 cardnr, u16 domain, /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ rc = zcrypt_send_cprb(&xcrb); if (rc) { - DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; 
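The zcrypt_send_cprb()/zcrypt_send_ep11_cprb() callers in this series all follow the same retry shape: bounded -EAGAIN retries, then one more bounded round after an AP bus rescan if the first round ended in -ENODEV. A distilled sketch of that control flow; example_send(), example_rescan(), EXAMPLE_AGAIN_MAX and the tracking struct are illustrative stand-ins, not symbols from this patch:

#include <linux/errno.h>

#define EXAMPLE_AGAIN_MAX 10

struct example_track {
	int again_counter;
};

/* Placeholders for the real transport call and the AP bus rescan. */
static int example_send(struct example_track *tr)
{
	return 0;
}

static bool example_rescan(void)
{
	return false;
}

static int example_send_with_retry(void)
{
	struct example_track tr = { 0 };
	int rc;

	do {
		rc = example_send(&tr);
	} while (rc == -EAGAIN && ++tr.again_counter < EXAMPLE_AGAIN_MAX);

	/* On -ENODEV try one more round, but only if the rescan changed anything */
	if (rc == -ENODEV && example_rescan())
		do {
			rc = example_send(&tr);
		} while (rc == -EAGAIN && ++tr.again_counter < EXAMPLE_AGAIN_MAX);

	/* Exhausting the -EAGAIN budget is reported as a hard I/O error */
	if (rc == -EAGAIN && tr.again_counter >= EXAMPLE_AGAIN_MAX)
		rc = -EIO;

	return rc;
}

Compared with the old code, the ++tr.again_counter in the loop condition replaces the separate "if (rc == -EAGAIN) tr.again_counter++;" statement, and the extra retry round runs only when zcrypt_process_rescan() reports that the rescan changed anything.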
} /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) rc = -EAGAIN; else @@ -669,10 +664,10 @@ int cca_sec2protkey(u16 cardnr, u16 domain, goto out; } if (prepcblk->ccp_rscode != 0) { - DEBUG_WARN("%s unwrap secure key warning, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); } /* process response cprb param block */ @@ -683,8 +678,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain, /* check the returned keyblock */ if (prepparm->lv3.ckb.version != 0x01 && prepparm->lv3.ckb.version != 0x02) { - DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n", - __func__, (int)prepparm->lv3.ckb.version); + ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x\n", + __func__, (int)prepparm->lv3.ckb.version); rc = -EIO; goto out; } @@ -707,8 +702,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain, *protkeytype = PKEY_KEYTYPE_AES_256; break; default: - DEBUG_ERR("%s unknown/unsupported keylen %d\n", - __func__, prepparm->lv3.ckb.len); + ZCRYPT_DBF_ERR("%s unknown/unsupported keylen %d\n", + __func__, prepparm->lv3.ckb.len); rc = -EIO; goto out; } @@ -840,9 +835,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, case 256: break; default: - DEBUG_ERR( - "%s unknown/unsupported keybitsize %d\n", - __func__, keybitsize); + ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); rc = -EINVAL; goto out; } @@ -880,19 +874,17 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ rc = zcrypt_send_cprb(&xcrb); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR( - "%s cipher key generate failure, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_ERR("%s cipher key generate failure, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); rc = -EIO; goto out; } @@ -905,8 +897,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, /* do some plausibility checks on the key block */ if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) || prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) { - DEBUG_ERR("%s reply with invalid or unknown key block\n", - __func__); + ZCRYPT_DBF_ERR("%s reply with invalid or unknown key block\n", + __func__); rc = -EIO; goto out; } @@ -1048,19 +1040,17 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ rc = zcrypt_send_cprb(&xcrb); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb 
(cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR( - "%s CSNBKPI2 failure, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_ERR("%s CSNBKPI2 failure, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); rc = -EIO; goto out; } @@ -1073,8 +1063,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, /* do some plausibility checks on the key block */ if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) || prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) { - DEBUG_ERR("%s reply with invalid or unknown key block\n", - __func__); + ZCRYPT_DBF_ERR("%s reply with invalid or unknown key block\n", + __func__); rc = -EIO; goto out; } @@ -1132,33 +1122,29 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART", exorbuf, keybitsize, token, &tokensize); if (rc) { - DEBUG_ERR( - "%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n", + __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL, clrkey, keybitsize, token, &tokensize); if (rc) { - DEBUG_ERR( - "%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n", + __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL, exorbuf, keybitsize, token, &tokensize); if (rc) { - DEBUG_ERR( - "%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n", + __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL, NULL, keybitsize, token, &tokensize); if (rc) { - DEBUG_ERR( - "%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n", + __func__, rc); goto out; } @@ -1265,19 +1251,17 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ rc = zcrypt_send_cprb(&xcrb); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR( - "%s unwrap secure key failure, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) rc = -EAGAIN; else @@ -1285,11 +1269,10 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, goto out; } if (prepcblk->ccp_rscode != 0) { - DEBUG_WARN( - "%s unwrap secure key warning, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); } /* process response cprb param block */ @@ -1300,15 +1283,14 @@ int 
cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, /* check the returned keyblock */ if (prepparm->vud.ckb.version != 0x01 && prepparm->vud.ckb.version != 0x02) { - DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n", - __func__, (int)prepparm->vud.ckb.version); + ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x\n", + __func__, (int)prepparm->vud.ckb.version); rc = -EIO; goto out; } if (prepparm->vud.ckb.algo != 0x02) { - DEBUG_ERR( - "%s reply param keyblock algo mismatch 0x%02x != 0x02\n", - __func__, (int)prepparm->vud.ckb.algo); + ZCRYPT_DBF_ERR("%s reply param keyblock algo mismatch 0x%02x != 0x02\n", + __func__, (int)prepparm->vud.ckb.algo); rc = -EIO; goto out; } @@ -1331,8 +1313,8 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, *protkeytype = PKEY_KEYTYPE_AES_256; break; default: - DEBUG_ERR("%s unknown/unsupported keylen %d\n", - __func__, prepparm->vud.ckb.keylen); + ZCRYPT_DBF_ERR("%s unknown/unsupported keylen %d\n", + __func__, prepparm->vud.ckb.keylen); rc = -EIO; goto out; } @@ -1432,19 +1414,17 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ rc = zcrypt_send_cprb(&xcrb); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR( - "%s unwrap secure key failure, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) rc = -EAGAIN; else @@ -1452,11 +1432,10 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, goto out; } if (prepcblk->ccp_rscode != 0) { - DEBUG_WARN( - "%s unwrap secure key warning, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); } /* process response cprb param block */ @@ -1466,23 +1445,22 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, /* check the returned keyblock */ if (prepparm->vud.ckb.version != 0x02) { - DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n", - __func__, (int)prepparm->vud.ckb.version); + ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n", + __func__, (int)prepparm->vud.ckb.version); rc = -EIO; goto out; } if (prepparm->vud.ckb.algo != 0x81) { - DEBUG_ERR( - "%s reply param keyblock algo mismatch 0x%02x != 0x81\n", - __func__, (int)prepparm->vud.ckb.algo); + ZCRYPT_DBF_ERR("%s reply param keyblock algo mismatch 0x%02x != 0x81\n", + __func__, (int)prepparm->vud.ckb.algo); rc = -EIO; goto out; } /* copy the translated protected key */ if (prepparm->vud.ckb.keylen > *protkeylen) { - DEBUG_ERR("%s prot keylen mismatch %d > buffersize %u\n", - __func__, prepparm->vud.ckb.keylen, *protkeylen); + ZCRYPT_DBF_ERR("%s prot keylen mismatch %d > buffersize %u\n", + __func__, prepparm->vud.ckb.keylen, *protkeylen); rc = -EIO; goto out; } @@ -1550,17 +1528,17 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, /* forward xcrb with 
request CPRB and reply CPRB to zcrypt dd */ rc = zcrypt_send_cprb(&xcrb); if (rc) { - DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } /* check response returncode and reasoncode */ if (prepcblk->ccp_rtcode != 0) { - DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n", - __func__, - (int)prepcblk->ccp_rtcode, - (int)prepcblk->ccp_rscode); + ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n", + __func__, + (int)prepcblk->ccp_rtcode, + (int)prepcblk->ccp_rscode); rc = -EIO; goto out; } diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h index 5cf88aabd64b..9a208dc4c200 100644 --- a/drivers/s390/crypto/zcrypt_debug.h +++ b/drivers/s390/crypto/zcrypt_debug.h @@ -17,7 +17,7 @@ #define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO) #define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO) -#define DBF_MAX_SPRINTF_ARGS 6 +#define ZCRYPT_DBF_MAX_SPRINTF_ARGS 6 #define ZCRYPT_DBF(...) \ debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__) @@ -27,8 +27,6 @@ debug_sprintf_event(zcrypt_dbf_info, DBF_WARN, ##__VA_ARGS__) #define ZCRYPT_DBF_INFO(...) \ debug_sprintf_event(zcrypt_dbf_info, DBF_INFO, ##__VA_ARGS__) -#define ZCRYPT_DBF_DBG(...) \ - debug_sprintf_event(zcrypt_dbf_info, DBF_DEBUG, ##__VA_ARGS__) extern debug_info_t *zcrypt_dbf_info; diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c index 0a877f9792c2..eb7f5489ccf9 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.c +++ b/drivers/s390/crypto/zcrypt_ep11misc.c @@ -24,11 +24,6 @@ #include "zcrypt_ep11misc.h" #include "zcrypt_ccamisc.h" -#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__) -#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__) -#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__) -#define DEBUG_ERR(...) 
ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__) - #define EP11_PINBLOB_V1_BYTES 56 /* default iv used here */ @@ -510,7 +505,7 @@ static int check_reply_pl(const u8 *pl, const char *func) /* start tag */ if (*pl++ != 0x30) { - DEBUG_ERR("%s reply start tag mismatch\n", func); + ZCRYPT_DBF_ERR("%s reply start tag mismatch\n", func); return -EIO; } @@ -527,40 +522,41 @@ static int check_reply_pl(const u8 *pl, const char *func) len = *((u16 *)pl); pl += 2; } else { - DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n", - func, *pl); + ZCRYPT_DBF_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n", + func, *pl); return -EIO; } /* len should cover at least 3 fields with 32 bit value each */ if (len < 3 * 6) { - DEBUG_ERR("%s reply length %d too small\n", func, len); + ZCRYPT_DBF_ERR("%s reply length %d too small\n", func, len); return -EIO; } /* function tag, length and value */ if (pl[0] != 0x04 || pl[1] != 0x04) { - DEBUG_ERR("%s function tag or length mismatch\n", func); + ZCRYPT_DBF_ERR("%s function tag or length mismatch\n", func); return -EIO; } pl += 6; /* dom tag, length and value */ if (pl[0] != 0x04 || pl[1] != 0x04) { - DEBUG_ERR("%s dom tag or length mismatch\n", func); + ZCRYPT_DBF_ERR("%s dom tag or length mismatch\n", func); return -EIO; } pl += 6; /* return value tag, length and value */ if (pl[0] != 0x04 || pl[1] != 0x04) { - DEBUG_ERR("%s return value tag or length mismatch\n", func); + ZCRYPT_DBF_ERR("%s return value tag or length mismatch\n", + func); return -EIO; } pl += 2; ret = *((u32 *)pl); if (ret != 0) { - DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret); + ZCRYPT_DBF_ERR("%s return value 0x%04x != 0\n", func, ret); return -EIO; } @@ -626,9 +622,8 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, rc = zcrypt_send_ep11_cprb(urb); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int)cardnr, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int)cardnr, (int)domain, rc); goto out; } @@ -636,13 +631,13 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, if (rc) goto out; if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) { - DEBUG_ERR("%s unknown reply data format\n", __func__); + ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__); rc = -EIO; goto out; } if (rep_pl->data_len > buflen) { - DEBUG_ERR("%s mismatch between reply data len and buffer len\n", - __func__); + ZCRYPT_DBF_ERR("%s mismatch between reply data len and buffer len\n", + __func__); rc = -ENOSPC; goto out; } @@ -816,9 +811,8 @@ static int _ep11_genaeskey(u16 card, u16 domain, case 256: break; default: - DEBUG_ERR( - "%s unknown/unsupported keybitsize %d\n", - __func__, keybitsize); + ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); rc = -EINVAL; goto out; } @@ -878,9 +872,8 @@ static int _ep11_genaeskey(u16 card, u16 domain, rc = zcrypt_send_ep11_cprb(urb); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int)card, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int)card, (int)domain, rc); goto out; } @@ -888,13 +881,13 @@ static int _ep11_genaeskey(u16 card, u16 domain, if (rc) goto out; if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) { - DEBUG_ERR("%s unknown reply data format\n", __func__); + ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__); rc = -EIO; goto out; } if (rep_pl->data_len 
> *keybufsize) { - DEBUG_ERR("%s mismatch reply data len / key buffer len\n", - __func__); + ZCRYPT_DBF_ERR("%s mismatch reply data len / key buffer len\n", + __func__); rc = -ENOSPC; goto out; } @@ -1030,9 +1023,8 @@ static int ep11_cryptsingle(u16 card, u16 domain, rc = zcrypt_send_ep11_cprb(urb); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int)card, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int)card, (int)domain, rc); goto out; } @@ -1040,7 +1032,7 @@ static int ep11_cryptsingle(u16 card, u16 domain, if (rc) goto out; if (rep_pl->data_tag != 0x04) { - DEBUG_ERR("%s unknown reply data format\n", __func__); + ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__); rc = -EIO; goto out; } @@ -1053,14 +1045,14 @@ static int ep11_cryptsingle(u16 card, u16 domain, n = *((u16 *)p); p += 2; } else { - DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n", - __func__, rep_pl->data_lenfmt); + ZCRYPT_DBF_ERR("%s unknown reply data length format 0x%02hhx\n", + __func__, rep_pl->data_lenfmt); rc = -EIO; goto out; } if (n > *outbufsize) { - DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n", - __func__, n, *outbufsize); + ZCRYPT_DBF_ERR("%s mismatch reply data len %d / output buffer %zu\n", + __func__, n, *outbufsize); rc = -ENOSPC; goto out; } @@ -1188,9 +1180,8 @@ static int _ep11_unwrapkey(u16 card, u16 domain, rc = zcrypt_send_ep11_cprb(urb); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int)card, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int)card, (int)domain, rc); goto out; } @@ -1198,13 +1189,13 @@ static int _ep11_unwrapkey(u16 card, u16 domain, if (rc) goto out; if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) { - DEBUG_ERR("%s unknown reply data format\n", __func__); + ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__); rc = -EIO; goto out; } if (rep_pl->data_len > *keybufsize) { - DEBUG_ERR("%s mismatch reply data len / key buffer len\n", - __func__); + ZCRYPT_DBF_ERR("%s mismatch reply data len / key buffer len\n", + __func__); rc = -ENOSPC; goto out; } @@ -1343,9 +1334,8 @@ static int _ep11_wrapkey(u16 card, u16 domain, rc = zcrypt_send_ep11_cprb(urb); if (rc) { - DEBUG_ERR( - "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", - __func__, (int)card, (int)domain, rc); + ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", + __func__, (int)card, (int)domain, rc); goto out; } @@ -1353,13 +1343,13 @@ static int _ep11_wrapkey(u16 card, u16 domain, if (rc) goto out; if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) { - DEBUG_ERR("%s unknown reply data format\n", __func__); + ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__); rc = -EIO; goto out; } if (rep_pl->data_len > *datasize) { - DEBUG_ERR("%s mismatch reply data len / data buffer len\n", - __func__); + ZCRYPT_DBF_ERR("%s mismatch reply data len / data buffer len\n", + __func__); rc = -ENOSPC; goto out; } @@ -1386,9 +1376,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) { clrkeylen = keybitsize / 8; } else { - DEBUG_ERR( - "%s unknown/unsupported keybitsize %d\n", - __func__, keybitsize); + ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n", + __func__, keybitsize); return -EINVAL; } @@ -1405,9 +1394,8 @@ int 
ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */ kek, &keklen); if (rc) { - DEBUG_ERR( - "%s generate kek key failed, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s generate kek key failed, rc=%d\n", + __func__, rc); goto out; } @@ -1415,9 +1403,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen, clrkey, clrkeylen, encbuf, &encbuflen); if (rc) { - DEBUG_ERR( - "%s encrypting key value with kek key failed, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s encrypting key value with kek key failed, rc=%d\n", + __func__, rc); goto out; } @@ -1426,9 +1413,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, encbuf, encbuflen, 0, def_iv, keybitsize, 0, keybuf, keybufsize, keytype); if (rc) { - DEBUG_ERR( - "%s importing key value as new key failed,, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s importing key value as new key failed,, rc=%d\n", + __func__, rc); goto out; } @@ -1476,17 +1462,16 @@ int ep11_kblob2protkey(u16 card, u16 dom, rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen, 0, def_iv, wkbuf, &wkbuflen); if (rc) { - DEBUG_ERR( - "%s rewrapping ep11 key to pkey failed, rc=%d\n", - __func__, rc); + ZCRYPT_DBF_ERR("%s rewrapping ep11 key to pkey failed, rc=%d\n", + __func__, rc); goto out; } wki = (struct wk_info *)wkbuf; /* check struct version and pkey type */ if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) { - DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n", - __func__, (int)wki->version, (int)wki->pkeytype); + ZCRYPT_DBF_ERR("%s wk info version %d or pkeytype %d mismatch.\n", + __func__, (int)wki->version, (int)wki->pkeytype); rc = -EIO; goto out; } @@ -1511,8 +1496,8 @@ int ep11_kblob2protkey(u16 card, u16 dom, *protkeytype = PKEY_KEYTYPE_AES_256; break; default: - DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n", - __func__, (int)wki->pkeysize); + ZCRYPT_DBF_ERR("%s unknown/unsupported AES pkeysize %d\n", + __func__, (int)wki->pkeysize); rc = -EIO; goto out; } @@ -1525,16 +1510,16 @@ int ep11_kblob2protkey(u16 card, u16 dom, break; case 2: /* TDES */ default: - DEBUG_ERR("%s unknown/unsupported key type %d\n", - __func__, (int)wki->pkeytype); + ZCRYPT_DBF_ERR("%s unknown/unsupported key type %d\n", + __func__, (int)wki->pkeytype); rc = -EIO; goto out; } /* copy the translated protected key */ if (wki->pkeysize > *protkeylen) { - DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n", - __func__, wki->pkeysize, *protkeylen); + ZCRYPT_DBF_ERR("%s wk info pkeysize %llu > protkeysize %u\n", + __func__, wki->pkeysize, *protkeylen); rc = -EINVAL; goto out; } diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h index a44fcfcec938..46e27b43a8af 100644 --- a/drivers/s390/crypto/zcrypt_error.h +++ b/drivers/s390/crypto/zcrypt_error.h @@ -119,10 +119,9 @@ static inline int convert_error(struct zcrypt_queue *zq, case REP82_ERROR_MESSAGE_TYPE: /* 0x20 */ case REP82_ERROR_TRANSPORT_FAIL: /* 0x90 */ /* - * Msg to wrong type or card/infrastructure failure. - * Trigger rescan of the ap bus, trigger retry request. + * Msg to wrong type or card/infrastructure failure. Return + * EAGAIN, the upper layer may do a retry on the request. 
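Editor's note: the zcrypt_error.h hunk that continues directly below removes the ap-bus rescan trigger and, as the rewritten comment says, leaves retrying to the upper layer via -EAGAIN. A hedged sketch of such a caller-side retry; the function, the retry count and the decision to retry at all are invented for illustration and are not part of the patch.

/* Hypothetical caller built on the zcrypt API used in the hunks above. */
static int send_cprb_with_retry(struct ica_xcRB *xcrb)
{
	int retries = 3;
	int rc;

	do {
		rc = zcrypt_send_cprb(xcrb);	/* may report -EAGAIN transiently */
	} while (rc == -EAGAIN && --retries > 0);

	return rc;
}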
*/ - atomic_set(&zcrypt_rescan_req, 1); /* For type 86 response show the apfs value (failure reason) */ if (ehdr->reply_code == REP82_ERROR_TRANSPORT_FAIL && ehdr->type == TYPE86_RSP_CODE) { diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index 2e155de8abe5..3b39cb8f926d 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -427,7 +427,7 @@ static void zcrypt_msgtype50_receive(struct ap_queue *aq, len = t80h->len; if (len > reply->bufsize || len > msg->bufsize || len != reply->len) { - ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__); + pr_debug("%s len mismatch => EMSGSIZE\n", __func__); msg->rc = -EMSGSIZE; goto out; } @@ -487,9 +487,9 @@ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq, out: ap_msg->private = NULL; if (rc) - ZCRYPT_DBF_DBG("%s send me cprb at dev=%02x.%04x rc=%d\n", - __func__, AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), rc); + pr_debug("%s send me cprb at dev=%02x.%04x rc=%d\n", + __func__, AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), rc); return rc; } @@ -537,9 +537,9 @@ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq, out: ap_msg->private = NULL; if (rc) - ZCRYPT_DBF_DBG("%s send crt cprb at dev=%02x.%04x rc=%d\n", - __func__, AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), rc); + pr_debug("%s send crt cprb at dev=%02x.%04x rc=%d\n", + __func__, AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), rc); return rc; } diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 3c53abbdc342..215f257d2360 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -437,9 +437,9 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg, ap_msg->flags |= AP_MSG_FLAG_ADMIN; break; default: - ZCRYPT_DBF_DBG("%s unknown CPRB minor version '%c%c'\n", - __func__, msg->cprbx.func_id[0], - msg->cprbx.func_id[1]); + pr_debug("%s unknown CPRB minor version '%c%c'\n", + __func__, msg->cprbx.func_id[0], + msg->cprbx.func_id[1]); } /* copy data block */ @@ -629,9 +629,9 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq, /* Copy CPRB to user */ if (xcrb->reply_control_blk_length < msg->fmt2.count1) { - ZCRYPT_DBF_DBG("%s reply_control_blk_length %u < required %u => EMSGSIZE\n", - __func__, xcrb->reply_control_blk_length, - msg->fmt2.count1); + pr_debug("%s reply_control_blk_length %u < required %u => EMSGSIZE\n", + __func__, xcrb->reply_control_blk_length, + msg->fmt2.count1); return -EMSGSIZE; } if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr, @@ -642,9 +642,9 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq, /* Copy data buffer to user */ if (msg->fmt2.count2) { if (xcrb->reply_data_length < msg->fmt2.count2) { - ZCRYPT_DBF_DBG("%s reply_data_length %u < required %u => EMSGSIZE\n", - __func__, xcrb->reply_data_length, - msg->fmt2.count2); + pr_debug("%s reply_data_length %u < required %u => EMSGSIZE\n", + __func__, xcrb->reply_data_length, + msg->fmt2.count2); return -EMSGSIZE; } if (z_copy_to_user(userspace, xcrb->reply_data_addr, @@ -673,9 +673,9 @@ static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq, char *data = reply->msg; if (xcrb->resp_len < msg->fmt2.count1) { - ZCRYPT_DBF_DBG("%s resp_len %u < required %u => EMSGSIZE\n", - __func__, (unsigned int)xcrb->resp_len, - msg->fmt2.count1); + pr_debug("%s resp_len %u < 
required %u => EMSGSIZE\n", + __func__, (unsigned int)xcrb->resp_len, + msg->fmt2.count1); return -EMSGSIZE; } @@ -875,7 +875,8 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq, len = sizeof(struct type86x_reply) + t86r->length; if (len > reply->bufsize || len > msg->bufsize || len != reply->len) { - ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__); + pr_debug("%s len mismatch => EMSGSIZE\n", + __func__); msg->rc = -EMSGSIZE; goto out; } @@ -889,7 +890,8 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq, len = t86r->fmt2.offset1 + t86r->fmt2.count1; if (len > reply->bufsize || len > msg->bufsize || len != reply->len) { - ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__); + pr_debug("%s len mismatch => EMSGSIZE\n", + __func__); msg->rc = -EMSGSIZE; goto out; } @@ -939,7 +941,8 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq, len = t86r->fmt2.offset1 + t86r->fmt2.count1; if (len > reply->bufsize || len > msg->bufsize || len != reply->len) { - ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__); + pr_debug("%s len mismatch => EMSGSIZE\n", + __func__); msg->rc = -EMSGSIZE; goto out; } @@ -1151,9 +1154,9 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, out: if (rc) - ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n", - __func__, AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), rc); + pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n", + __func__, AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), rc); return rc; } @@ -1274,9 +1277,9 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * out: if (rc) - ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n", - __func__, AP_QID_CARD(zq->queue->qid), - AP_QID_QUEUE(zq->queue->qid), rc); + pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n", + __func__, AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), rc); return rc; } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index b92a32b4b114..04c64ce0a1ca 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -255,9 +255,10 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) if (!recover) { hash_del(&addr->hnode); kfree(addr); - continue; + } else { + /* prepare for recovery */ + addr->disp_flag = QETH_DISP_ADDR_ADD; } - addr->disp_flag = QETH_DISP_ADDR_ADD; } mutex_unlock(&card->ip_lock); @@ -278,9 +279,11 @@ static void qeth_l3_recover_ip(struct qeth_card *card) if (addr->disp_flag == QETH_DISP_ADDR_ADD) { rc = qeth_l3_register_addr_entry(card, addr); - if (!rc) { + if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) { + /* keep it in the records */ addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; } else { + /* bad address */ hash_del(&addr->hnode); kfree(addr); } diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index addac7fbe37b..9ce27092729c 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1270,7 +1270,7 @@ source "drivers/scsi/arm/Kconfig" config JAZZ_ESP bool "MIPS JAZZ FAS216 SCSI support" - depends on MACH_JAZZ && SCSI + depends on MACH_JAZZ && SCSI=y select SCSI_SPI_ATTRS help This is the driver for the onboard SCSI host adapter of MIPS Magnum diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 19eee108db02..5c8d1ba3f8f3 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -319,17 +319,16 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip) { struct fcoe_fcf *sel; struct fcoe_fcf 
*fcf; - unsigned long flags; mutex_lock(&fip->ctlr_mutex); - spin_lock_irqsave(&fip->ctlr_lock, flags); + spin_lock_bh(&fip->ctlr_lock); kfree_skb(fip->flogi_req); fip->flogi_req = NULL; list_for_each_entry(fcf, &fip->fcfs, list) fcf->flogi_sent = 0; - spin_unlock_irqrestore(&fip->ctlr_lock, flags); + spin_unlock_bh(&fip->ctlr_lock); sel = fip->sel_fcf; if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr)) @@ -700,7 +699,6 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, { struct fc_frame *fp; struct fc_frame_header *fh; - unsigned long flags; u16 old_xid; u8 op; u8 mac[ETH_ALEN]; @@ -734,11 +732,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, op = FIP_DT_FLOGI; if (fip->mode == FIP_MODE_VN2VN) break; - spin_lock_irqsave(&fip->ctlr_lock, flags); + spin_lock_bh(&fip->ctlr_lock); kfree_skb(fip->flogi_req); fip->flogi_req = skb; fip->flogi_req_send = 1; - spin_unlock_irqrestore(&fip->ctlr_lock, flags); + spin_unlock_bh(&fip->ctlr_lock); schedule_work(&fip->timer_work); return -EINPROGRESS; case ELS_FDISC: @@ -1707,11 +1705,10 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip) static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; - unsigned long flags; int error; mutex_lock(&fip->ctlr_mutex); - spin_lock_irqsave(&fip->ctlr_lock, flags); + spin_lock_bh(&fip->ctlr_lock); LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n"); fcf = fcoe_ctlr_select(fip); if (!fcf || fcf->flogi_sent) { @@ -1722,7 +1719,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) fcoe_ctlr_solicit(fip, NULL); error = fcoe_ctlr_flogi_send_locked(fip); } - spin_unlock_irqrestore(&fip->ctlr_lock, flags); + spin_unlock_bh(&fip->ctlr_lock); mutex_unlock(&fip->ctlr_mutex); return error; } @@ -1739,9 +1736,8 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; - unsigned long flags; - spin_lock_irqsave(&fip->ctlr_lock, flags); + spin_lock_bh(&fip->ctlr_lock); fcf = fip->sel_fcf; if (!fcf || !fip->flogi_req_send) goto unlock; @@ -1768,7 +1764,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip) } else /* XXX */ LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n"); unlock: - spin_unlock_irqrestore(&fip->ctlr_lock, flags); + spin_unlock_bh(&fip->ctlr_lock); } /** diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 2074937c05bc..ce73f08ee889 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -305,6 +305,7 @@ struct fnic { unsigned int copy_wq_base; struct work_struct link_work; struct work_struct frame_work; + struct work_struct flush_work; struct sk_buff_head frame_queue; struct sk_buff_head tx_queue; @@ -363,7 +364,7 @@ void fnic_handle_event(struct work_struct *work); int fnic_rq_cmpl_handler(struct fnic *fnic, int); int fnic_alloc_rq_frame(struct vnic_rq *rq); void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); -void fnic_flush_tx(struct fnic *); +void fnic_flush_tx(struct work_struct *work); void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb); void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *); void fnic_update_mac(struct fc_lport *, u8 *new); diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 5e312a55cc7d..a08293b2ad9f 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -1182,7 +1182,7 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp) /** * fnic_flush_tx() - send queued frames. 
- * @fnic: fnic device + * @work: pointer to work element * * Send frames that were waiting to go out in FC or Ethernet mode. * Whenever changing modes we purge queued frames, so these frames should @@ -1190,8 +1190,9 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp) * * Called without fnic_lock held. */ -void fnic_flush_tx(struct fnic *fnic) +void fnic_flush_tx(struct work_struct *work) { + struct fnic *fnic = container_of(work, struct fnic, flush_work); struct sk_buff *skb; struct fc_frame *fp; diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 5ed1d897311a..29eead383eb9 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -830,6 +830,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&fnic->vlans_lock); INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); INIT_WORK(&fnic->event_work, fnic_handle_event); + INIT_WORK(&fnic->flush_work, fnic_flush_tx); skb_queue_head_init(&fnic->fip_frame_queue); INIT_LIST_HEAD(&fnic->evlist); INIT_LIST_HEAD(&fnic->vlans); diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 8d7fc5284293..fc4cee91b175 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -680,7 +680,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, spin_unlock_irqrestore(&fnic->fnic_lock, flags); - fnic_flush_tx(fnic); + queue_work(fnic_event_queue, &fnic->flush_work); reset_cmpl_handler_end: fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); @@ -736,7 +736,7 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, } spin_unlock_irqrestore(&fnic->fnic_lock, flags); - fnic_flush_tx(fnic); + queue_work(fnic_event_queue, &fnic->flush_work); queue_work(fnic_event_queue, &fnic->frame_work); } else { spin_unlock_irqrestore(&fnic->fnic_lock, flags); diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index 2a50fda3a628..625fd547ee60 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c @@ -371,7 +371,6 @@ static u16 initio_se2_rd(unsigned long base, u8 addr) */ static void initio_se2_wr(unsigned long base, u8 addr, u16 val) { - u8 rb; u8 instr; int i; @@ -400,7 +399,7 @@ static void initio_se2_wr(unsigned long base, u8 addr, u16 val) udelay(30); outb(SE2CS, base + TUL_NVRAM); /* -CLK */ udelay(30); - if ((rb = inb(base + TUL_NVRAM)) & SE2DI) + if (inb(base + TUL_NVRAM) & SE2DI) break; /* write complete */ } outb(0, base + TUL_NVRAM); /* -CS */ diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 71f711cb0628..355a0bc0828e 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -3387,7 +3387,7 @@ static enum sci_status isci_io_request_build(struct isci_host *ihost, return SCI_FAILURE; } - return SCI_SUCCESS; + return status; } static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag) diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index d26941b131fd..bf879d81846b 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1918,7 +1918,7 @@ out: * * Returns the number of SGEs added to the SGL. 
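Editor's note: the fnic hunks above turn fnic_flush_tx() into a deferred work item (a work_struct in struct fnic, container_of() in the handler, queue_work() at the former call sites). The generic pattern is sketched below with invented names (struct foo, foo_flush); this is not fnic code.

#include <linux/workqueue.h>

struct foo {
	struct work_struct flush_work;
	/* ... driver state ... */
};

static void foo_flush(struct work_struct *work)
{
	struct foo *foo = container_of(work, struct foo, flush_work);

	/* runs later in process context, outside the caller's locks */
}

static void foo_setup(struct foo *foo)
{
	INIT_WORK(&foo->flush_work, foo_flush);
}

static void foo_kick(struct foo *foo)
{
	schedule_work(&foo->flush_work);	/* fnic queues on its own workqueue instead */
}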
**/ -static int +static uint32_t lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge *sgl, int datasegcnt, struct lpfc_io_buf *lpfc_cmd) @@ -1926,8 +1926,8 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct scatterlist *sgde = NULL; /* s/g data entry */ struct sli4_sge_diseed *diseed = NULL; dma_addr_t physaddr; - int i = 0, num_sge = 0, status; - uint32_t reftag; + int i = 0, status; + uint32_t reftag, num_sge = 0; uint8_t txop, rxop; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint32_t rc; @@ -2099,7 +2099,7 @@ out: * * Returns the number of SGEs added to the SGL. **/ -static int +static uint32_t lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge *sgl, int datacnt, int protcnt, struct lpfc_io_buf *lpfc_cmd) @@ -2123,8 +2123,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, uint32_t rc; #endif uint32_t checking = 1; - uint32_t dma_offset = 0; - int num_sge = 0, j = 2; + uint32_t dma_offset = 0, num_sge = 0; + int j = 2; struct sli4_hybrid_sgl *sgl_xtra = NULL; sgpe = scsi_prot_sglist(sc); diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c index c0c8ab586957..d32ad46318cb 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_transport.c +++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c @@ -1671,7 +1671,7 @@ mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port, void mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc) { - struct host_port h_port[64]; + struct host_port *h_port = NULL; int i, j, found, host_port_count = 0, port_idx; u16 sz, attached_handle, ioc_status; struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL; @@ -1685,6 +1685,10 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc) sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL); if (!sas_io_unit_pg0) return; + h_port = kcalloc(64, sizeof(struct host_port), GFP_KERNEL); + if (!h_port) + goto out; + if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) { ioc_err(mrioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); @@ -1814,6 +1818,7 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc) } } out: + kfree(h_port); kfree(sas_io_unit_pg0); } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 8761bc58d965..b8120ca93c79 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -7378,7 +7378,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) return -EFAULT; } - issue_diag_reset: + return 0; + +issue_diag_reset: rc = _base_diag_reset(ioc); return rc; } diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 76d369343c7a..8cad9792a562 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -328,21 +328,39 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer, return result + 4; } +enum scsi_vpd_parameters { + SCSI_VPD_HEADER_SIZE = 4, + SCSI_VPD_LIST_SIZE = 36, +}; + static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page) { - unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4); + unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4); int result; if (sdev->no_vpd_size) return SCSI_DEFAULT_VPD_LEN; /* + * Fetch the supported pages VPD and validate that the requested page + * number is present. 
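Editor's note: the scsi_get_vpd_size() hunk continuing below reads VPD page 0x00 (Supported VPD Pages) first and only issues the real request if the wanted page code appears in that list. The layout the memchr() scan relies on is roughly the following; the struct name is illustrative, not a kernel type.

struct vpd_page0_sketch {
	u8 peripheral;		/* peripheral qualifier / device type */
	u8 page_code;		/* 0x00 for the Supported VPD Pages page */
	__be16 page_length;	/* big-endian count of page-code bytes that follow */
	u8 pages[];		/* one byte per supported VPD page code */
};
/* The new code scans pages[] for the requested page code and returns 0
 * (page not supported) when it is absent, avoiding an INQUIRY that some
 * legacy devices mishandle. */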
+ */ + if (page != 0) { + result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd)); + if (result < SCSI_VPD_HEADER_SIZE) + return 0; + + result -= SCSI_VPD_HEADER_SIZE; + if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result)) + return 0; + } + /* * Fetch the VPD page header to find out how big the page * is. This is done to prevent problems on legacy devices * which can not handle allocation lengths as large as * potentially requested by the caller. */ - result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header)); + result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE); if (result < 0) return 0; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 79da4b1c1df0..612489afe8d2 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -61,11 +61,11 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd); static enum scsi_disposition scsi_try_to_abort_cmd(const struct scsi_host_template *, struct scsi_cmnd *); -void scsi_eh_wakeup(struct Scsi_Host *shost) +void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy) { lockdep_assert_held(shost->host_lock); - if (scsi_host_busy(shost) == shost->host_failed) { + if (busy == shost->host_failed) { trace_scsi_eh_wakeup(shost); wake_up_process(shost->ehandler); SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost, @@ -88,7 +88,7 @@ void scsi_schedule_eh(struct Scsi_Host *shost) if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 || scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) { shost->host_eh_scheduled++; - scsi_eh_wakeup(shost); + scsi_eh_wakeup(shost, scsi_host_busy(shost)); } spin_unlock_irqrestore(shost->host_lock, flags); @@ -282,11 +282,12 @@ static void scsi_eh_inc_host_failed(struct rcu_head *head) { struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu); struct Scsi_Host *shost = scmd->device->host; + unsigned int busy = scsi_host_busy(shost); unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); shost->host_failed++; - scsi_eh_wakeup(shost); + scsi_eh_wakeup(shost, busy); spin_unlock_irqrestore(shost->host_lock, flags); } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index cf3864f72093..df5ac03d5d6c 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -278,9 +278,11 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd) rcu_read_lock(); __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state); if (unlikely(scsi_host_in_recovery(shost))) { + unsigned int busy = scsi_host_busy(shost); + spin_lock_irqsave(shost->host_lock, flags); if (shost->host_failed || shost->host_eh_scheduled) - scsi_eh_wakeup(shost); + scsi_eh_wakeup(shost, busy); spin_unlock_irqrestore(shost->host_lock, flags); } rcu_read_unlock(); diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 3f0dfb97db6b..1fbfe1b52c9f 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -92,7 +92,7 @@ extern void scmd_eh_abort_handler(struct work_struct *work); extern enum blk_eh_timer_return scsi_timeout(struct request *req); extern int scsi_error_handler(void *host); extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd); -extern void scsi_eh_wakeup(struct Scsi_Host *shost); +extern void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy); extern void scsi_eh_scmd_add(struct scsi_cmnd *); void scsi_eh_ready_devs(struct Scsi_Host *shost, struct list_head *work_q, diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 44680f65ea14..9969f4e2f1c3 100644 --- 
a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -332,7 +332,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, sdev->sg_reserved_size = INT_MAX; - q = blk_mq_init_queue(&sdev->host->tag_set); + q = blk_mq_alloc_queue(&sdev->host->tag_set, NULL, NULL); if (IS_ERR(q)) { /* release fn is set up in scsi_sysfs_device_initialise, so * have to free and put manually here */ diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 0833b3e6aa6e..bdd0acf7fa3c 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3407,6 +3407,24 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, return true; } +static void sd_read_block_zero(struct scsi_disk *sdkp) +{ + unsigned int buf_len = sdkp->device->sector_size; + char *buffer, cmd[10] = { }; + + buffer = kmalloc(buf_len, GFP_KERNEL); + if (!buffer) + return; + + cmd[0] = READ_10; + put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */ + put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */ + + scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len, + SD_TIMEOUT, sdkp->max_retries, NULL); + kfree(buffer); +} + /** * sd_revalidate_disk - called the first time a new disk is seen, * performs disk spin up, read_capacity, etc. @@ -3446,7 +3464,13 @@ static int sd_revalidate_disk(struct gendisk *disk) */ if (sdkp->media_present) { sd_read_capacity(sdkp, buffer); - + /* + * Some USB/UAS devices return generic values for mode pages + * until the media has been accessed. Trigger a READ operation + * to force the device to populate mode pages. + */ + if (sdp->read_before_ms) + sd_read_block_zero(sdkp); /* * set the default to rotational. All non-rotational devices * support the block characteristics VPD page, which will diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index ceff1ec13f9e..385180c98be4 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -6533,8 +6533,11 @@ static void pqi_map_queues(struct Scsi_Host *shost) { struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); - blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], + if (!ctrl_info->disable_managed_interrupts) + return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], ctrl_info->pci_dev, 0); + else + return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]); } static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index a95936b18f69..7ceb982040a5 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -330,6 +330,7 @@ enum storvsc_request_type { */ static int storvsc_ringbuffer_size = (128 * 1024); +static int aligned_ringbuffer_size; static u32 max_outstanding_req_per_channel; static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth); @@ -687,8 +688,8 @@ static void handle_sc_creation(struct vmbus_channel *new_sc) new_sc->next_request_id_callback = storvsc_next_request_id; ret = vmbus_open(new_sc, - storvsc_ringbuffer_size, - storvsc_ringbuffer_size, + aligned_ringbuffer_size, + aligned_ringbuffer_size, (void *)&props, sizeof(struct vmstorage_channel_properties), storvsc_on_channel_callback, new_sc); @@ -1973,7 +1974,7 @@ static int storvsc_probe(struct hv_device *device, dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1); stor_device->port_number = host->host_no; - ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc); + ret = 
storvsc_connect_to_vsp(device, aligned_ringbuffer_size, is_fc); if (ret) goto err_out1; @@ -2164,7 +2165,7 @@ static int storvsc_resume(struct hv_device *hv_dev) { int ret; - ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size, + ret = storvsc_connect_to_vsp(hv_dev, aligned_ringbuffer_size, hv_dev_is_fc(hv_dev)); return ret; } @@ -2198,8 +2199,9 @@ static int __init storvsc_drv_init(void) * the ring buffer indices) by the max request size (which is * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64) */ + aligned_ringbuffer_size = VMBUS_RING_SIZE(storvsc_ringbuffer_size); max_outstanding_req_per_channel = - ((storvsc_ringbuffer_size - PAGE_SIZE) / + ((aligned_ringbuffer_size - PAGE_SIZE) / ALIGN(MAX_MULTIPAGE_BUFFER_PACKET + sizeof(struct vstor_packet) + sizeof(u64), sizeof(u64))); diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 4cf20be668a6..617eb892f4ad 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -188,8 +188,6 @@ static void virtscsi_vq_done(struct virtio_scsi *vscsi, while ((buf = virtqueue_get_buf(vq, &len)) != NULL) fn(vscsi, buf); - if (unlikely(virtqueue_is_broken(vq))) - break; } while (!virtqueue_enable_cb(vq)); spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags); } diff --git a/drivers/soc/apple/mailbox.c b/drivers/soc/apple/mailbox.c index 780199bf351e..49a0955e82d6 100644 --- a/drivers/soc/apple/mailbox.c +++ b/drivers/soc/apple/mailbox.c @@ -296,14 +296,14 @@ struct apple_mbox *apple_mbox_get(struct device *dev, int index) of_node_put(args.np); if (!pdev) - return ERR_PTR(EPROBE_DEFER); + return ERR_PTR(-EPROBE_DEFER); mbox = platform_get_drvdata(pdev); if (!mbox) - return ERR_PTR(EPROBE_DEFER); + return ERR_PTR(-EPROBE_DEFER); if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) - return ERR_PTR(ENODEV); + return ERR_PTR(-ENODEV); return mbox; } diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index 0810b5b0c688..50c664b65f4d 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig @@ -68,4 +68,13 @@ config MTK_SVS chip process corner, temperatures and other factors. Then DVFS driver could apply SVS bank voltage to PMIC/Buck. +config MTK_SOCINFO + tristate "MediaTek SoC Information" + default y + depends on NVMEM_MTK_EFUSE + help + The MediaTek SoC Information (mtk-socinfo) driver provides + information about the SoC to the userspace including the + manufacturer name, marketing name and soc name. + endmenu diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile index 9d3ce7878c5c..6830512848fd 100644 --- a/drivers/soc/mediatek/Makefile +++ b/drivers/soc/mediatek/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_MTK_REGULATOR_COUPLER) += mtk-regulator-coupler.o obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o obj-$(CONFIG_MTK_MMSYS) += mtk-mutex.o obj-$(CONFIG_MTK_SVS) += mtk-svs.o +obj-$(CONFIG_MTK_SOCINFO) += mtk-socinfo.o diff --git a/drivers/soc/mediatek/mtk-socinfo.c b/drivers/soc/mediatek/mtk-socinfo.c new file mode 100644 index 000000000000..42572e8c1520 --- /dev/null +++ b/drivers/soc/mediatek/mtk-socinfo.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2023 MediaTek Inc. 
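Editor's note on the drivers/soc/apple/mailbox.c hunk a little above: ERR_PTR() must be handed a negative errno; ERR_PTR(EPROBE_DEFER) encodes a positive value that IS_ERR() at the consumer never recognises, which is what the added minus signs fix. A minimal consumer-side sketch; the binding function is invented, while apple_mbox_get() is the real accessor shown in that hunk.

#include <linux/err.h>

/* apple_mbox_get() is declared in the Apple SoC mailbox header. */
static int my_consumer_bind(struct device *dev)
{
	struct apple_mbox *mbox;

	mbox = apple_mbox_get(dev, 0);
	if (IS_ERR(mbox))
		return PTR_ERR(mbox);	/* -EPROBE_DEFER / -ENODEV now propagate correctly */

	return 0;
}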
+ */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/pm_runtime.h> +#include <linux/nvmem-consumer.h> +#include <linux/device.h> +#include <linux/device/bus.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/string.h> +#include <linux/sys_soc.h> +#include <linux/slab.h> +#include <linux/platform_device.h> + +#define MTK_SOCINFO_ENTRY(_soc_name, _segment_name, _marketing_name, _cell_data1, _cell_data2) {\ + .soc_name = _soc_name, \ + .segment_name = _segment_name, \ + .marketing_name = _marketing_name, \ + .cell_data = {_cell_data1, _cell_data2} \ +} +#define CELL_NOT_USED (0xFFFFFFFF) +#define MAX_CELLS (2) + +struct mtk_socinfo { + struct device *dev; + struct name_data *name_data; + struct socinfo_data *socinfo_data; + struct soc_device *soc_dev; +}; + +struct socinfo_data { + char *soc_name; + char *segment_name; + char *marketing_name; + u32 cell_data[MAX_CELLS]; +}; + +static const char *cell_names[MAX_CELLS] = {"socinfo-data1", "socinfo-data2"}; + +static struct socinfo_data socinfo_data_table[] = { + MTK_SOCINFO_ENTRY("MT8173", "MT8173V/AC", "MT8173", 0x6CA20004, 0x10000000), + MTK_SOCINFO_ENTRY("MT8183", "MT8183V/AZA", "Kompanio 500", 0x00010043, 0x00000840), + MTK_SOCINFO_ENTRY("MT8183", "MT8183V/AZA", "Kompanio 500", 0x00010043, 0x00000940), + MTK_SOCINFO_ENTRY("MT8186", "MT8186GV/AZA", "Kompanio 520", 0x81861001, CELL_NOT_USED), + MTK_SOCINFO_ENTRY("MT8186T", "MT8186TV/AZA", "Kompanio 528", 0x81862001, CELL_NOT_USED), + MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/AZA", "Kompanio 830", 0x81880000, 0x00000010), + MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/HZA", "Kompanio 830", 0x81880000, 0x00000011), + MTK_SOCINFO_ENTRY("MT8192", "MT8192V/AZA", "Kompanio 820", 0x00001100, 0x00040080), + MTK_SOCINFO_ENTRY("MT8192T", "MT8192V/ATZA", "Kompanio 828", 0x00000100, 0x000400C0), + MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EZA", "Kompanio 1200", 0x81950300, CELL_NOT_USED), + MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EHZA", "Kompanio 1200", 0x81950304, CELL_NOT_USED), + MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EZA", "Kompanio 1380", 0x81950400, CELL_NOT_USED), + MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EHZA", "Kompanio 1380", 0x81950404, CELL_NOT_USED), +}; + +static int mtk_socinfo_create_socinfo_node(struct mtk_socinfo *mtk_socinfop) +{ + struct soc_device_attribute *attrs; + static char machine[30] = {0}; + static const char *soc_manufacturer = "MediaTek"; + + attrs = devm_kzalloc(mtk_socinfop->dev, sizeof(*attrs), GFP_KERNEL); + if (!attrs) + return -ENOMEM; + + snprintf(machine, sizeof(machine), "%s (%s)", mtk_socinfop->socinfo_data->marketing_name, + mtk_socinfop->socinfo_data->soc_name); + attrs->family = soc_manufacturer; + attrs->machine = machine; + + mtk_socinfop->soc_dev = soc_device_register(attrs); + if (IS_ERR(mtk_socinfop->soc_dev)) + return PTR_ERR(mtk_socinfop->soc_dev); + + dev_info(mtk_socinfop->dev, "%s %s SoC detected.\n", soc_manufacturer, attrs->machine); + return 0; +} + +static u32 mtk_socinfo_read_cell(struct device *dev, const char *name) +{ + struct nvmem_device *nvmemp; + struct device_node *np, *nvmem_node = dev->parent->of_node; + u32 offset; + u32 cell_val = CELL_NOT_USED; + + /* should never fail since the nvmem driver registers this child */ + nvmemp = nvmem_device_find(nvmem_node, device_match_of_node); + if (IS_ERR(nvmemp)) + goto out; + + np = of_get_child_by_name(nvmem_node, name); + if (!np) + goto put_device; + + if (of_property_read_u32_index(np, "reg", 0, &offset)) + 
goto put_node; + + nvmem_device_read(nvmemp, offset, sizeof(cell_val), &cell_val); + +put_node: + of_node_put(np); +put_device: + nvmem_device_put(nvmemp); +out: + return cell_val; +} + +static int mtk_socinfo_get_socinfo_data(struct mtk_socinfo *mtk_socinfop) +{ + unsigned int i, j; + unsigned int num_cell_data = 0; + u32 cell_data[MAX_CELLS] = {0}; + bool match_socinfo; + int match_socinfo_index = -1; + + for (i = 0; i < MAX_CELLS; i++) { + cell_data[i] = mtk_socinfo_read_cell(mtk_socinfop->dev, cell_names[i]); + if (cell_data[i] != CELL_NOT_USED) + num_cell_data++; + else + break; + } + + if (!num_cell_data) + return -ENOENT; + + for (i = 0; i < ARRAY_SIZE(socinfo_data_table); i++) { + match_socinfo = true; + for (j = 0; j < num_cell_data; j++) { + if (cell_data[j] != socinfo_data_table[i].cell_data[j]) { + match_socinfo = false; + break; + } + } + if (match_socinfo) { + mtk_socinfop->socinfo_data = &(socinfo_data_table[i]); + match_socinfo_index = i; + break; + } + } + + return match_socinfo_index >= 0 ? match_socinfo_index : -ENOENT; +} + +static int mtk_socinfo_probe(struct platform_device *pdev) +{ + struct mtk_socinfo *mtk_socinfop; + int ret; + + mtk_socinfop = devm_kzalloc(&pdev->dev, sizeof(*mtk_socinfop), GFP_KERNEL); + if (!mtk_socinfop) + return -ENOMEM; + + mtk_socinfop->dev = &pdev->dev; + + ret = mtk_socinfo_get_socinfo_data(mtk_socinfop); + if (ret < 0) + return dev_err_probe(mtk_socinfop->dev, ret, "Failed to get socinfo data\n"); + + ret = mtk_socinfo_create_socinfo_node(mtk_socinfop); + if (ret) + return dev_err_probe(mtk_socinfop->dev, ret, "Cannot create node\n"); + + platform_set_drvdata(pdev, mtk_socinfop); + return 0; +} + +static void mtk_socinfo_remove(struct platform_device *pdev) +{ + struct mtk_socinfo *mtk_socinfop = platform_get_drvdata(pdev); + + soc_device_unregister(mtk_socinfop->soc_dev); +} + +static struct platform_driver mtk_socinfo = { + .probe = mtk_socinfo_probe, + .remove_new = mtk_socinfo_remove, + .driver = { + .name = "mtk-socinfo", + }, +}; +module_platform_driver(mtk_socinfo); + +MODULE_AUTHOR("William-TW LIN <william-tw.lin@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek socinfo driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig index 9b0fdd95276e..19f4b576f822 100644 --- a/drivers/soc/microchip/Kconfig +++ b/drivers/soc/microchip/Kconfig @@ -1,5 +1,5 @@ config POLARFIRE_SOC_SYS_CTRL - tristate "POLARFIRE_SOC_SYS_CTRL" + tristate "Microchip PolarFire SoC (MPFS) system controller support" depends on POLARFIRE_SOC_MAILBOX depends on MTD help diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index c6ca4de42586..5af33b0e3470 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -268,4 +268,13 @@ config QCOM_INLINE_CRYPTO_ENGINE tristate select QCOM_SCM +config QCOM_PBS + tristate "PBS trigger support for Qualcomm Technologies, Inc. PMICS" + depends on SPMI + help + This driver supports configuring software programmable boot sequencer (PBS) + trigger event through PBS RAM on Qualcomm Technologies, Inc. PMICs. + This module provides the APIs to the client drivers that wants to send the + PBS trigger event to the PBS RAM. 
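Editor's note: the QCOM_PBS help text above speaks of client drivers; the client-facing API (get_pbs_client_device() and qcom_pbs_trigger_event()) is added in drivers/soc/qcom/qcom-pbs.c later in this diff. A hedged usage sketch, assuming the accompanying include/linux/soc/qcom/qcom-pbs.h header declares both functions; the client driver, its probe and the bitmap value are invented.

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/soc/qcom/qcom-pbs.h>

static int my_pmic_client_probe(struct platform_device *pdev)
{
	struct pbs_dev *pbs;

	/* resolves the "qcom,pbs" phandle in the client's DT node */
	pbs = get_pbs_client_device(&pdev->dev);
	if (IS_ERR(pbs))
		return PTR_ERR(pbs);

	/* fire the PBS RAM sequence wired to trigger bit 0 */
	return qcom_pbs_trigger_event(pbs, BIT(0));
}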
+ endmenu diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 05b3d54e8dc9..ca0bece0dfff 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 CFLAGS_rpmh-rsc.o := -I$(src) +CFLAGS_qcom_aoss.o := -I$(src) obj-$(CONFIG_QCOM_AOSS_QMP) += qcom_aoss.o obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o @@ -34,3 +35,4 @@ obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o qcom_ice-objs += ice.o obj-$(CONFIG_QCOM_INLINE_CRYPTO_ENGINE) += qcom_ice.o +obj-$(CONFIG_QCOM_PBS) += qcom-pbs.o diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c index 1f8b315576a4..50749e870efa 100644 --- a/drivers/soc/qcom/apr.c +++ b/drivers/soc/qcom/apr.c @@ -399,7 +399,7 @@ static int apr_uevent(const struct device *dev, struct kobj_uevent_env *env) return add_uevent_var(env, "MODALIAS=apr:%s", adev->name); } -struct bus_type aprbus = { +const struct bus_type aprbus = { .name = "aprbus", .match = apr_device_match, .probe = apr_device_probe, diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index 4ca88eaebf06..cbef0dea1d5d 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -859,6 +859,8 @@ static int llcc_update_act_ctrl(u32 sid, ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg, slice_status, !(slice_status & status), 0, LLCC_STATUS_READ_DELAY); + if (ret) + return ret; if (drv_data->version >= LLCC_VERSION_4_1_0_0) ret = regmap_write(drv_data->bcast_regmap, act_clear_reg, diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c index f4bfd24386f1..f913e9bd57ed 100644 --- a/drivers/soc/qcom/pmic_glink.c +++ b/drivers/soc/qcom/pmic_glink.c @@ -265,10 +265,17 @@ static int pmic_glink_probe(struct platform_device *pdev) pg->client_mask = *match_data; + pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg); + if (IS_ERR(pg->pdr)) { + ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr), + "failed to initialize pdr\n"); + return ret; + } + if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) { ret = pmic_glink_add_aux_device(pg, &pg->ucsi_aux, "ucsi"); if (ret) - return ret; + goto out_release_pdr_handle; } if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) { ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode"); @@ -281,17 +288,11 @@ static int pmic_glink_probe(struct platform_device *pdev) goto out_release_altmode_aux; } - pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg); - if (IS_ERR(pg->pdr)) { - ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr), "failed to initialize pdr\n"); - goto out_release_aux_devices; - } - service = pdr_add_lookup(pg->pdr, "tms/servreg", "msm/adsp/charger_pd"); if (IS_ERR(service)) { ret = dev_err_probe(&pdev->dev, PTR_ERR(service), "failed adding pdr lookup for charger_pd\n"); - goto out_release_pdr_handle; + goto out_release_aux_devices; } mutex_lock(&__pmic_glink_lock); @@ -300,8 +301,6 @@ static int pmic_glink_probe(struct platform_device *pdev) return 0; -out_release_pdr_handle: - pdr_handle_release(pg->pdr); out_release_aux_devices: if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT)) pmic_glink_del_aux_device(pg, &pg->ps_aux); @@ -311,6 +310,8 @@ out_release_altmode_aux: out_release_ucsi_aux: if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) pmic_glink_del_aux_device(pg, &pg->ucsi_aux); +out_release_pdr_handle: + pdr_handle_release(pg->pdr); return ret; } diff --git a/drivers/soc/qcom/pmic_glink_altmode.c 
b/drivers/soc/qcom/pmic_glink_altmode.c index 5fcd0fdd2faa..b3808fc24c69 100644 --- a/drivers/soc/qcom/pmic_glink_altmode.c +++ b/drivers/soc/qcom/pmic_glink_altmode.c @@ -76,7 +76,7 @@ struct pmic_glink_altmode_port { struct work_struct work; - struct device *bridge; + struct auxiliary_device *bridge; enum typec_orientation orientation; u16 svid; @@ -230,7 +230,7 @@ static void pmic_glink_altmode_worker(struct work_struct *work) else pmic_glink_altmode_enable_usb(altmode, alt_port); - drm_aux_hpd_bridge_notify(alt_port->bridge, + drm_aux_hpd_bridge_notify(&alt_port->bridge->dev, alt_port->hpd_state ? connector_status_connected : connector_status_disconnected); @@ -454,7 +454,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev, alt_port->index = port; INIT_WORK(&alt_port->work, pmic_glink_altmode_worker); - alt_port->bridge = drm_dp_hpd_bridge_register(dev, to_of_node(fwnode)); + alt_port->bridge = devm_drm_dp_hpd_bridge_alloc(dev, to_of_node(fwnode)); if (IS_ERR(alt_port->bridge)) { fwnode_handle_put(fwnode); return PTR_ERR(alt_port->bridge); @@ -510,6 +510,16 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev, } } + for (port = 0; port < ARRAY_SIZE(altmode->ports); port++) { + alt_port = &altmode->ports[port]; + if (!alt_port->bridge) + continue; + + ret = devm_drm_dp_hpd_bridge_add(dev, alt_port->bridge); + if (ret) + return ret; + } + altmode->client = devm_pmic_glink_register_client(dev, altmode->owner_id, pmic_glink_altmode_callback, diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index bdcf44b85b2f..2e8f24d5da80 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -89,7 +89,6 @@ * @base: Base address of this instance of QUP wrapper core * @clks: Handle to the primary & optional secondary AHB clocks * @num_clks: Count of clocks - * @to_core: Core ICC path */ struct geni_wrapper { struct device *dev; diff --git a/drivers/soc/qcom/qcom-pbs.c b/drivers/soc/qcom/qcom-pbs.c new file mode 100644 index 000000000000..6af49b5060e5 --- /dev/null +++ b/drivers/soc/qcom/qcom-pbs.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/of_platform.h> +#include <linux/regmap.h> +#include <linux/spmi.h> +#include <linux/soc/qcom/qcom-pbs.h> + +#define PBS_CLIENT_TRIG_CTL 0x42 +#define PBS_CLIENT_SW_TRIG_BIT BIT(7) +#define PBS_CLIENT_SCRATCH1 0x50 +#define PBS_CLIENT_SCRATCH2 0x51 +#define PBS_CLIENT_SCRATCH2_ERROR 0xFF + +#define RETRIES 2000 +#define DELAY 1100 + +struct pbs_dev { + struct device *dev; + struct regmap *regmap; + struct mutex lock; + struct device_link *link; + + u32 base; +}; + +static int qcom_pbs_wait_for_ack(struct pbs_dev *pbs, u8 bit_pos) +{ + unsigned int val; + int ret; + + ret = regmap_read_poll_timeout(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, + val, val & BIT(bit_pos), DELAY, DELAY * RETRIES); + + if (ret < 0) { + dev_err(pbs->dev, "Timeout for PBS ACK/NACK for bit %u\n", bit_pos); + return -ETIMEDOUT; + } + + if (val == PBS_CLIENT_SCRATCH2_ERROR) { + ret = regmap_write(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, 0); + dev_err(pbs->dev, "NACK from PBS for bit %u\n", bit_pos); + return -EINVAL; + } + + dev_dbg(pbs->dev, "PBS sequence for bit %u executed!\n", bit_pos); + return 0; +} + +/** + * qcom_pbs_trigger_event() - Trigger the PBS RAM sequence + * @pbs: Pointer to PBS device + * @bitmap: bitmap + * + * This function is used to trigger the PBS RAM sequence to be + * executed by the client driver. + * + * The PBS trigger sequence involves + * 1. setting the PBS sequence bit in PBS_CLIENT_SCRATCH1 + * 2. Initiating the SW PBS trigger + * 3. Checking the equivalent bit in PBS_CLIENT_SCRATCH2 for the + * completion of the sequence. + * 4. If PBS_CLIENT_SCRATCH2 == 0xFF, the PBS sequence failed to execute + * + * Return: 0 on success, < 0 on failure + */ +int qcom_pbs_trigger_event(struct pbs_dev *pbs, u8 bitmap) +{ + unsigned int val; + u16 bit_pos; + int ret; + + if (WARN_ON(!bitmap)) + return -EINVAL; + + if (IS_ERR_OR_NULL(pbs)) + return -EINVAL; + + mutex_lock(&pbs->lock); + ret = regmap_read(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, &val); + if (ret < 0) + goto out; + + if (val == PBS_CLIENT_SCRATCH2_ERROR) { + /* PBS error - clear SCRATCH2 register */ + ret = regmap_write(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, 0); + if (ret < 0) + goto out; + } + + for (bit_pos = 0; bit_pos < 8; bit_pos++) { + if (!(bitmap & BIT(bit_pos))) + continue; + + /* Clear the PBS sequence bit position */ + ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, + BIT(bit_pos), 0); + if (ret < 0) + goto out_clear_scratch1; + + /* Set the PBS sequence bit position */ + ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH1, + BIT(bit_pos), BIT(bit_pos)); + if (ret < 0) + goto out_clear_scratch1; + + /* Initiate the SW trigger */ + ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_TRIG_CTL, + PBS_CLIENT_SW_TRIG_BIT, PBS_CLIENT_SW_TRIG_BIT); + if (ret < 0) + goto out_clear_scratch1; + + ret = qcom_pbs_wait_for_ack(pbs, bit_pos); + if (ret < 0) + goto out_clear_scratch1; + + /* Clear the PBS sequence bit position */ + regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH1, BIT(bit_pos), 0); + regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, BIT(bit_pos), 0); + } + +out_clear_scratch1: + /* Clear all the requested bitmap */ + ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH1, bitmap, 0); + +out: + mutex_unlock(&pbs->lock); + + return ret; +} 
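Editor's note: qcom_pbs_wait_for_ack() above relies on regmap_read_poll_timeout() with a sleep of DELAY = 1100 us between reads and an overall budget of DELAY * RETRIES = 2,200,000 us, roughly 2.2 s, after which the timeout is reported as -ETIMEDOUT. For readers unfamiliar with the helper, an open-coded equivalent of just the polling part could look like this (sketch only, not part of the driver):

static int pbs_poll_ack_open_coded(struct pbs_dev *pbs, u8 bit_pos)
{
	unsigned int val;
	int i, ret;

	for (i = 0; i < RETRIES; i++) {
		ret = regmap_read(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, &val);
		if (ret < 0)
			return ret;
		if (val & BIT(bit_pos))
			return 0;			/* PBS has acknowledged this bit */
		usleep_range(DELAY, DELAY + 100);
	}

	return -ETIMEDOUT;
}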
+EXPORT_SYMBOL_GPL(qcom_pbs_trigger_event); + +/** + * get_pbs_client_device() - Get the PBS device used by client + * @dev: Client device + * + * This function is used to get the PBS device that is being + * used by the client. + * + * Return: pbs_dev on success, ERR_PTR on failure + */ +struct pbs_dev *get_pbs_client_device(struct device *dev) +{ + struct device_node *pbs_dev_node; + struct platform_device *pdev; + struct pbs_dev *pbs; + + pbs_dev_node = of_parse_phandle(dev->of_node, "qcom,pbs", 0); + if (!pbs_dev_node) { + dev_err(dev, "Missing qcom,pbs property\n"); + return ERR_PTR(-ENODEV); + } + + pdev = of_find_device_by_node(pbs_dev_node); + if (!pdev) { + dev_err(dev, "Unable to find PBS dev_node\n"); + pbs = ERR_PTR(-EPROBE_DEFER); + goto out; + } + + pbs = platform_get_drvdata(pdev); + if (!pbs) { + dev_err(dev, "Cannot get pbs instance from %s\n", dev_name(&pdev->dev)); + platform_device_put(pdev); + pbs = ERR_PTR(-EPROBE_DEFER); + goto out; + } + + pbs->link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER); + if (!pbs->link) { + dev_err(&pdev->dev, "Failed to create device link to consumer %s\n", dev_name(dev)); + platform_device_put(pdev); + pbs = ERR_PTR(-EINVAL); + goto out; + } + +out: + of_node_put(pbs_dev_node); + return pbs; +} +EXPORT_SYMBOL_GPL(get_pbs_client_device); + +static int qcom_pbs_probe(struct platform_device *pdev) +{ + struct pbs_dev *pbs; + u32 val; + int ret; + + pbs = devm_kzalloc(&pdev->dev, sizeof(*pbs), GFP_KERNEL); + if (!pbs) + return -ENOMEM; + + pbs->dev = &pdev->dev; + pbs->regmap = dev_get_regmap(pbs->dev->parent, NULL); + if (!pbs->regmap) { + dev_err(pbs->dev, "Couldn't get parent's regmap\n"); + return -EINVAL; + } + + ret = device_property_read_u32(pbs->dev, "reg", &val); + if (ret < 0) { + dev_err(pbs->dev, "Couldn't find reg, ret = %d\n", ret); + return ret; + } + pbs->base = val; + mutex_init(&pbs->lock); + + platform_set_drvdata(pdev, pbs); + + return 0; +} + +static const struct of_device_id qcom_pbs_match_table[] = { + { .compatible = "qcom,pbs" }, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_pbs_match_table); + +static struct platform_driver qcom_pbs_driver = { + .driver = { + .name = "qcom-pbs", + .of_match_table = qcom_pbs_match_table, + }, + .probe = qcom_pbs_probe, +}; +module_platform_driver(qcom_pbs_driver) + +MODULE_DESCRIPTION("QCOM PBS DRIVER"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index aff0cfb71482..ca2f6b7629ce 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -3,6 +3,7 @@ * Copyright (c) 2019, Linaro Ltd */ #include <linux/clk-provider.h> +#include <linux/debugfs.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/mailbox_client.h> @@ -13,6 +14,9 @@ #include <linux/slab.h> #include <linux/soc/qcom/qcom_aoss.h> +#define CREATE_TRACE_POINTS +#include "trace-aoss.h" + #define QMP_DESC_MAGIC 0x0 #define QMP_DESC_VERSION 0x4 #define QMP_DESC_FEATURES 0x8 @@ -44,6 +48,8 @@ #define QMP_NUM_COOLING_RESOURCES 2 +#define QMP_DEBUGFS_FILES 4 + static bool qmp_cdev_max_state = 1; struct qmp_cooling_device { @@ -65,6 +71,8 @@ struct qmp_cooling_device { * @tx_lock: provides synchronization between multiple callers of qmp_send() * @qdss_clk: QDSS clock hw struct * @cooling_devs: thermal cooling devices + * @debugfs_root: directory for the developer/tester interface + * @debugfs_files: array of individual debugfs entries under debugfs_root */ struct qmp { void __iomem *msgram; @@ -82,6 +90,8 @@ struct qmp { struct 
clk_hw qdss_clk; struct qmp_cooling_device *cooling_devs; + struct dentry *debugfs_root; + struct dentry *debugfs_files[QMP_DEBUGFS_FILES]; }; static void qmp_kick(struct qmp *qmp) @@ -214,7 +224,7 @@ static bool qmp_message_empty(struct qmp *qmp) * * Return: 0 on success, negative errno on failure */ -int qmp_send(struct qmp *qmp, const char *fmt, ...) +int __printf(2, 3) qmp_send(struct qmp *qmp, const char *fmt, ...) { char buf[QMP_MSG_LEN]; long time_left; @@ -235,6 +245,8 @@ int qmp_send(struct qmp *qmp, const char *fmt, ...) mutex_lock(&qmp->tx_lock); + trace_aoss_send(buf); + /* The message RAM only implements 32-bit accesses */ __iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32), buf, sizeof(buf) / sizeof(u32)); @@ -256,6 +268,8 @@ int qmp_send(struct qmp *qmp, const char *fmt, ...) ret = 0; } + trace_aoss_send_done(buf, ret); + mutex_unlock(&qmp->tx_lock); return ret; @@ -475,6 +489,91 @@ void qmp_put(struct qmp *qmp) } EXPORT_SYMBOL_GPL(qmp_put); +struct qmp_debugfs_entry { + const char *name; + const char *fmt; + bool is_bool; + const char *true_val; + const char *false_val; +}; + +static const struct qmp_debugfs_entry qmp_debugfs_entries[QMP_DEBUGFS_FILES] = { + { "ddr_frequency_mhz", "{class: ddr, res: fixed, val: %u}", false }, + { "prevent_aoss_sleep", "{class: aoss_slp, res: sleep: %s}", true, "enable", "disable" }, + { "prevent_cx_collapse", "{class: cx_mol, res: cx, val: %s}", true, "mol", "off" }, + { "prevent_ddr_collapse", "{class: ddr_mol, res: ddr, val: %s}", true, "mol", "off" }, +}; + +static ssize_t qmp_debugfs_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *pos) +{ + const struct qmp_debugfs_entry *entry = NULL; + struct qmp *qmp = file->private_data; + char buf[QMP_MSG_LEN]; + unsigned int uint_val; + const char *str_val; + bool bool_val; + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(qmp->debugfs_files); i++) { + if (qmp->debugfs_files[i] == file->f_path.dentry) { + entry = &qmp_debugfs_entries[i]; + break; + } + } + if (WARN_ON(!entry)) + return -EFAULT; + + if (entry->is_bool) { + ret = kstrtobool_from_user(user_buf, count, &bool_val); + if (ret) + return ret; + + str_val = bool_val ? 
entry->true_val : entry->false_val; + + ret = snprintf(buf, sizeof(buf), entry->fmt, str_val); + if (ret >= sizeof(buf)) + return -EINVAL; + } else { + ret = kstrtou32_from_user(user_buf, count, 0, &uint_val); + if (ret) + return ret; + + ret = snprintf(buf, sizeof(buf), entry->fmt, uint_val); + if (ret >= sizeof(buf)) + return -EINVAL; + } + + ret = qmp_send(qmp, buf); + if (ret < 0) + return ret; + + return count; +} + +static const struct file_operations qmp_debugfs_fops = { + .open = simple_open, + .write = qmp_debugfs_write, +}; + +static void qmp_debugfs_create(struct qmp *qmp) +{ + const struct qmp_debugfs_entry *entry; + int i; + + qmp->debugfs_root = debugfs_create_dir("qcom_aoss", NULL); + + for (i = 0; i < ARRAY_SIZE(qmp->debugfs_files); i++) { + entry = &qmp_debugfs_entries[i]; + + qmp->debugfs_files[i] = debugfs_create_file(entry->name, 0200, + qmp->debugfs_root, + qmp, + &qmp_debugfs_fops); + } +} + static int qmp_probe(struct platform_device *pdev) { struct qmp *qmp; @@ -523,6 +622,8 @@ static int qmp_probe(struct platform_device *pdev) platform_set_drvdata(pdev, qmp); + qmp_debugfs_create(qmp); + return 0; err_close_qmp: @@ -537,6 +638,8 @@ static void qmp_remove(struct platform_device *pdev) { struct qmp *qmp = platform_get_drvdata(pdev); + debugfs_remove_recursive(qmp->debugfs_root); + qmp_qdss_clk_remove(qmp); qmp_cooling_devices_remove(qmp); diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index 690afc9a12f4..7191fa0c087f 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c @@ -655,8 +655,6 @@ invalid_canary: void *qcom_smem_get(unsigned host, unsigned item, size_t *size) { struct smem_partition *part; - unsigned long flags; - int ret; void *ptr = ERR_PTR(-EPROBE_DEFER); if (!__smem) @@ -665,12 +663,6 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size) if (WARN_ON(item >= __smem->item_count)) return ERR_PTR(-EINVAL); - ret = hwspin_lock_timeout_irqsave(__smem->hwlock, - HWSPINLOCK_TIMEOUT, - &flags); - if (ret) - return ERR_PTR(ret); - if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { part = &__smem->partitions[host]; ptr = qcom_smem_get_private(__smem, part, item, size); @@ -681,10 +673,7 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size) ptr = qcom_smem_get_global(__smem, item, size); } - hwspin_unlock_irqrestore(__smem->hwlock, &flags); - return ptr; - } EXPORT_SYMBOL_GPL(qcom_smem_get); diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index 914b2246148f..a21241cbeec7 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -58,8 +58,8 @@ * @valid_entries: number of allocated entries * @flags: * @entries: individual communication entries - * @name: name of the entry - * @value: content of the entry + * @entries.name: name of the entry + * @entries.value: content of the entry */ struct smp2p_smem_item { u32 magic; @@ -275,6 +275,8 @@ static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p) * * Handle notifications from the remote side to handle newly allocated entries * or any changes to the state bits of existing entries. 
+ * + * Return: %IRQ_HANDLED */ static irqreturn_t qcom_smp2p_intr(int irq, void *data) { diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 6349a0debeb5..e8ff9819ac47 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -124,7 +124,7 @@ static const char *const pmic_models[] = { [50] = "PM8350B", [51] = "PMR735A", [52] = "PMR735B", - [55] = "PM2250", + [55] = "PM4125", [58] = "PM8450", [65] = "PM8010", [69] = "PM8550VS", @@ -424,8 +424,11 @@ static const struct soc_id soc_id[] = { { qcom_board_id(IPQ9510) }, { qcom_board_id(QRB4210) }, { qcom_board_id(QRB2210) }, + { qcom_board_id(SM8475) }, + { qcom_board_id(SM8475P) }, { qcom_board_id(SA8775P) }, { qcom_board_id(QRU1000) }, + { qcom_board_id(SM8475_2) }, { qcom_board_id(QDU1000) }, { qcom_board_id(SM8650) }, { qcom_board_id(SM4450) }, @@ -437,6 +440,8 @@ static const struct soc_id soc_id[] = { { qcom_board_id(IPQ5322) }, { qcom_board_id(IPQ5312) }, { qcom_board_id(IPQ5302) }, + { qcom_board_id(QCS8550) }, + { qcom_board_id(QCM8550) }, { qcom_board_id(IPQ5300) }, }; diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c index 2f0b1bfe7658..06e2c4c2a4a8 100644 --- a/drivers/soc/qcom/spm.c +++ b/drivers/soc/qcom/spm.c @@ -6,20 +6,40 @@ * SAW power controller driver */ -#include <linux/kernel.h> +#include <linux/bitfield.h> +#include <linux/err.h> #include <linux/init.h> #include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/linear_range.h> #include <linux/module.h> -#include <linux/slab.h> #include <linux/of.h> -#include <linux/err.h> #include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/smp.h> + +#include <linux/regulator/driver.h> + #include <soc/qcom/spm.h> +#define FIELD_SET(current, mask, val) \ + (((current) & ~(mask)) | FIELD_PREP((mask), (val))) + #define SPM_CTL_INDEX 0x7f #define SPM_CTL_INDEX_SHIFT 4 #define SPM_CTL_EN BIT(0) +/* These registers might be specific to SPM 1.1 */ +#define SPM_VCTL_VLVL GENMASK(7, 0) +#define SPM_PMIC_DATA_0_VLVL GENMASK(7, 0) +#define SPM_PMIC_DATA_1_MIN_VSEL GENMASK(5, 0) +#define SPM_PMIC_DATA_1_MAX_VSEL GENMASK(21, 16) + +#define SPM_1_1_AVS_CTL_AVS_ENABLED BIT(27) +#define SPM_AVS_CTL_MAX_VLVL GENMASK(22, 17) +#define SPM_AVS_CTL_MIN_VLVL GENMASK(15, 10) + enum spm_reg { SPM_REG_CFG, SPM_REG_SPM_CTL, @@ -29,13 +49,44 @@ enum spm_reg { SPM_REG_PMIC_DATA_1, SPM_REG_VCTL, SPM_REG_SEQ_ENTRY, - SPM_REG_SPM_STS, + SPM_REG_STS0, + SPM_REG_STS1, SPM_REG_PMIC_STS, SPM_REG_AVS_CTL, SPM_REG_AVS_LIMIT, + SPM_REG_RST, SPM_REG_NR, }; +#define MAX_PMIC_DATA 2 +#define MAX_SEQ_DATA 64 + +struct spm_reg_data { + const u16 *reg_offset; + u32 spm_cfg; + u32 spm_dly; + u32 pmic_dly; + u32 pmic_data[MAX_PMIC_DATA]; + u32 avs_ctl; + u32 avs_limit; + u8 seq[MAX_SEQ_DATA]; + u8 start_index[PM_SLEEP_MODE_NR]; + + smp_call_func_t set_vdd; + /* for now we support only a single range */ + struct linear_range *range; + unsigned int ramp_delay; + unsigned int init_uV; +}; + +struct spm_driver_data { + void __iomem *reg_base; + const struct spm_reg_data *reg_data; + struct device *dev; + unsigned int volt_sel; + int reg_cpu; +}; + static const u16 spm_reg_offset_v4_1[SPM_REG_NR] = { [SPM_REG_AVS_CTL] = 0x904, [SPM_REG_AVS_LIMIT] = 0x908, @@ -169,6 +220,10 @@ static const struct spm_reg_data spm_reg_8226_cpu = { static const u16 spm_reg_offset_v1_1[SPM_REG_NR] = { [SPM_REG_CFG] = 0x08, + [SPM_REG_STS0] = 0x0c, + [SPM_REG_STS1] = 0x10, + [SPM_REG_VCTL] = 0x14, + [SPM_REG_AVS_CTL] = 0x18, [SPM_REG_SPM_CTL] = 0x20, 
[SPM_REG_PMIC_DLY] = 0x24, [SPM_REG_PMIC_DATA_0] = 0x28, @@ -176,7 +231,12 @@ static const u16 spm_reg_offset_v1_1[SPM_REG_NR] = { [SPM_REG_SEQ_ENTRY] = 0x80, }; +static void smp_set_vdd_v1_1(void *data); + /* SPM register data for 8064 */ +static struct linear_range spm_v1_1_regulator_range = + REGULATOR_LINEAR_RANGE(700000, 0, 56, 12500); + static const struct spm_reg_data spm_reg_8064_cpu = { .reg_offset = spm_reg_offset_v1_1, .spm_cfg = 0x1F, @@ -187,6 +247,10 @@ static const struct spm_reg_data spm_reg_8064_cpu = { 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F }, .start_index[PM_SLEEP_MODE_STBY] = 0, .start_index[PM_SLEEP_MODE_SPC] = 2, + .set_vdd = smp_set_vdd_v1_1, + .range = &spm_v1_1_regulator_range, + .init_uV = 1300000, + .ramp_delay = 1250, }; static inline void spm_register_write(struct spm_driver_data *drv, @@ -238,6 +302,178 @@ void spm_set_low_power_mode(struct spm_driver_data *drv, spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val); } +static int spm_set_voltage_sel(struct regulator_dev *rdev, unsigned int selector) +{ + struct spm_driver_data *drv = rdev_get_drvdata(rdev); + + drv->volt_sel = selector; + + /* Always do the SAW register writes on the corresponding CPU */ + return smp_call_function_single(drv->reg_cpu, drv->reg_data->set_vdd, drv, true); +} + +static int spm_get_voltage_sel(struct regulator_dev *rdev) +{ + struct spm_driver_data *drv = rdev_get_drvdata(rdev); + + return drv->volt_sel; +} + +static const struct regulator_ops spm_reg_ops = { + .set_voltage_sel = spm_set_voltage_sel, + .get_voltage_sel = spm_get_voltage_sel, + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_time_sel = regulator_set_voltage_time_sel, +}; + +static void smp_set_vdd_v1_1(void *data) +{ + struct spm_driver_data *drv = data; + unsigned int vctl, data0, data1, avs_ctl, sts; + unsigned int vlevel, volt_sel; + bool avs_enabled; + + volt_sel = drv->volt_sel; + vlevel = volt_sel | 0x80; /* band */ + + avs_ctl = spm_register_read(drv, SPM_REG_AVS_CTL); + vctl = spm_register_read(drv, SPM_REG_VCTL); + data0 = spm_register_read(drv, SPM_REG_PMIC_DATA_0); + data1 = spm_register_read(drv, SPM_REG_PMIC_DATA_1); + + avs_enabled = avs_ctl & SPM_1_1_AVS_CTL_AVS_ENABLED; + + /* If AVS is enabled, switch it off during the voltage change */ + if (avs_enabled) { + avs_ctl &= ~SPM_1_1_AVS_CTL_AVS_ENABLED; + spm_register_write(drv, SPM_REG_AVS_CTL, avs_ctl); + } + + /* Kick the state machine back to idle */ + spm_register_write(drv, SPM_REG_RST, 1); + + vctl = FIELD_SET(vctl, SPM_VCTL_VLVL, vlevel); + data0 = FIELD_SET(data0, SPM_PMIC_DATA_0_VLVL, vlevel); + data1 = FIELD_SET(data1, SPM_PMIC_DATA_1_MIN_VSEL, volt_sel); + data1 = FIELD_SET(data1, SPM_PMIC_DATA_1_MAX_VSEL, volt_sel); + + spm_register_write(drv, SPM_REG_VCTL, vctl); + spm_register_write(drv, SPM_REG_PMIC_DATA_0, data0); + spm_register_write(drv, SPM_REG_PMIC_DATA_1, data1); + + if (read_poll_timeout_atomic(spm_register_read, + sts, sts == vlevel, + 1, 200, false, + drv, SPM_REG_STS1)) { + dev_err_ratelimited(drv->dev, "timeout setting the voltage (%x %x)!\n", sts, vlevel); + goto enable_avs; + } + + if (avs_enabled) { + unsigned int max_avs = volt_sel; + unsigned int min_avs = max(max_avs, 4U) - 4; + + avs_ctl = FIELD_SET(avs_ctl, SPM_AVS_CTL_MIN_VLVL, min_avs); + avs_ctl = FIELD_SET(avs_ctl, SPM_AVS_CTL_MAX_VLVL, max_avs); + spm_register_write(drv, SPM_REG_AVS_CTL, avs_ctl); + } + +enable_avs: + if (avs_enabled) { + avs_ctl |= SPM_1_1_AVS_CTL_AVS_ENABLED; + spm_register_write(drv, SPM_REG_AVS_CTL, avs_ctl); + } +} + 
+static int spm_get_cpu(struct device *dev) +{ + int cpu; + bool found; + + for_each_possible_cpu(cpu) { + struct device_node *cpu_node, *saw_node; + + cpu_node = of_cpu_device_node_get(cpu); + if (!cpu_node) + continue; + + saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0); + found = (saw_node == dev->of_node); + of_node_put(saw_node); + of_node_put(cpu_node); + + if (found) + return cpu; + } + + /* L2 SPM is not bound to any CPU, voltage setting is not supported */ + + return -EOPNOTSUPP; +} + +static int spm_register_regulator(struct device *dev, struct spm_driver_data *drv) +{ + struct regulator_config config = { + .dev = dev, + .driver_data = drv, + }; + struct regulator_desc *rdesc; + struct regulator_dev *rdev; + int ret; + bool found; + + if (!drv->reg_data->set_vdd) + return 0; + + rdesc = devm_kzalloc(dev, sizeof(*rdesc), GFP_KERNEL); + if (!rdesc) + return -ENOMEM; + + rdesc->name = "spm"; + rdesc->of_match = of_match_ptr("regulator"); + rdesc->type = REGULATOR_VOLTAGE; + rdesc->owner = THIS_MODULE; + rdesc->ops = &spm_reg_ops; + + rdesc->linear_ranges = drv->reg_data->range; + rdesc->n_linear_ranges = 1; + rdesc->n_voltages = rdesc->linear_ranges[rdesc->n_linear_ranges - 1].max_sel + 1; + rdesc->ramp_delay = drv->reg_data->ramp_delay; + + ret = spm_get_cpu(dev); + if (ret < 0) + return ret; + + drv->reg_cpu = ret; + dev_dbg(dev, "SAW2 bound to CPU %d\n", drv->reg_cpu); + + /* + * Program initial voltage, otherwise registration will also try + * setting the voltage, which might result in undervolting the CPU. + */ + drv->volt_sel = DIV_ROUND_UP(drv->reg_data->init_uV - rdesc->min_uV, + rdesc->uV_step); + ret = linear_range_get_selector_high(drv->reg_data->range, + drv->reg_data->init_uV, + &drv->volt_sel, + &found); + if (ret) { + dev_err(dev, "Initial uV value out of bounds\n"); + return ret; + } + + /* Always do the SAW register writes on the corresponding CPU */ + smp_call_function_single(drv->reg_cpu, drv->reg_data->set_vdd, drv, true); + + rdev = devm_regulator_register(dev, rdesc, &config); + if (IS_ERR(rdev)) { + dev_err(dev, "failed to register regulator\n"); + return PTR_ERR(rdev); + } + + return 0; +} + static const struct of_device_id spm_match_table[] = { { .compatible = "qcom,sdm660-gold-saw2-v4.1-l2", .data = &spm_reg_660_gold_l2 }, @@ -288,6 +524,7 @@ static int spm_dev_probe(struct platform_device *pdev) return -ENODEV; drv->reg_data = match_id->data; + drv->dev = &pdev->dev; platform_set_drvdata(pdev, drv); /* Write the SPM sequences first.. */ @@ -315,6 +552,9 @@ static int spm_dev_probe(struct platform_device *pdev) if (drv->reg_data->reg_offset[SPM_REG_SPM_CTL]) spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY); + if (IS_ENABLED(CONFIG_REGULATOR)) + return spm_register_regulator(&pdev->dev, drv); + return 0; } diff --git a/drivers/soc/qcom/trace-aoss.h b/drivers/soc/qcom/trace-aoss.h new file mode 100644 index 000000000000..554029b33b44 --- /dev/null +++ b/drivers/soc/qcom/trace-aoss.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM qcom_aoss + +#if !defined(_TRACE_QCOM_AOSS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_QCOM_AOSS_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(aoss_send, + TP_PROTO(const char *msg), + TP_ARGS(msg), + TP_STRUCT__entry( + __string(msg, msg) + ), + TP_fast_assign( + __assign_str(msg, msg); + ), + TP_printk("%s", __get_str(msg)) +); + +TRACE_EVENT(aoss_send_done, + TP_PROTO(const char *msg, int ret), + TP_ARGS(msg, ret), + TP_STRUCT__entry( + __string(msg, msg) + __field(int, ret) + ), + TP_fast_assign( + __assign_str(msg, msg); + __entry->ret = ret; + ), + TP_printk("%s: %d", __get_str(msg), __entry->ret) +); + +#endif /* _TRACE_QCOM_AOSS_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace-aoss + +#include <trace/define_trace.h> diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 0986672f6375..5deca747fb77 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -34,6 +34,10 @@ config ARCH_RCAR_GEN3 select SYS_SUPPORTS_SH_CMT select SYS_SUPPORTS_SH_TMU +config ARCH_RCAR_GEN4 + bool + select ARCH_RCAR_GEN3 + config ARCH_RMOBILE bool select PM @@ -240,7 +244,7 @@ config ARCH_R8A77961 config ARCH_R8A779F0 bool "ARM64 Platform support for R-Car S4-8" - select ARCH_RCAR_GEN3 + select ARCH_RCAR_GEN4 select SYSC_R8A779F0 help This enables support for the Renesas R-Car S4-8 SoC. @@ -261,18 +265,25 @@ config ARCH_R8A77970 config ARCH_R8A779A0 bool "ARM64 Platform support for R-Car V3U" - select ARCH_RCAR_GEN3 + select ARCH_RCAR_GEN4 select SYSC_R8A779A0 help This enables support for the Renesas R-Car V3U SoC. config ARCH_R8A779G0 bool "ARM64 Platform support for R-Car V4H" - select ARCH_RCAR_GEN3 + select ARCH_RCAR_GEN4 select SYSC_R8A779G0 help This enables support for the Renesas R-Car V4H SoC. +config ARCH_R8A779H0 + bool "ARM64 Platform support for R-Car V4M" + select ARCH_RCAR_GEN4 + select SYSC_R8A779H0 + help + This enables support for the Renesas R-Car V4M SoC. 
+ config ARCH_R8A774C0 bool "ARM64 Platform support for RZ/G2E" select ARCH_RCAR_GEN3 diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c index 98fd97da6cd4..7ba02f3a4a4f 100644 --- a/drivers/soc/renesas/rcar-rst.c +++ b/drivers/soc/renesas/rcar-rst.c @@ -117,6 +117,7 @@ static const struct of_device_id rcar_rst_matches[] __initconst = { { .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_v3u }, { .compatible = "renesas,r8a779f0-rst", .data = &rcar_rst_gen4 }, { .compatible = "renesas,r8a779g0-rst", .data = &rcar_rst_gen4 }, + { .compatible = "renesas,r8a779h0-rst", .data = &rcar_rst_gen4 }, { /* sentinel */ } }; diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c index 27eae1a354ab..8f9b8d3736dc 100644 --- a/drivers/soc/renesas/renesas-soc.c +++ b/drivers/soc/renesas/renesas-soc.c @@ -270,6 +270,11 @@ static const struct renesas_soc soc_rcar_v4h __initconst __maybe_unused = { .id = 0x5c, }; +static const struct renesas_soc soc_rcar_v4m __initconst __maybe_unused = { + .family = &fam_rcar_gen4, + .id = 0x5d, +}; + static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = { .family = &fam_shmobile, .id = 0x37, @@ -380,6 +385,9 @@ static const struct of_device_id renesas_socs[] __initconst __maybe_unused = { #ifdef CONFIG_ARCH_R8A779G0 { .compatible = "renesas,r8a779g0", .data = &soc_rcar_v4h }, #endif +#ifdef CONFIG_ARCH_R8A779H0 + { .compatible = "renesas,r8a779h0", .data = &soc_rcar_v4m }, +#endif #ifdef CONFIG_ARCH_R9A07G043 #ifdef CONFIG_RISCV { .compatible = "renesas,r9a07g043", .data = &soc_rz_five }, diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig index 27ec99af77e3..1a5dfdc978dc 100644 --- a/drivers/soc/samsung/Kconfig +++ b/drivers/soc/samsung/Kconfig @@ -42,6 +42,7 @@ config EXYNOS_PMU depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST) select EXYNOS_PMU_ARM_DRIVERS if ARM && ARCH_EXYNOS select MFD_CORE + select REGMAP_MMIO # There is no need to enable these drivers for ARMv8 config EXYNOS_PMU_ARM_DRIVERS diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c index 250537d7cfd6..fd8b6ac06656 100644 --- a/drivers/soc/samsung/exynos-pmu.c +++ b/drivers/soc/samsung/exynos-pmu.c @@ -5,6 +5,7 @@ // // Exynos - CPU PMU(Power Management Unit) support +#include <linux/arm-smccc.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/mfd/core.h> @@ -12,19 +13,134 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/delay.h> +#include <linux/regmap.h> #include <linux/soc/samsung/exynos-regs-pmu.h> #include <linux/soc/samsung/exynos-pmu.h> #include "exynos-pmu.h" +#define PMUALIVE_MASK GENMASK(13, 0) +#define TENSOR_SET_BITS (BIT(15) | BIT(14)) +#define TENSOR_CLR_BITS BIT(15) +#define TENSOR_SMC_PMU_SEC_REG 0x82000504 +#define TENSOR_PMUREG_READ 0 +#define TENSOR_PMUREG_WRITE 1 +#define TENSOR_PMUREG_RMW 2 + struct exynos_pmu_context { struct device *dev; const struct exynos_pmu_data *pmu_data; + struct regmap *pmureg; }; void __iomem *pmu_base_addr; static struct exynos_pmu_context *pmu_context; +/* forward declaration */ +static struct platform_driver exynos_pmu_driver; + +/* + * Tensor SoCs are configured so that PMU_ALIVE registers can only be written + * from EL3, but are still read accessible. As Linux needs to write some of + * these registers, the following functions are provided and exposed via + * regmap. + * + * Note: This SMC interface is known to be implemented on gs101 and derivative + * SoCs. 
+ */ + +/* Write to a protected PMU register. */ +static int tensor_sec_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + struct arm_smccc_res res; + unsigned long pmu_base = (unsigned long)context; + + arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg, + TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res); + + /* returns -EINVAL if access isn't allowed or 0 */ + if (res.a0) + pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0); + + return (int)res.a0; +} + +/* Read/Modify/Write a protected PMU register. */ +static int tensor_sec_reg_rmw(void *context, unsigned int reg, + unsigned int mask, unsigned int val) +{ + struct arm_smccc_res res; + unsigned long pmu_base = (unsigned long)context; + + arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg, + TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res); + + /* returns -EINVAL if access isn't allowed or 0 */ + if (res.a0) + pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0); + + return (int)res.a0; +} + +/* + * Read a protected PMU register. All PMU registers can be read by Linux. + * Note: The SMC read register is not used, as only registers that can be + * written are readable via SMC. + */ +static int tensor_sec_reg_read(void *context, unsigned int reg, + unsigned int *val) +{ + *val = pmu_raw_readl(reg); + return 0; +} + +/* + * For SoCs that have set/clear bit hardware this function can be used when + * the PMU register will be accessed by multiple masters. + * + * For example, to set bits 13:8 in PMU reg offset 0x3e80 + * tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00); + * + * Set bit 8, and clear bits 13:9 PMU reg offset 0x3e80 + * tensor_set_bits_atomic(0x3e80, 0x100, 0x3f00); + */ +static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val, + u32 mask) +{ + int ret; + unsigned int i; + + for (i = 0; i < 32; i++) { + if (!(mask & BIT(i))) + continue; + + offset &= ~TENSOR_SET_BITS; + + if (val & BIT(i)) + offset |= TENSOR_SET_BITS; + else + offset |= TENSOR_CLR_BITS; + + ret = tensor_sec_reg_write(ctx, offset, i); + if (ret) + return ret; + } + return ret; +} + +static int tensor_sec_update_bits(void *ctx, unsigned int reg, + unsigned int mask, unsigned int val) +{ + /* + * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF) + * as the target registers can be accessed by multiple masters. + */ + if (reg > PMUALIVE_MASK) + return tensor_sec_reg_rmw(ctx, reg, mask, val); + + return tensor_set_bits_atomic(ctx, reg, val, mask); +} void pmu_raw_writel(u32 val, u32 offset) { @@ -75,11 +191,41 @@ void exynos_sys_powerdown_conf(enum sys_powerdown mode) #define exynos_pmu_data_arm_ptr(data) NULL #endif +static const struct regmap_config regmap_smccfg = { + .name = "pmu_regs", + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, + .use_single_read = true, + .use_single_write = true, + .reg_read = tensor_sec_reg_read, + .reg_write = tensor_sec_reg_write, + .reg_update_bits = tensor_sec_update_bits, +}; + +static const struct regmap_config regmap_mmiocfg = { + .name = "pmu_regs", + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, + .use_single_read = true, + .use_single_write = true, +}; + +static const struct exynos_pmu_data gs101_pmu_data = { + .pmu_secure = true +}; + /* * PMU platform driver and devicetree bindings. 
*/ static const struct of_device_id exynos_pmu_of_device_ids[] = { { + .compatible = "google,gs101-pmu", + .data = &gs101_pmu_data, + }, { .compatible = "samsung,exynos3250-pmu", .data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data), }, { @@ -113,19 +259,75 @@ static const struct mfd_cell exynos_pmu_devs[] = { { .name = "exynos-clkout", }, }; +/** + * exynos_get_pmu_regmap() - Obtain pmureg regmap + * + * Find the pmureg regmap previously configured in probe() and return regmap + * pointer. + * + * Return: A pointer to regmap if found or ERR_PTR error value. + */ struct regmap *exynos_get_pmu_regmap(void) { struct device_node *np = of_find_matching_node(NULL, exynos_pmu_of_device_ids); if (np) - return syscon_node_to_regmap(np); + return exynos_get_pmu_regmap_by_phandle(np, NULL); return ERR_PTR(-ENODEV); } EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap); +/** + * exynos_get_pmu_regmap_by_phandle() - Obtain pmureg regmap via phandle + * @np: Device node holding PMU phandle property + * @propname: Name of property holding phandle value + * + * Find the pmureg regmap previously configured in probe() and return regmap + * pointer. + * + * Return: A pointer to regmap if found or ERR_PTR error value. + */ +struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np, + const char *propname) +{ + struct exynos_pmu_context *ctx; + struct device_node *pmu_np; + struct device *dev; + + if (propname) + pmu_np = of_parse_phandle(np, propname, 0); + else + pmu_np = np; + + if (!pmu_np) + return ERR_PTR(-ENODEV); + + /* + * Determine if exynos-pmu device has probed and therefore regmap + * has been created and can be returned to the caller. Otherwise we + * return -EPROBE_DEFER. + */ + dev = driver_find_device_by_of_node(&exynos_pmu_driver.driver, + (void *)pmu_np); + + if (propname) + of_node_put(pmu_np); + + if (!dev) + return ERR_PTR(-EPROBE_DEFER); + + ctx = dev_get_drvdata(dev); + + return ctx->pmureg; +} +EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle); + static int exynos_pmu_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct regmap_config pmu_regmcfg; + struct regmap *regmap; + struct resource *res; int ret; pmu_base_addr = devm_platform_ioremap_resource(pdev, 0); @@ -137,9 +339,38 @@ static int exynos_pmu_probe(struct platform_device *pdev) GFP_KERNEL); if (!pmu_context) return -ENOMEM; - pmu_context->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + pmu_context->pmu_data = of_device_get_match_data(dev); + /* For SoCs that secure PMU register writes use custom regmap */ + if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_secure) { + pmu_regmcfg = regmap_smccfg; + pmu_regmcfg.max_register = resource_size(res) - + pmu_regmcfg.reg_stride; + /* Need physical address for SMC call */ + regmap = devm_regmap_init(dev, NULL, + (void *)(uintptr_t)res->start, + &pmu_regmcfg); + } else { + /* All other SoCs use a MMIO regmap */ + pmu_regmcfg = regmap_mmiocfg; + pmu_regmcfg.max_register = resource_size(res) - + pmu_regmcfg.reg_stride; + regmap = devm_regmap_init_mmio(dev, pmu_base_addr, + &pmu_regmcfg); + } + + if (IS_ERR(regmap)) + return dev_err_probe(&pdev->dev, PTR_ERR(regmap), + "regmap init failed\n"); + + pmu_context->pmureg = regmap; + pmu_context->dev = dev; + if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init) pmu_context->pmu_data->pmu_init(); diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h index 1c652ffd79b4..0a49a2c9a08e 100644 --- 
a/drivers/soc/samsung/exynos-pmu.h +++ b/drivers/soc/samsung/exynos-pmu.h @@ -21,6 +21,7 @@ struct exynos_pmu_conf { struct exynos_pmu_data { const struct exynos_pmu_conf *pmu_config; const struct exynos_pmu_conf *pmu_config_extra; + bool pmu_secure; void (*pmu_init)(void); void (*powerdown_conf)(enum sys_powerdown); diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index f16beeabaa92..33512558af9f 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -133,6 +133,11 @@ config ARCH_TEGRA_234_SOC help Enable support for the NVIDIA Tegra234 SoC. +config ARCH_TEGRA_241_SOC + bool "NVIDIA Tegra241 SoC" + help + Enable support for the NVIDIA Tegra241 SoC. + endif endif diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index a2c28f493a75..b6bfd6729df3 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -3,11 +3,13 @@ * Copyright (c) 2013-2023, NVIDIA CORPORATION. All rights reserved. */ +#include <linux/acpi.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/kobject.h> #include <linux/init.h> #include <linux/io.h> +#include <linux/mod_devicetable.h> #include <linux/nvmem-consumer.h> #include <linux/nvmem-provider.h> #include <linux/of.h> @@ -113,6 +115,28 @@ static void tegra_fuse_restore(void *base) fuse->clk = NULL; } +static void tegra_fuse_print_sku_info(struct tegra_sku_info *tegra_sku_info) +{ + pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n", + tegra_revision_name[tegra_sku_info->revision], + tegra_sku_info->sku_id, tegra_sku_info->cpu_process_id, + tegra_sku_info->soc_process_id); + pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n", + tegra_sku_info->cpu_speedo_id, tegra_sku_info->soc_speedo_id); +} + +static int tegra_fuse_add_lookups(struct tegra_fuse *fuse) +{ + fuse->lookups = kmemdup_array(fuse->soc->lookups, sizeof(*fuse->lookups), + fuse->soc->num_lookups, GFP_KERNEL); + if (!fuse->lookups) + return -ENOMEM; + + nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups); + + return 0; +} + static int tegra_fuse_probe(struct platform_device *pdev) { void __iomem *base = fuse->base; @@ -130,15 +154,46 @@ static int tegra_fuse_probe(struct platform_device *pdev) return PTR_ERR(fuse->base); fuse->phys = res->start; - fuse->clk = devm_clk_get(&pdev->dev, "fuse"); - if (IS_ERR(fuse->clk)) { - if (PTR_ERR(fuse->clk) != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to get FUSE clock: %ld", - PTR_ERR(fuse->clk)); + /* Initialize the soc data and lookups if using ACPI boot. 
*/ + if (is_acpi_node(dev_fwnode(&pdev->dev)) && !fuse->soc) { + u8 chip; - return PTR_ERR(fuse->clk); + tegra_acpi_init_apbmisc(); + + chip = tegra_get_chip_id(); + switch (chip) { +#if defined(CONFIG_ARCH_TEGRA_194_SOC) + case TEGRA194: + fuse->soc = &tegra194_fuse_soc; + break; +#endif +#if defined(CONFIG_ARCH_TEGRA_234_SOC) + case TEGRA234: + fuse->soc = &tegra234_fuse_soc; + break; +#endif +#if defined(CONFIG_ARCH_TEGRA_241_SOC) + case TEGRA241: + fuse->soc = &tegra241_fuse_soc; + break; +#endif + default: + return dev_err_probe(&pdev->dev, -EINVAL, "Unsupported SoC: %02x\n", chip); + } + + fuse->soc->init(fuse); + tegra_fuse_print_sku_info(&tegra_sku_info); + tegra_soc_device_register(); + + err = tegra_fuse_add_lookups(fuse); + if (err) + return dev_err_probe(&pdev->dev, err, "failed to add FUSE lookups\n"); } + fuse->clk = devm_clk_get_optional(&pdev->dev, "fuse"); + if (IS_ERR(fuse->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(fuse->clk), "failed to get FUSE clock\n"); + platform_set_drvdata(pdev, fuse); fuse->dev = &pdev->dev; @@ -179,12 +234,8 @@ static int tegra_fuse_probe(struct platform_device *pdev) } fuse->rst = devm_reset_control_get_optional(&pdev->dev, "fuse"); - if (IS_ERR(fuse->rst)) { - err = PTR_ERR(fuse->rst); - dev_err(&pdev->dev, "failed to get FUSE reset: %pe\n", - fuse->rst); - return err; - } + if (IS_ERR(fuse->rst)) + return dev_err_probe(&pdev->dev, PTR_ERR(fuse->rst), "failed to get FUSE reset\n"); /* * FUSE clock is enabled at a boot time, hence this resume/suspend @@ -262,10 +313,17 @@ static const struct dev_pm_ops tegra_fuse_pm = { SET_SYSTEM_SLEEP_PM_OPS(tegra_fuse_suspend, tegra_fuse_resume) }; +static const struct acpi_device_id tegra_fuse_acpi_match[] = { + { "NVDA200F" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(acpi, tegra_fuse_acpi_match); + static struct platform_driver tegra_fuse_driver = { .driver = { .name = "tegra-fuse", .of_match_table = tegra_fuse_match, + .acpi_match_table = tegra_fuse_acpi_match, .pm = &tegra_fuse_pm, .suppress_bind_attrs = true, }, @@ -287,7 +345,16 @@ u32 __init tegra_fuse_read_early(unsigned int offset) int tegra_fuse_readl(unsigned long offset, u32 *value) { - if (!fuse->read || !fuse->clk) + if (!fuse->dev) + return -EPROBE_DEFER; + + /* + * Wait for fuse->clk to be initialized if device-tree boot is used. 
+ */ + if (is_of_node(dev_fwnode(fuse->dev)) && !fuse->clk) + return -EPROBE_DEFER; + + if (!fuse->read) return -EPROBE_DEFER; if (IS_ERR(fuse->clk)) @@ -343,7 +410,8 @@ const struct attribute_group tegra_soc_attr_group = { }; #if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \ - IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) + IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \ + IS_ENABLED(CONFIG_ARCH_TEGRA_241_SOC) static ssize_t platform_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -370,7 +438,7 @@ const struct attribute_group tegra194_soc_attr_group = { }; #endif -struct device * __init tegra_soc_device_register(void) +struct device *tegra_soc_device_register(void) { struct soc_device_attribute *attr; struct soc_device *dev; @@ -407,6 +475,7 @@ static int __init tegra_init_fuse(void) const struct of_device_id *match; struct device_node *np; struct resource regs; + int err; tegra_init_apbmisc(); @@ -497,22 +566,13 @@ static int __init tegra_init_fuse(void) fuse->soc->init(fuse); - pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n", - tegra_revision_name[tegra_sku_info.revision], - tegra_sku_info.sku_id, tegra_sku_info.cpu_process_id, - tegra_sku_info.soc_process_id); - pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n", - tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id); + tegra_fuse_print_sku_info(&tegra_sku_info); - if (fuse->soc->lookups) { - size_t size = sizeof(*fuse->lookups) * fuse->soc->num_lookups; - - fuse->lookups = kmemdup(fuse->soc->lookups, size, GFP_KERNEL); - if (fuse->lookups) - nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups); - } + err = tegra_fuse_add_lookups(fuse); + if (err) + pr_err("failed to add FUSE lookups\n"); - return 0; + return err; } early_initcall(tegra_init_fuse); diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c index e94d46372a63..eb14e5ff5a0a 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra30.c +++ b/drivers/soc/tegra/fuse/fuse-tegra30.c @@ -38,7 +38,8 @@ defined(CONFIG_ARCH_TEGRA_210_SOC) || \ defined(CONFIG_ARCH_TEGRA_186_SOC) || \ defined(CONFIG_ARCH_TEGRA_194_SOC) || \ - defined(CONFIG_ARCH_TEGRA_234_SOC) + defined(CONFIG_ARCH_TEGRA_234_SOC) || \ + defined(CONFIG_ARCH_TEGRA_241_SOC) static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset) { if (WARN_ON(!fuse->base)) @@ -678,3 +679,23 @@ const struct tegra_fuse_soc tegra234_fuse_soc = { .clk_suspend_on = false, }; #endif + +#if defined(CONFIG_ARCH_TEGRA_241_SOC) +static const struct tegra_fuse_info tegra241_fuse_info = { + .read = tegra30_fuse_read, + .size = 0x16008, + .spare = 0xcf0, +}; + +static const struct nvmem_keepout tegra241_fuse_keepouts[] = { + { .start = 0xc, .end = 0x1600c } +}; + +const struct tegra_fuse_soc tegra241_fuse_soc = { + .init = tegra30_fuse_init, + .info = &tegra241_fuse_info, + .keepouts = tegra241_fuse_keepouts, + .num_keepouts = ARRAY_SIZE(tegra241_fuse_keepouts), + .soc_attr_group = &tegra194_soc_attr_group, +}; +#endif diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h index 90f23be73894..9fee6ad6ad9e 100644 --- a/drivers/soc/tegra/fuse/fuse.h +++ b/drivers/soc/tegra/fuse/fuse.h @@ -69,6 +69,7 @@ struct tegra_fuse { void tegra_init_revision(void); void tegra_init_apbmisc(void); +void tegra_acpi_init_apbmisc(void); u32 __init tegra_fuse_read_spare(unsigned int spare); u32 __init tegra_fuse_read_early(unsigned int offset); @@ -123,7 +124,8 @@ extern const struct tegra_fuse_soc tegra186_fuse_soc; #endif #if 
IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \ - IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) + IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \ + IS_ENABLED(CONFIG_ARCH_TEGRA_241_SOC) extern const struct attribute_group tegra194_soc_attr_group; #endif @@ -135,4 +137,8 @@ extern const struct tegra_fuse_soc tegra194_fuse_soc; extern const struct tegra_fuse_soc tegra234_fuse_soc; #endif +#ifdef CONFIG_ARCH_TEGRA_241_SOC +extern const struct tegra_fuse_soc tegra241_fuse_soc; +#endif + #endif diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c index da970f3dbf35..e2ca5d55fd31 100644 --- a/drivers/soc/tegra/fuse/tegra-apbmisc.c +++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c @@ -3,9 +3,11 @@ * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved. */ +#include <linux/acpi.h> #include <linux/export.h> #include <linux/io.h> #include <linux/kernel.h> +#include <linux/mod_devicetable.h> #include <linux/of.h> #include <linux/of_address.h> @@ -62,6 +64,7 @@ bool tegra_is_silicon(void) switch (tegra_get_chip_id()) { case TEGRA194: case TEGRA234: + case TEGRA241: case TEGRA264: if (tegra_get_platform() == 0) return true; @@ -160,9 +163,34 @@ void __init tegra_init_revision(void) tegra_sku_info.platform = tegra_get_platform(); } -void __init tegra_init_apbmisc(void) +static void tegra_init_apbmisc_resources(struct resource *apbmisc, + struct resource *straps) { void __iomem *strapping_base; + + apbmisc_base = ioremap(apbmisc->start, resource_size(apbmisc)); + if (apbmisc_base) + chipid = readl_relaxed(apbmisc_base + 4); + else + pr_err("failed to map APBMISC registers\n"); + + strapping_base = ioremap(straps->start, resource_size(straps)); + if (strapping_base) { + strapping = readl_relaxed(strapping_base); + iounmap(strapping_base); + } else { + pr_err("failed to map strapping options registers\n"); + } +} + +/** + * tegra_init_apbmisc - Initializes Tegra APBMISC and Strapping registers. + * + * This is called during early init as some of the old 32-bit ARM code needs + * information from the APBMISC registers very early during boot. + */ +void __init tegra_init_apbmisc(void) +{ struct resource apbmisc, straps; struct device_node *np; @@ -219,23 +247,73 @@ void __init tegra_init_apbmisc(void) } } - apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc)); - if (!apbmisc_base) { - pr_err("failed to map APBMISC registers\n"); - } else { - chipid = readl_relaxed(apbmisc_base + 4); + tegra_init_apbmisc_resources(&apbmisc, &straps); + long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code"); + +put: + of_node_put(np); +} + +#ifdef CONFIG_ACPI +static const struct acpi_device_id apbmisc_acpi_match[] = { + { "NVDA2010" }, + { /* sentinel */ } +}; + +void tegra_acpi_init_apbmisc(void) +{ + struct resource *resources[2] = { NULL }; + struct resource_entry *rentry; + struct acpi_device *adev = NULL; + struct list_head resource_list; + int rcount = 0; + int ret; + + adev = acpi_dev_get_first_match_dev(apbmisc_acpi_match[0].id, NULL, -1); + if (!adev) + return; + + INIT_LIST_HEAD(&resource_list); + + ret = acpi_dev_get_memory_resources(adev, &resource_list); + if (ret < 0) { + pr_err("failed to get APBMISC memory resources"); + goto out_put_acpi_dev; } - strapping_base = ioremap(straps.start, resource_size(&straps)); - if (!strapping_base) { - pr_err("failed to map strapping options registers\n"); - } else { - strapping = readl_relaxed(strapping_base); - iounmap(strapping_base); + /* + * Get required memory resources. + * + * resources[0]: apbmisc. 
+ * resources[1]: straps. + */ + resource_list_for_each_entry(rentry, &resource_list) { + if (rcount >= ARRAY_SIZE(resources)) + break; + + resources[rcount++] = rentry->res; } - long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code"); + if (!resources[0]) { + pr_err("failed to get APBMISC registers\n"); + goto out_free_resource_list; + } -put: - of_node_put(np); + if (!resources[1]) { + pr_err("failed to get strapping options registers\n"); + goto out_free_resource_list; + } + + tegra_init_apbmisc_resources(resources[0], resources[1]); + +out_free_resource_list: + acpi_dev_free_resource_list(&resource_list); + +out_put_acpi_dev: + acpi_dev_put(adev); +} +#else +void tegra_acpi_init_apbmisc(void) +{ } +#endif diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index f432aa022ace..d6bfcea5ee65 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -3,7 +3,7 @@ * drivers/soc/tegra/pmc.c * * Copyright (c) 2010 Google, Inc - * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved. * * Author: * Colin Cross <ccross@google.com> @@ -384,6 +384,7 @@ struct tegra_pmc_soc { bool has_blink_output; bool has_usb_sleepwalk; bool supports_core_domain; + bool has_single_mmio_aperture; }; /** @@ -1777,30 +1778,6 @@ static int tegra_io_pad_get_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id) return TEGRA_IO_PAD_VOLTAGE_3V3; } -/** - * tegra_io_rail_power_on() - enable power to I/O rail - * @id: Tegra I/O pad ID for which to enable power - * - * See also: tegra_io_pad_power_enable() - */ -int tegra_io_rail_power_on(unsigned int id) -{ - return tegra_io_pad_power_enable(id); -} -EXPORT_SYMBOL(tegra_io_rail_power_on); - -/** - * tegra_io_rail_power_off() - disable power to I/O rail - * @id: Tegra I/O pad ID for which to disable power - * - * See also: tegra_io_pad_power_disable() - */ -int tegra_io_rail_power_off(unsigned int id) -{ - return tegra_io_pad_power_disable(id); -} -EXPORT_SYMBOL(tegra_io_rail_power_off); - #ifdef CONFIG_PM_SLEEP enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void) { @@ -2909,31 +2886,33 @@ static int tegra_pmc_probe(struct platform_device *pdev) if (IS_ERR(base)) return PTR_ERR(base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wake"); - if (res) { + if (pmc->soc->has_single_mmio_aperture) { + pmc->wake = base; + pmc->aotag = base; + pmc->scratch = base; + } else { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "wake"); pmc->wake = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pmc->wake)) return PTR_ERR(pmc->wake); - } else { - pmc->wake = base; - } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aotag"); - if (res) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "aotag"); pmc->aotag = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pmc->aotag)) return PTR_ERR(pmc->aotag); - } else { - pmc->aotag = base; - } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scratch"); - if (res) { - pmc->scratch = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(pmc->scratch)) - return PTR_ERR(pmc->scratch); - } else { - pmc->scratch = base; + /* "scratch" is an optional aperture */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "scratch"); + if (res) { + pmc->scratch = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pmc->scratch)) + return PTR_ERR(pmc->scratch); + } else { + pmc->scratch = NULL; + } } pmc->clk = devm_clk_get_optional(&pdev->dev, "pclk"); @@ -2945,12 +2924,15 @@ 
static int tegra_pmc_probe(struct platform_device *pdev) * PMC should be last resort for restarting since it soft-resets * CPU without resetting everything else. */ - err = devm_register_reboot_notifier(&pdev->dev, - &tegra_pmc_reboot_notifier); - if (err) { - dev_err(&pdev->dev, "unable to register reboot notifier, %d\n", - err); - return err; + if (pmc->scratch) { + err = devm_register_reboot_notifier(&pdev->dev, + &tegra_pmc_reboot_notifier); + if (err) { + dev_err(&pdev->dev, + "unable to register reboot notifier, %d\n", + err); + return err; + } } err = devm_register_sys_off_handler(&pdev->dev, @@ -3324,6 +3306,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = { .num_pmc_clks = 0, .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const char * const tegra30_powergates[] = { @@ -3385,6 +3368,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = { .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data), .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const char * const tegra114_powergates[] = { @@ -3442,6 +3426,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = { .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data), .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const char * const tegra124_powergates[] = { @@ -3586,6 +3571,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = { .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data), .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const char * const tegra210_powergates[] = { @@ -3749,6 +3735,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = { .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data), .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const struct tegra_io_pad_soc tegra186_io_pads[] = { @@ -3946,6 +3933,7 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = { .num_pmc_clks = 0, .has_blink_output = false, .has_usb_sleepwalk = false, + .has_single_mmio_aperture = false, }; static const struct tegra_io_pad_soc tegra194_io_pads[] = { @@ -4131,6 +4119,7 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = { .num_pmc_clks = 0, .has_blink_output = false, .has_usb_sleepwalk = false, + .has_single_mmio_aperture = false, }; static const struct tegra_io_pad_soc tegra234_io_pads[] = { @@ -4220,6 +4209,7 @@ static const char * const tegra234_reset_sources[] = { }; static const struct tegra_wake_event tegra234_wake_events[] = { + TEGRA_WAKE_GPIO("sd-wake", 8, 0, TEGRA234_MAIN_GPIO(G, 7)), TEGRA_WAKE_IRQ("pmu", 24, 209), TEGRA_WAKE_GPIO("power", 29, 1, TEGRA234_AON_GPIO(EE, 4)), TEGRA_WAKE_GPIO("mgbe", 56, 0, TEGRA234_MAIN_GPIO(Y, 3)), @@ -4259,6 +4249,7 @@ static const struct tegra_pmc_soc tegra234_pmc_soc = { .pmc_clks_data = NULL, .num_pmc_clks = 0, .has_blink_output = false, + .has_single_mmio_aperture = false, }; static const struct of_device_id tegra_pmc_match[] = { diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index f94e0d370d46..1a8d03958dff 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1927,24 +1927,18 @@ static void cqspi_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } -static int cqspi_suspend(struct device *dev) +static int cqspi_runtime_suspend(struct device *dev) { struct cqspi_st *cqspi = dev_get_drvdata(dev); - struct spi_controller *host = dev_get_drvdata(dev); - int 
ret; - ret = spi_controller_suspend(host); cqspi_controller_enable(cqspi, 0); - clk_disable_unprepare(cqspi->clk); - - return ret; + return 0; } -static int cqspi_resume(struct device *dev) +static int cqspi_runtime_resume(struct device *dev) { struct cqspi_st *cqspi = dev_get_drvdata(dev); - struct spi_controller *host = dev_get_drvdata(dev); clk_prepare_enable(cqspi->clk); cqspi_wait_idle(cqspi); @@ -1952,12 +1946,27 @@ static int cqspi_resume(struct device *dev) cqspi->current_cs = -1; cqspi->sclk = 0; + return 0; +} + +static int cqspi_suspend(struct device *dev) +{ + struct cqspi_st *cqspi = dev_get_drvdata(dev); + + return spi_controller_suspend(cqspi->host); +} - return spi_controller_resume(host); +static int cqspi_resume(struct device *dev) +{ + struct cqspi_st *cqspi = dev_get_drvdata(dev); + + return spi_controller_resume(cqspi->host); } -static DEFINE_RUNTIME_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend, - cqspi_resume, NULL); +static const struct dev_pm_ops cqspi_dev_pm_ops = { + RUNTIME_PM_OPS(cqspi_runtime_suspend, cqspi_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(cqspi_suspend, cqspi_resume) +}; static const struct cqspi_driver_platdata cdns_qspi = { .quirks = CQSPI_DISABLE_DAC_MODE, diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c index b24190526ce9..adf19e8c4c8a 100644 --- a/drivers/spi/spi-cs42l43.c +++ b/drivers/spi/spi-cs42l43.c @@ -148,8 +148,7 @@ static void cs42l43_set_cs(struct spi_device *spi, bool is_high) { struct cs42l43_spi *priv = spi_controller_get_devdata(spi->controller); - if (spi_get_chipselect(spi, 0) == 0) - regmap_write(priv->regmap, CS42L43_SPI_CONFIG2, !is_high); + regmap_write(priv->regmap, CS42L43_SPI_CONFIG2, !is_high); } static int cs42l43_prepare_message(struct spi_controller *ctlr, struct spi_message *msg) diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 546cdce525fc..833a1bb7a914 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -2,6 +2,7 @@ // Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. 
// Copyright (C) 2008 Juergen Beisert +#include <linux/bits.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> @@ -660,15 +661,15 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx, << MX51_ECSPI_CTRL_BL_OFFSET; else { if (spi_imx->usedma) { - ctrl |= (spi_imx->bits_per_word * - spi_imx_bytes_per_word(spi_imx->bits_per_word) - 1) + ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET; } else { if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST) - ctrl |= (MX51_ECSPI_CTRL_MAX_BURST - 1) + ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1) << MX51_ECSPI_CTRL_BL_OFFSET; else - ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1) + ctrl |= spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word, + BITS_PER_BYTE) * spi_imx->bits_per_word << MX51_ECSPI_CTRL_BL_OFFSET; } } diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c index 07d20ca1164c..4337ca51d7aa 100644 --- a/drivers/spi/spi-intel-pci.c +++ b/drivers/spi/spi-intel-pci.c @@ -85,6 +85,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = { { PCI_VDEVICE(INTEL, 0xa2a4), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0xa823), (unsigned long)&cnl_info }, { }, }; MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids); diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 1bf080339b5a..88cbe4f00cc3 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -39,6 +39,7 @@ #include <linux/spi/spi.h> #include <linux/spi/mxs-spi.h> #include <trace/events/spi.h> +#include <linux/dma/mxs-dma.h> #define DRIVER_NAME "mxs-spi" @@ -252,7 +253,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, desc = dmaengine_prep_slave_sg(ssp->dmach, &dma_xfer[sg_count].sg, 1, (flags & TXRX_WRITE) ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END); if (!desc) { dev_err(ssp->dev, diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index a0c9fea908f5..ddf1c684bcc7 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -53,8 +53,6 @@ /* per-register bitmasks: */ #define OMAP2_MCSPI_IRQSTATUS_EOW BIT(17) -#define OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY BIT(0) -#define OMAP2_MCSPI_IRQSTATUS_RX0_FULL BIT(2) #define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0) #define OMAP2_MCSPI_MODULCTRL_MS BIT(2) @@ -293,7 +291,7 @@ static void omap2_mcspi_set_mode(struct spi_controller *ctlr) } static void omap2_mcspi_set_fifo(const struct spi_device *spi, - struct spi_transfer *t, int enable, int dma_enabled) + struct spi_transfer *t, int enable) { struct spi_controller *ctlr = spi->controller; struct omap2_mcspi_cs *cs = spi->controller_state; @@ -314,28 +312,20 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2; else max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH; - if (dma_enabled) - wcnt = t->len / bytes_per_word; - else - wcnt = 0; + + wcnt = t->len / bytes_per_word; if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT) goto disable_fifo; xferlevel = wcnt << 16; if (t->rx_buf != NULL) { chconf |= OMAP2_MCSPI_CHCONF_FFER; - if (dma_enabled) - xferlevel |= (bytes_per_word - 1) << 8; - else - xferlevel |= (max_fifo_depth - 1) << 8; + xferlevel |= (bytes_per_word - 1) << 8; } if (t->tx_buf != NULL) { chconf |= OMAP2_MCSPI_CHCONF_FFET; - if (dma_enabled) - xferlevel |= bytes_per_word - 1; - else - xferlevel |= (max_fifo_depth - 1); + xferlevel |= bytes_per_word - 1; } mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel); @@ -892,113 +882,6 @@ out: return count - c; } -static unsigned -omap2_mcspi_txrx_piofifo(struct spi_device *spi, struct spi_transfer *xfer) -{ - struct omap2_mcspi_cs *cs = spi->controller_state; - struct omap2_mcspi *mcspi; - unsigned int count, c; - unsigned int iter, cwc; - int last_request; - void __iomem *base = cs->base; - void __iomem *tx_reg; - void __iomem *rx_reg; - void __iomem *chstat_reg; - void __iomem *irqstat_reg; - int word_len, bytes_per_word; - u8 *rx; - const u8 *tx; - - mcspi = spi_controller_get_devdata(spi->controller); - count = xfer->len; - c = count; - word_len = cs->word_len; - bytes_per_word = mcspi_bytes_per_word(word_len); - - /* - * We store the pre-calculated register addresses on stack to speed - * up the transfer loop. 
- */ - tx_reg = base + OMAP2_MCSPI_TX0; - rx_reg = base + OMAP2_MCSPI_RX0; - chstat_reg = base + OMAP2_MCSPI_CHSTAT0; - irqstat_reg = base + OMAP2_MCSPI_IRQSTATUS; - - if (c < (word_len >> 3)) - return 0; - - rx = xfer->rx_buf; - tx = xfer->tx_buf; - - do { - /* calculate number of words in current iteration */ - cwc = min((unsigned int)mcspi->fifo_depth / bytes_per_word, - c / bytes_per_word); - last_request = cwc != (mcspi->fifo_depth / bytes_per_word); - if (tx) { - if (mcspi_wait_for_reg_bit(irqstat_reg, - OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY) < 0) { - dev_err(&spi->dev, "TX Empty timed out\n"); - goto out; - } - writel_relaxed(OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY, irqstat_reg); - - for (iter = 0; iter < cwc; iter++, tx += bytes_per_word) { - if (bytes_per_word == 1) - writel_relaxed(*tx, tx_reg); - else if (bytes_per_word == 2) - writel_relaxed(*((u16 *)tx), tx_reg); - else if (bytes_per_word == 4) - writel_relaxed(*((u32 *)tx), tx_reg); - } - } - - if (rx) { - if (!last_request && - mcspi_wait_for_reg_bit(irqstat_reg, - OMAP2_MCSPI_IRQSTATUS_RX0_FULL) < 0) { - dev_err(&spi->dev, "RX_FULL timed out\n"); - goto out; - } - writel_relaxed(OMAP2_MCSPI_IRQSTATUS_RX0_FULL, irqstat_reg); - - for (iter = 0; iter < cwc; iter++, rx += bytes_per_word) { - if (last_request && - mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_RXS) < 0) { - dev_err(&spi->dev, "RXS timed out\n"); - goto out; - } - if (bytes_per_word == 1) - *rx = readl_relaxed(rx_reg); - else if (bytes_per_word == 2) - *((u16 *)rx) = readl_relaxed(rx_reg); - else if (bytes_per_word == 4) - *((u32 *)rx) = readl_relaxed(rx_reg); - } - } - - if (last_request) { - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_EOT) < 0) { - dev_err(&spi->dev, "EOT timed out\n"); - goto out; - } - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_TXFFE) < 0) { - dev_err(&spi->dev, "TXFFE timed out\n"); - goto out; - } - omap2_mcspi_set_enable(spi, 0); - } - c -= cwc * bytes_per_word; - } while (c >= bytes_per_word); - -out: - omap2_mcspi_set_enable(spi, 1); - return count - c; -} - static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz) { u32 div; @@ -1323,9 +1206,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && ctlr->cur_msg_mapped && ctlr->can_dma(ctlr, spi, t)) - omap2_mcspi_set_fifo(spi, t, 1, 1); - else if (t->len > OMAP2_MCSPI_MAX_FIFODEPTH) - omap2_mcspi_set_fifo(spi, t, 1, 0); + omap2_mcspi_set_fifo(spi, t, 1); omap2_mcspi_set_enable(spi, 1); @@ -1338,8 +1219,6 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, ctlr->cur_msg_mapped && ctlr->can_dma(ctlr, spi, t)) count = omap2_mcspi_txrx_dma(spi, t); - else if (mcspi->fifo_depth > 0) - count = omap2_mcspi_txrx_piofifo(spi, t); else count = omap2_mcspi_txrx_pio(spi, t); @@ -1352,7 +1231,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, omap2_mcspi_set_enable(spi, 0); if (mcspi->fifo_depth > 0) - omap2_mcspi_set_fifo(spi, t, 0, 0); + omap2_mcspi_set_fifo(spi, t, 0); out: /* Restore defaults if they were overriden */ @@ -1375,7 +1254,7 @@ out: omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH)); if (mcspi->fifo_depth > 0 && t) - omap2_mcspi_set_fifo(spi, t, 0, 0); + omap2_mcspi_set_fifo(spi, t, 0); return status; } diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c index 03aab661be9d..82d6264841fc 100644 --- a/drivers/spi/spi-ppc4xx.c +++ b/drivers/spi/spi-ppc4xx.c @@ -25,11 +25,13 @@ #include <linux/slab.h> #include <linux/errno.h> #include 
<linux/wait.h> +#include <linux/platform_device.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/interrupt.h> #include <linux/delay.h> +#include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> @@ -166,10 +168,8 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t) int scr; u8 cdm = 0; u32 speed; - u8 bits_per_word; /* Start with the generic configuration for this device. */ - bits_per_word = spi->bits_per_word; speed = spi->max_speed_hz; /* @@ -177,9 +177,6 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t) * the transfer to overwrite the generic configuration with zeros. */ if (t) { - if (t->bits_per_word) - bits_per_word = t->bits_per_word; - if (t->speed_hz) speed = min(t->speed_hz, spi->max_speed_hz); } diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index cfc3b1ddbd22..6f12e4fb2e2e 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -136,14 +136,14 @@ struct sh_msiof_spi_priv { /* SIFCTR */ #define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */ -#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */ -#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */ -#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */ -#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */ -#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */ -#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */ -#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */ -#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */ +#define SIFCTR_TFWM_64 (0UL << 29) /* Transfer Request when 64 empty stages */ +#define SIFCTR_TFWM_32 (1UL << 29) /* Transfer Request when 32 empty stages */ +#define SIFCTR_TFWM_24 (2UL << 29) /* Transfer Request when 24 empty stages */ +#define SIFCTR_TFWM_16 (3UL << 29) /* Transfer Request when 16 empty stages */ +#define SIFCTR_TFWM_12 (4UL << 29) /* Transfer Request when 12 empty stages */ +#define SIFCTR_TFWM_8 (5UL << 29) /* Transfer Request when 8 empty stages */ +#define SIFCTR_TFWM_4 (6UL << 29) /* Transfer Request when 4 empty stages */ +#define SIFCTR_TFWM_1 (7UL << 29) /* Transfer Request when 1 empty stage */ #define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */ #define SIFCTR_TFUA_SHIFT 20 #define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT) diff --git a/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt b/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt index b1f9474f36d5..f34a95611645 100644 --- a/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt +++ b/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt @@ -48,7 +48,7 @@ Example of usage: ----------------- This example places the bridge on top of the i.MX WEIM parallel bus, see: -Documentation/devicetree/bindings/bus/imx-weim.txt +Documentation/devicetree/bindings/memory-controllers/fsl/fsl,imx-weim.yaml &weim { controller@0,0 { diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index e748a5d04e97..9149d41fe65b 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ 
b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -608,7 +608,7 @@ static void ad5933_work(struct work_struct *work) struct ad5933_state, work.work); struct iio_dev *indio_dev = i2c_get_clientdata(st->client); __be16 buf[2]; - int val[2]; + u16 val[2]; unsigned char status; int ret; diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c index f44e6412f4e3..d0db2efe0045 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c @@ -3723,12 +3723,10 @@ apply_min_padding: static int atomisp_set_crop(struct atomisp_device *isp, const struct v4l2_mbus_framefmt *format, + struct v4l2_subdev_state *sd_state, int which) { struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr]; - struct v4l2_subdev_state pad_state = { - .pads = &input->pad_cfg, - }; struct v4l2_subdev_selection sel = { .which = which, .target = V4L2_SEL_TGT_CROP, @@ -3754,7 +3752,7 @@ static int atomisp_set_crop(struct atomisp_device *isp, sel.r.left = ((input->native_rect.width - sel.r.width) / 2) & ~1; sel.r.top = ((input->native_rect.height - sel.r.height) / 2) & ~1; - ret = v4l2_subdev_call(input->camera, pad, set_selection, &pad_state, &sel); + ret = v4l2_subdev_call(input->camera, pad, set_selection, sd_state, &sel); if (ret) dev_err(isp->dev, "Error setting crop to %ux%u @%ux%u: %d\n", sel.r.width, sel.r.height, sel.r.left, sel.r.top, ret); @@ -3770,9 +3768,6 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f, const struct atomisp_format_bridge *fmt, *snr_fmt; struct atomisp_sub_device *asd = &isp->asd; struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr]; - struct v4l2_subdev_state pad_state = { - .pads = &input->pad_cfg, - }; struct v4l2_subdev_format format = { .which = V4L2_SUBDEV_FORMAT_TRY, }; @@ -3809,11 +3804,16 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f, dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n", format.format.width, format.format.height); - ret = atomisp_set_crop(isp, &format.format, V4L2_SUBDEV_FORMAT_TRY); - if (ret) - return ret; + v4l2_subdev_lock_state(input->try_sd_state); + + ret = atomisp_set_crop(isp, &format.format, input->try_sd_state, + V4L2_SUBDEV_FORMAT_TRY); + if (ret == 0) + ret = v4l2_subdev_call(input->camera, pad, set_fmt, + input->try_sd_state, &format); + + v4l2_subdev_unlock_state(input->try_sd_state); - ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &format); if (ret) return ret; @@ -4238,9 +4238,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p struct atomisp_device *isp = asd->isp; struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr]; const struct atomisp_format_bridge *format; - struct v4l2_subdev_state pad_state = { - .pads = &input->pad_cfg, - }; + struct v4l2_subdev_state *act_sd_state; struct v4l2_subdev_format vformat = { .which = V4L2_SUBDEV_FORMAT_TRY, }; @@ -4268,12 +4266,18 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p /* Disable dvs if resolution can't be supported by sensor */ if (asd->params.video_dis_en && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) { - ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_TRY); - if (ret) - return ret; + v4l2_subdev_lock_state(input->try_sd_state); + + ret = atomisp_set_crop(isp, &vformat.format, input->try_sd_state, + V4L2_SUBDEV_FORMAT_TRY); + if (ret == 0) { + vformat.which = V4L2_SUBDEV_FORMAT_TRY; + ret = 
v4l2_subdev_call(input->camera, pad, set_fmt, + input->try_sd_state, &vformat); + } + + v4l2_subdev_unlock_state(input->try_sd_state); - vformat.which = V4L2_SUBDEV_FORMAT_TRY; - ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &vformat); if (ret) return ret; @@ -4291,12 +4295,18 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p } } - ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_ACTIVE); - if (ret) - return ret; + act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera); + + ret = atomisp_set_crop(isp, &vformat.format, act_sd_state, + V4L2_SUBDEV_FORMAT_ACTIVE); + if (ret == 0) { + vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE; + ret = v4l2_subdev_call(input->camera, pad, set_fmt, act_sd_state, &vformat); + } + + if (act_sd_state) + v4l2_subdev_unlock_state(act_sd_state); - vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE; - ret = v4l2_subdev_call(input->camera, pad, set_fmt, NULL, &vformat); if (ret) return ret; diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h index f7b4bee9574b..d5b077e602ca 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_internal.h +++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h @@ -132,8 +132,8 @@ struct atomisp_input_subdev { /* Sensor rects for sensors which support crop */ struct v4l2_rect native_rect; struct v4l2_rect active_rect; - /* Sensor pad_cfg for which == V4L2_SUBDEV_FORMAT_TRY calls */ - struct v4l2_subdev_pad_config pad_cfg; + /* Sensor state for which == V4L2_SUBDEV_FORMAT_TRY calls */ + struct v4l2_subdev_state *try_sd_state; struct v4l2_subdev *motor; diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c index 01b7fa9b56a2..5b2d88c02d36 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c +++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c @@ -781,12 +781,20 @@ static int atomisp_enum_framesizes(struct file *file, void *priv, .which = V4L2_SUBDEV_FORMAT_ACTIVE, .code = input->code, }; + struct v4l2_subdev_state *act_sd_state; int ret; + if (!input->camera) + return -EINVAL; + if (input->crop_support) return atomisp_enum_framesizes_crop(isp, fsize); - ret = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse); + act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera); + ret = v4l2_subdev_call(input->camera, pad, enum_frame_size, + act_sd_state, &fse); + if (act_sd_state) + v4l2_subdev_unlock_state(act_sd_state); if (ret) return ret; @@ -803,18 +811,25 @@ static int atomisp_enum_frameintervals(struct file *file, void *priv, struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; + struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr]; struct v4l2_subdev_frame_interval_enum fie = { - .code = atomisp_in_fmt_conv[0].code, + .code = atomisp_in_fmt_conv[0].code, .index = fival->index, .width = fival->width, .height = fival->height, .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; + struct v4l2_subdev_state *act_sd_state; int ret; - ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - pad, enum_frame_interval, NULL, - &fie); + if (!input->camera) + return -EINVAL; + + act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera); + ret = v4l2_subdev_call(input->camera, pad, enum_frame_interval, + act_sd_state, &fie); + if (act_sd_state) + v4l2_subdev_unlock_state(act_sd_state); 
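/*
 * A minimal sketch of the locking pattern the atomisp hunks above converge
 * on when issuing sensor pad ops: take the sensor's active subdev state
 * (may be NULL for subdevs without a centrally managed state), pass it to
 * the pad op, then unlock. The helper name example_get_active_fmt() is
 * invented for illustration; the v4l2-subdev calls are the ones used in
 * the diff.
 */
static int example_get_active_fmt(struct v4l2_subdev *sensor,
				  struct v4l2_subdev_format *fmt)
{
	struct v4l2_subdev_state *act_sd_state;
	int ret;

	fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;

	/* May return NULL for subdevs without a centrally managed state. */
	act_sd_state = v4l2_subdev_lock_and_get_active_state(sensor);
	ret = v4l2_subdev_call(sensor, pad, get_fmt, act_sd_state, fmt);
	if (act_sd_state)
		v4l2_subdev_unlock_state(act_sd_state);

	return ret;
}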
if (ret) return ret; @@ -830,30 +845,25 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh, struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; + struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr]; struct v4l2_subdev_mbus_code_enum code = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; const struct atomisp_format_bridge *format; - struct v4l2_subdev *camera; + struct v4l2_subdev_state *act_sd_state; unsigned int i, fi = 0; - int rval; + int ret; - camera = isp->inputs[asd->input_curr].camera; - if(!camera) { - dev_err(isp->dev, "%s(): camera is NULL, device is %s\n", - __func__, vdev->name); + if (!input->camera) return -EINVAL; - } - rval = v4l2_subdev_call(camera, pad, enum_mbus_code, NULL, &code); - if (rval == -ENOIOCTLCMD) { - dev_warn(isp->dev, - "enum_mbus_code pad op not supported by %s. Please fix your sensor driver!\n", - camera->name); - } - - if (rval) - return rval; + act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera); + ret = v4l2_subdev_call(input->camera, pad, enum_mbus_code, + act_sd_state, &code); + if (act_sd_state) + v4l2_subdev_unlock_state(act_sd_state); + if (ret) + return ret; for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) { format = &atomisp_output_fmts[i]; diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c index c1c8501ec61f..547e1444ad97 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c @@ -862,6 +862,9 @@ static void atomisp_unregister_entities(struct atomisp_device *isp) v4l2_device_unregister(&isp->v4l2_dev); media_device_unregister(&isp->media_dev); media_device_cleanup(&isp->media_dev); + + for (i = 0; i < isp->input_cnt; i++) + __v4l2_subdev_state_free(isp->inputs[i].try_sd_state); } static int atomisp_register_entities(struct atomisp_device *isp) @@ -933,32 +936,49 @@ v4l2_device_failed: static void atomisp_init_sensor(struct atomisp_input_subdev *input) { + static struct lock_class_key try_sd_state_key; struct v4l2_subdev_mbus_code_enum mbus_code_enum = { }; struct v4l2_subdev_frame_size_enum fse = { }; - struct v4l2_subdev_state sd_state = { - .pads = &input->pad_cfg, - }; struct v4l2_subdev_selection sel = { }; + struct v4l2_subdev_state *try_sd_state, *act_sd_state; int i, err; + /* + * FIXME: Drivers are not supposed to use __v4l2_subdev_state_alloc() + * but atomisp needs this for try_fmt on its /dev/video# node since + * it emulates a normal v4l2 device there, passing through try_fmt / + * set_fmt to the sensor. 
+ */ + try_sd_state = __v4l2_subdev_state_alloc(input->camera, + "atomisp:try_sd_state->lock", &try_sd_state_key); + if (IS_ERR(try_sd_state)) + return; + + input->try_sd_state = try_sd_state; + + act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera); + mbus_code_enum.which = V4L2_SUBDEV_FORMAT_ACTIVE; - err = v4l2_subdev_call(input->camera, pad, enum_mbus_code, NULL, &mbus_code_enum); + err = v4l2_subdev_call(input->camera, pad, enum_mbus_code, + act_sd_state, &mbus_code_enum); if (!err) input->code = mbus_code_enum.code; sel.which = V4L2_SUBDEV_FORMAT_ACTIVE; sel.target = V4L2_SEL_TGT_NATIVE_SIZE; - err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel); + err = v4l2_subdev_call(input->camera, pad, get_selection, + act_sd_state, &sel); if (err) - return; + goto unlock_act_sd_state; input->native_rect = sel.r; sel.which = V4L2_SUBDEV_FORMAT_ACTIVE; sel.target = V4L2_SEL_TGT_CROP_DEFAULT; - err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel); + err = v4l2_subdev_call(input->camera, pad, get_selection, + act_sd_state, &sel); if (err) - return; + goto unlock_act_sd_state; input->active_rect = sel.r; @@ -973,7 +993,8 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input) fse.code = input->code; fse.which = V4L2_SUBDEV_FORMAT_ACTIVE; - err = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse); + err = v4l2_subdev_call(input->camera, pad, enum_frame_size, + act_sd_state, &fse); if (err) break; @@ -989,22 +1010,26 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input) * for padding, set the crop rect to cover the entire sensor instead * of only the default active area. * - * Do this for both try and active formats since the try_crop rect in - * pad_cfg may influence (clamp) future try_fmt calls with which == try. + * Do this for both try and active formats since the crop rect in + * try_sd_state may influence (clamp size) in calls with which == try. 
*/ sel.which = V4L2_SUBDEV_FORMAT_TRY; sel.target = V4L2_SEL_TGT_CROP; sel.r = input->native_rect; - err = v4l2_subdev_call(input->camera, pad, set_selection, &sd_state, &sel); + v4l2_subdev_lock_state(input->try_sd_state); + err = v4l2_subdev_call(input->camera, pad, set_selection, + input->try_sd_state, &sel); + v4l2_subdev_unlock_state(input->try_sd_state); if (err) - return; + goto unlock_act_sd_state; sel.which = V4L2_SUBDEV_FORMAT_ACTIVE; sel.target = V4L2_SEL_TGT_CROP; sel.r = input->native_rect; - err = v4l2_subdev_call(input->camera, pad, set_selection, NULL, &sel); + err = v4l2_subdev_call(input->camera, pad, set_selection, + act_sd_state, &sel); if (err) - return; + goto unlock_act_sd_state; dev_info(input->camera->dev, "Supports crop native %dx%d active %dx%d binning %d\n", input->native_rect.width, input->native_rect.height, @@ -1012,6 +1037,10 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input) input->binning_support); input->crop_support = true; + +unlock_act_sd_state: + if (act_sd_state) + v4l2_subdev_unlock_state(act_sd_state); } int atomisp_register_device_nodes(struct atomisp_device *isp) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index a5f58988130a..c1fbcdd16182 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -759,6 +759,29 @@ static ssize_t emulate_tas_store(struct config_item *item, return count; } +static int target_try_configure_unmap(struct se_device *dev, + const char *config_opt) +{ + if (!dev->transport->configure_unmap) { + pr_err("Generic Block Discard not supported\n"); + return -ENOSYS; + } + + if (!target_dev_configured(dev)) { + pr_err("Generic Block Discard setup for %s requires device to be configured\n", + config_opt); + return -ENODEV; + } + + if (!dev->transport->configure_unmap(dev)) { + pr_err("Generic Block Discard setup for %s failed\n", + config_opt); + return -ENOSYS; + } + + return 0; +} + static ssize_t emulate_tpu_store(struct config_item *item, const char *page, size_t count) { @@ -776,11 +799,9 @@ static ssize_t emulate_tpu_store(struct config_item *item, * Discard supported is detected iblock_create_virtdevice(). */ if (flag && !da->max_unmap_block_desc_count) { - if (!dev->transport->configure_unmap || - !dev->transport->configure_unmap(dev)) { - pr_err("Generic Block Discard not supported\n"); - return -ENOSYS; - } + ret = target_try_configure_unmap(dev, "emulate_tpu"); + if (ret) + return ret; } da->emulate_tpu = flag; @@ -806,11 +827,9 @@ static ssize_t emulate_tpws_store(struct config_item *item, * Discard supported is detected iblock_create_virtdevice(). */ if (flag && !da->max_unmap_block_desc_count) { - if (!dev->transport->configure_unmap || - !dev->transport->configure_unmap(dev)) { - pr_err("Generic Block Discard not supported\n"); - return -ENOSYS; - } + ret = target_try_configure_unmap(dev, "emulate_tpws"); + if (ret) + return ret; } da->emulate_tpws = flag; @@ -1022,12 +1041,9 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item, * Discard supported is detected iblock_configure_device(). 
*/ if (flag && !da->max_unmap_block_desc_count) { - if (!dev->transport->configure_unmap || - !dev->transport->configure_unmap(dev)) { - pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set because max_unmap_block_desc_count is zero\n", - da->da_dev); - return -ENOSYS; - } + ret = target_try_configure_unmap(dev, "unmap_zeroes_data"); + if (ret) + return ret; } da->unmap_zeroes_data = flag; pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n", diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 8eb9eb7ce5df..7f6ca8177845 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -91,7 +91,7 @@ static int iblock_configure_device(struct se_device *dev) { struct iblock_dev *ib_dev = IBLOCK_DEV(dev); struct request_queue *q; - struct bdev_handle *bdev_handle; + struct file *bdev_file; struct block_device *bd; struct blk_integrity *bi; blk_mode_t mode = BLK_OPEN_READ; @@ -117,14 +117,14 @@ static int iblock_configure_device(struct se_device *dev) else dev->dev_flags |= DF_READ_ONLY; - bdev_handle = bdev_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev, + bdev_file = bdev_file_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev, NULL); - if (IS_ERR(bdev_handle)) { - ret = PTR_ERR(bdev_handle); + if (IS_ERR(bdev_file)) { + ret = PTR_ERR(bdev_file); goto out_free_bioset; } - ib_dev->ibd_bdev_handle = bdev_handle; - ib_dev->ibd_bd = bd = bdev_handle->bdev; + ib_dev->ibd_bdev_file = bdev_file; + ib_dev->ibd_bd = bd = file_bdev(bdev_file); q = bdev_get_queue(bd); @@ -180,7 +180,7 @@ static int iblock_configure_device(struct se_device *dev) return 0; out_blkdev_put: - bdev_release(ib_dev->ibd_bdev_handle); + fput(ib_dev->ibd_bdev_file); out_free_bioset: bioset_exit(&ib_dev->ibd_bio_set); out: @@ -205,8 +205,8 @@ static void iblock_destroy_device(struct se_device *dev) { struct iblock_dev *ib_dev = IBLOCK_DEV(dev); - if (ib_dev->ibd_bdev_handle) - bdev_release(ib_dev->ibd_bdev_handle); + if (ib_dev->ibd_bdev_file) + fput(ib_dev->ibd_bdev_file); bioset_exit(&ib_dev->ibd_bio_set); } diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index 683f9a55945b..91f6f4280666 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -32,7 +32,7 @@ struct iblock_dev { u32 ibd_flags; struct bio_set ibd_bio_set; struct block_device *ibd_bd; - struct bdev_handle *ibd_bdev_handle; + struct file *ibd_bdev_file; bool ibd_readonly; struct iblock_dev_plug *ibd_plug; } ____cacheline_aligned; diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 41b7489d37ce..f98ebb18666b 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -352,7 +352,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); struct Scsi_Host *sh = sd->host; - struct bdev_handle *bdev_handle; + struct file *bdev_file; int ret; if (scsi_device_get(sd)) { @@ -366,18 +366,18 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) * Claim exclusive struct block_device access to struct scsi_device * for TYPE_DISK and TYPE_ZBC using supplied udev_path */ - bdev_handle = bdev_open_by_path(dev->udev_path, + bdev_file = bdev_file_open_by_path(dev->udev_path, BLK_OPEN_WRITE | BLK_OPEN_READ, pdv, NULL); - if (IS_ERR(bdev_handle)) { + if (IS_ERR(bdev_file)) { pr_err("pSCSI: 
bdev_open_by_path() failed\n"); scsi_device_put(sd); - return PTR_ERR(bdev_handle); + return PTR_ERR(bdev_file); } - pdv->pdv_bdev_handle = bdev_handle; + pdv->pdv_bdev_file = bdev_file; ret = pscsi_add_device_to_list(dev, sd); if (ret) { - bdev_release(bdev_handle); + fput(bdev_file); scsi_device_put(sd); return ret; } @@ -564,9 +564,9 @@ static void pscsi_destroy_device(struct se_device *dev) * from pscsi_create_type_disk() */ if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) && - pdv->pdv_bdev_handle) { - bdev_release(pdv->pdv_bdev_handle); - pdv->pdv_bdev_handle = NULL; + pdv->pdv_bdev_file) { + fput(pdv->pdv_bdev_file); + pdv->pdv_bdev_file = NULL; } /* * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference @@ -907,12 +907,15 @@ new_bio: return 0; fail: - if (bio) - bio_put(bio); + if (bio) { + bio_uninit(bio); + kfree(bio); + } while (req->bio) { bio = req->bio; req->bio = bio->bi_next; - bio_put(bio); + bio_uninit(bio); + kfree(bio); } req->biotail = NULL; return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -994,8 +997,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev) { struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); - if (pdv->pdv_bdev_handle) - return bdev_nr_sectors(pdv->pdv_bdev_handle->bdev); + if (pdv->pdv_bdev_file) + return bdev_nr_sectors(file_bdev(pdv->pdv_bdev_file)); return 0; } diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index b0a3ef136592..9acaa21e4c78 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -37,7 +37,7 @@ struct pscsi_dev_virt { int pdv_channel_id; int pdv_target_id; int pdv_lun_id; - struct bdev_handle *pdv_bdev_handle; + struct file *pdv_bdev_file; struct scsi_device *pdv_sd; struct Scsi_Host *pdv_lld_host; } ____cacheline_aligned; diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c index 4b1092127694..1892e49a8e6a 100644 --- a/drivers/tee/optee/device.c +++ b/drivers/tee/optee/device.c @@ -90,13 +90,14 @@ static int optee_register_device(const uuid_t *device_uuid, u32 func) if (rc) { pr_err("device registration failed, err: %d\n", rc); put_device(&optee_device->dev); + return rc; } if (func == PTA_CMD_GET_DEVICES_SUPP) device_create_file(&optee_device->dev, &dev_attr_need_supplicant); - return rc; + return 0; } static int __optee_enumerate_devices(u32 func) diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 792d6fae4354..e59c20d74b36 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -1226,7 +1226,7 @@ static int tee_client_device_uevent(const struct device *dev, return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id); } -struct bus_type tee_bus_type = { +const struct bus_type tee_bus_type = { .name = "tee", .match = tee_client_device_match, .uevent = tee_client_device_uevent, diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c index 3b04c6ec4fca..40d664a66cdc 100644 --- a/drivers/thermal/intel/intel_hfi.c +++ b/drivers/thermal/intel/intel_hfi.c @@ -607,7 +607,7 @@ void __init intel_hfi_init(void) /* There is one HFI instance per die/package. */ max_hfi_instances = topology_max_packages() * - topology_max_die_per_package(); + topology_max_dies_per_package(); /* * This allocation may fail. 
CPU hotplug callbacks must check diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index bc6eb0dd66a4..4ba649370aa1 100644 --- a/drivers/thermal/intel/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -587,7 +587,7 @@ static int powerclamp_idle_injection_register(void) poll_pkg_cstate_enable = false; if (cpumask_equal(cpu_present_mask, idle_injection_cpu_mask)) { ii_dev = idle_inject_register_full(idle_injection_cpu_mask, idle_inject_update); - if (topology_max_packages() == 1 && topology_max_die_per_package() == 1) + if (topology_max_packages() == 1 && topology_max_dies_per_package() == 1) poll_pkg_cstate_enable = true; } else { ii_dev = idle_inject_register(idle_injection_cpu_mask); diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c index 11a7f8108bbb..f6c2e5964b8f 100644 --- a/drivers/thermal/intel/x86_pkg_temp_thermal.c +++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c @@ -494,7 +494,7 @@ static int __init pkg_temp_thermal_init(void) if (!x86_match_cpu(pkg_temp_thermal_ids)) return -ENODEV; - max_id = topology_max_packages() * topology_max_die_per_package(); + max_id = topology_max_packages() * topology_max_dies_per_package(); zones = kcalloc(max_id, sizeof(struct zone_device *), GFP_KERNEL); if (!zones) diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 900114ba4371..fad40c4bc710 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -1249,6 +1249,9 @@ int tb_port_update_credits(struct tb_port *port) ret = tb_port_do_update_credits(port); if (ret) return ret; + + if (!port->dual_link_port) + return 0; return tb_port_do_update_credits(port->dual_link_port); } diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index 87e4795275fe..6f798f6a2b84 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -203,7 +203,7 @@ struct tb_regs_switch_header { #define ROUTER_CS_5_WOP BIT(1) #define ROUTER_CS_5_WOU BIT(2) #define ROUTER_CS_5_WOD BIT(3) -#define ROUTER_CS_5_C3S BIT(23) +#define ROUTER_CS_5_CNS BIT(23) #define ROUTER_CS_5_PTO BIT(24) #define ROUTER_CS_5_UTO BIT(25) #define ROUTER_CS_5_HCO BIT(26) diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index f8f0d24ff6e4..1515eff8cc3e 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -290,7 +290,7 @@ int usb4_switch_setup(struct tb_switch *sw) } /* TBT3 supported by the CM */ - val |= ROUTER_CS_5_C3S; + val &= ~ROUTER_CS_5_CNS; return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); } diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig index 6e05c5c7bca1..c2a4e88b328f 100644 --- a/drivers/tty/hvc/Kconfig +++ b/drivers/tty/hvc/Kconfig @@ -108,13 +108,15 @@ config HVC_DCC_SERIALIZE_SMP config HVC_RISCV_SBI bool "RISC-V SBI console support" - depends on RISCV_SBI + depends on RISCV_SBI && NONPORTABLE select HVC_DRIVER help This enables support for console output via RISC-V SBI calls, which - is normally used only during boot to output printk. + is normally used only during boot to output printk. This driver + conflicts with real console drivers and should not be enabled on + systems that directly access the console. - If you don't know what do to here, say Y. + If you don't know what do to here, say N. 
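/*
 * A minimal sketch of the ROUTER_CS_5 polarity fix above: bit 23 is CNS
 * ("CM TBT3 Not Supported"), so the connection manager advertises TBT3
 * support by clearing the bit rather than setting it. The helper name
 * example_advertise_tbt3() is invented for illustration; tb_sw_read() /
 * tb_sw_write() are the accessors used by usb4_switch_setup() in the hunk
 * above.
 */
static int example_advertise_tbt3(struct tb_switch *sw)
{
	u32 val;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val &= ~ROUTER_CS_5_CNS;	/* TBT3 supported by the CM */

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
}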
config HVCS tristate "IBM Hypervisor Virtual Console Server support" diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 2d1f350a4bea..c1d43f040c43 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -357,9 +357,9 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, long rate; int ret; - clk_disable_unprepare(d->clk); rate = clk_round_rate(d->clk, newrate); - if (rate > 0) { + if (rate > 0 && p->uartclk != rate) { + clk_disable_unprepare(d->clk); /* * Note that any clock-notifer worker will block in * serial8250_update_uartclk() until we are done. @@ -367,8 +367,8 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, ret = clk_set_rate(d->clk, newrate); if (!ret) p->uartclk = rate; + clk_prepare_enable(d->clk); } - clk_prepare_enable(d->clk); dw8250_do_set_termios(p, termios, old); } diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c index 558c4c7f3104..2dda737b1660 100644 --- a/drivers/tty/serial/8250/8250_pci1xxxx.c +++ b/drivers/tty/serial/8250/8250_pci1xxxx.c @@ -311,7 +311,7 @@ static void pci1xxxx_process_read_data(struct uart_port *port, } while (*valid_byte_count) { - if (*buff_index > RX_BUF_SIZE) + if (*buff_index >= RX_BUF_SIZE) break; rx_buff[*buff_index] = readb(port->membase + UART_RX_BYTE_FIFO); diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index fccec1698a54..cf2c890a560f 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1339,11 +1339,41 @@ static void pl011_start_tx_pio(struct uart_amba_port *uap) } } +static void pl011_rs485_tx_start(struct uart_amba_port *uap) +{ + struct uart_port *port = &uap->port; + u32 cr; + + /* Enable transmitter */ + cr = pl011_read(uap, REG_CR); + cr |= UART011_CR_TXE; + + /* Disable receiver if half-duplex */ + if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) + cr &= ~UART011_CR_RXE; + + if (port->rs485.flags & SER_RS485_RTS_ON_SEND) + cr &= ~UART011_CR_RTS; + else + cr |= UART011_CR_RTS; + + pl011_write(cr, uap, REG_CR); + + if (port->rs485.delay_rts_before_send) + mdelay(port->rs485.delay_rts_before_send); + + uap->rs485_tx_started = true; +} + static void pl011_start_tx(struct uart_port *port) { struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); + if ((uap->port.rs485.flags & SER_RS485_ENABLED) && + !uap->rs485_tx_started) + pl011_rs485_tx_start(uap); + if (!pl011_dma_tx_start(uap)) pl011_start_tx_pio(uap); } @@ -1424,42 +1454,12 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c, return true; } -static void pl011_rs485_tx_start(struct uart_amba_port *uap) -{ - struct uart_port *port = &uap->port; - u32 cr; - - /* Enable transmitter */ - cr = pl011_read(uap, REG_CR); - cr |= UART011_CR_TXE; - - /* Disable receiver if half-duplex */ - if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) - cr &= ~UART011_CR_RXE; - - if (port->rs485.flags & SER_RS485_RTS_ON_SEND) - cr &= ~UART011_CR_RTS; - else - cr |= UART011_CR_RTS; - - pl011_write(cr, uap, REG_CR); - - if (port->rs485.delay_rts_before_send) - mdelay(port->rs485.delay_rts_before_send); - - uap->rs485_tx_started = true; -} - /* Returns true if tx interrupts have to be (kept) enabled */ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq) { struct circ_buf *xmit = &uap->port.state->xmit; int count = uap->fifosize >> 1; - if ((uap->port.rs485.flags & SER_RS485_ENABLED) && - 
!uap->rs485_tx_started) - pl011_rs485_tx_start(uap); - if (uap->port.x_char) { if (!pl011_tx_char(uap, uap->port.x_char, from_irq)) return true; diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 5ddf110aedbe..bbcbc91482af 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -2345,9 +2345,12 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, lpuart32_write(&sport->port, bd, UARTBAUD); lpuart32_serial_setbrg(sport, baud); - lpuart32_write(&sport->port, modem, UARTMODIR); - lpuart32_write(&sport->port, ctrl, UARTCTRL); + /* disable CTS before enabling UARTCTRL_TE to avoid pending idle preamble */ + lpuart32_write(&sport->port, modem & ~UARTMODIR_TXCTSE, UARTMODIR); /* restore control register */ + lpuart32_write(&sport->port, ctrl, UARTCTRL); + /* re-enable the CTS if needed */ + lpuart32_write(&sport->port, modem, UARTMODIR); if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE) sport->is_cs7 = true; diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 4aa72d5aeafb..e14813250616 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -462,8 +462,7 @@ static void imx_uart_stop_tx(struct uart_port *port) } } -/* called with port.lock taken and irqs off */ -static void imx_uart_stop_rx(struct uart_port *port) +static void imx_uart_stop_rx_with_loopback_ctrl(struct uart_port *port, bool loopback) { struct imx_port *sport = (struct imx_port *)port; u32 ucr1, ucr2, ucr4, uts; @@ -485,7 +484,7 @@ static void imx_uart_stop_rx(struct uart_port *port) /* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */ if (port->rs485.flags & SER_RS485_ENABLED && port->rs485.flags & SER_RS485_RTS_ON_SEND && - sport->have_rtscts && !sport->have_rtsgpio) { + sport->have_rtscts && !sport->have_rtsgpio && loopback) { uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)); uts |= UTS_LOOP; imx_uart_writel(sport, uts, imx_uart_uts_reg(sport)); @@ -498,6 +497,16 @@ static void imx_uart_stop_rx(struct uart_port *port) } /* called with port.lock taken and irqs off */ +static void imx_uart_stop_rx(struct uart_port *port) +{ + /* + * Stop RX and enable loopback in order to make sure RS485 bus + * is not blocked. Se comment in imx_uart_probe(). + */ + imx_uart_stop_rx_with_loopback_ctrl(port, true); +} + +/* called with port.lock taken and irqs off */ static void imx_uart_enable_ms(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; @@ -682,9 +691,14 @@ static void imx_uart_start_tx(struct uart_port *port) imx_uart_rts_inactive(sport, &ucr2); imx_uart_writel(sport, ucr2, UCR2); + /* + * Since we are about to transmit we can not stop RX + * with loopback enabled because that will make our + * transmitted data being just looped to RX. 
+ */ if (!(port->rs485.flags & SER_RS485_RX_DURING_TX) && !port->rs485_rx_during_tx_gpio) - imx_uart_stop_rx(port); + imx_uart_stop_rx_with_loopback_ctrl(port, false); sport->tx_state = WAIT_AFTER_RTS; diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index f3a99daebdaa..10bf6d75bf9e 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -237,6 +237,14 @@ #define MAX310x_REV_MASK (0xf8) #define MAX310X_WRITE_BIT 0x80 +/* Port startup definitions */ +#define MAX310X_PORT_STARTUP_WAIT_RETRIES 20 /* Number of retries */ +#define MAX310X_PORT_STARTUP_WAIT_DELAY_MS 10 /* Delay between retries */ + +/* Crystal-related definitions */ +#define MAX310X_XTAL_WAIT_RETRIES 20 /* Number of retries */ +#define MAX310X_XTAL_WAIT_DELAY_MS 10 /* Delay between retries */ + /* MAX3107 specific */ #define MAX3107_REV_ID (0xa0) @@ -583,7 +591,7 @@ static int max310x_update_best_err(unsigned long f, long *besterr) return 1; } -static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s, +static s32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s, unsigned long freq, bool xtal) { unsigned int div, clksrc, pllcfg = 0; @@ -641,12 +649,20 @@ static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s, /* Wait for crystal */ if (xtal) { - unsigned int val; - msleep(10); - regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val); - if (!(val & MAX310X_STS_CLKREADY_BIT)) { - dev_warn(dev, "clock is not stable yet\n"); - } + bool stable = false; + unsigned int try = 0, val = 0; + + do { + msleep(MAX310X_XTAL_WAIT_DELAY_MS); + regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val); + + if (val & MAX310X_STS_CLKREADY_BIT) + stable = true; + } while (!stable && (++try < MAX310X_XTAL_WAIT_RETRIES)); + + if (!stable) + return dev_err_probe(dev, -EAGAIN, + "clock is not stable\n"); } return bestfreq; @@ -1271,7 +1287,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty { int i, ret, fmin, fmax, freq; struct max310x_port *s; - u32 uartclk = 0; + s32 uartclk = 0; bool xtal; for (i = 0; i < devtype->nr; i++) @@ -1334,6 +1350,9 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty goto out_clk; for (i = 0; i < devtype->nr; i++) { + bool started = false; + unsigned int try = 0, val = 0; + /* Reset port */ regmap_write(regmaps[i], MAX310X_MODE2_REG, MAX310X_MODE2_RST_BIT); @@ -1342,13 +1361,27 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty /* Wait for port startup */ do { - regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &ret); - } while (ret != 0x01); + msleep(MAX310X_PORT_STARTUP_WAIT_DELAY_MS); + regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &val); + + if (val == 0x01) + started = true; + } while (!started && (++try < MAX310X_PORT_STARTUP_WAIT_RETRIES)); + + if (!started) { + ret = dev_err_probe(dev, -EAGAIN, "port reset failed\n"); + goto out_uart; + } regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1); } uartclk = max310x_set_ref_clk(dev, s, freq, xtal); + if (uartclk < 0) { + ret = uartclk; + goto out_uart; + } + dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk); for (i = 0; i < devtype->nr; i++) { diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 3ec725555bcc..4749331fe618 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -605,13 +605,16 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s) return; } - pending = uart_port_tx(&s->port, ch, + pending = 
uart_port_tx_flags(&s->port, ch, UART_TX_NOSTOP, !(mxs_read(s, REG_STAT) & AUART_STAT_TXFF), mxs_write(ch, s, REG_DATA)); if (pending) mxs_set(AUART_INTR_TXIEN, s, REG_INTR); else mxs_clr(AUART_INTR_TXIEN, s, REG_INTR); + + if (uart_tx_stopped(&s->port)) + mxs_auart_stop_tx(&s->port); } static void mxs_auart_rx_char(struct mxs_auart_port *s) diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index e63a8fbe63bd..99e08737f293 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -851,19 +851,21 @@ static void qcom_geni_serial_stop_tx(struct uart_port *uport) } static void qcom_geni_serial_send_chunk_fifo(struct uart_port *uport, - unsigned int remaining) + unsigned int chunk) { struct qcom_geni_serial_port *port = to_dev_port(uport); struct circ_buf *xmit = &uport->state->xmit; - unsigned int tx_bytes; + unsigned int tx_bytes, c, remaining = chunk; u8 buf[BYTES_PER_FIFO_WORD]; while (remaining) { memset(buf, 0, sizeof(buf)); tx_bytes = min(remaining, BYTES_PER_FIFO_WORD); - memcpy(buf, &xmit->buf[xmit->tail], tx_bytes); - uart_xmit_advance(uport, tx_bytes); + for (c = 0; c < tx_bytes ; c++) { + buf[c] = xmit->buf[xmit->tail]; + uart_xmit_advance(uport, 1); + } iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index b56ed8c376b2..d6a58a9e072a 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1084,8 +1084,8 @@ static int uart_tiocmget(struct tty_struct *tty) goto out; if (!tty_io_error(tty)) { - result = uport->mctrl; uart_port_lock_irq(uport); + result = uport->mctrl; result |= uport->ops->get_mctrl(uport); uart_port_unlock_irq(uport); } diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c index 88975a4df306..72b6f4f326e2 100644 --- a/drivers/tty/serial/serial_port.c +++ b/drivers/tty/serial/serial_port.c @@ -46,8 +46,31 @@ out: return 0; } +static int serial_port_runtime_suspend(struct device *dev) +{ + struct serial_port_device *port_dev = to_serial_base_port_device(dev); + struct uart_port *port = port_dev->port; + unsigned long flags; + bool busy; + + if (port->flags & UPF_DEAD) + return 0; + + uart_port_lock_irqsave(port, &flags); + busy = __serial_port_busy(port); + if (busy) + port->ops->start_tx(port); + uart_port_unlock_irqrestore(port, flags); + + if (busy) + pm_runtime_mark_last_busy(dev); + + return busy ? 
-EBUSY : 0; +} + static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm, - NULL, serial_port_runtime_resume, NULL); + serial_port_runtime_suspend, + serial_port_runtime_resume, NULL); static int serial_port_probe(struct device *dev) { diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 794b77512740..693e932d6feb 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -251,7 +251,9 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *ter writel_relaxed(cr3, port->membase + ofs->cr3); writel_relaxed(cr1, port->membase + ofs->cr1); - rs485conf->flags |= SER_RS485_RX_DURING_TX; + if (!port->rs485_rx_during_tx_gpio) + rs485conf->flags |= SER_RS485_RX_DURING_TX; + } else { stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DEM | USART_CR3_DEP); diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 156efda7c80d..38a765eadbe2 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -381,7 +381,7 @@ static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr) u32 *ln = vc->vc_uni_lines[vc->state.y]; unsigned int x = vc->state.x, cols = vc->vc_cols; - memcpy(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln)); + memmove(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln)); memset32(&ln[cols - nr], ' ', nr); } } diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 029d017fc1b6..eac7fff6992d 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1469,7 +1469,7 @@ static int ufshcd_devfreq_target(struct device *dev, int ret = 0; struct ufs_hba *hba = dev_get_drvdata(dev); ktime_t start; - bool scale_up, sched_clk_scaling_suspend_work = false; + bool scale_up = false, sched_clk_scaling_suspend_work = false; struct list_head *clk_list = &hba->clk_list_head; struct ufs_clk_info *clki; unsigned long irq_flags; @@ -3057,7 +3057,7 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd) */ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) { - u32 mask = 1U << task_tag; + u32 mask; unsigned long flags; int err; @@ -3075,6 +3075,8 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) return 0; } + mask = 1U << task_tag; + /* clear outstanding transaction before retry */ spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_utrl_clear(hba, mask); @@ -6352,7 +6354,6 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba) ufshcd_hold(hba); if (!ufshcd_is_clkgating_allowed(hba)) ufshcd_setup_clocks(hba, true); - ufshcd_release(hba); pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM; ufshcd_vops_resume(hba, pm_op); } else { @@ -10592,7 +10593,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); if (err < 0) goto out_remove_scsi_host; - hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set); + hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL); if (IS_ERR(hba->tmf_queue)) { err = PTR_ERR(hba->tmf_queue); goto free_tmf_tag_set; diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 39eef470f8fa..8fde5204e88b 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -1712,8 +1712,8 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) * 2. Poll queues do not need ESI. 
*/ nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; - ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs, - ufs_qcom_write_msi_msg); + ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs, + ufs_qcom_write_msi_msg); if (ret) { dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret); return ret; @@ -1742,7 +1742,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) devm_free_irq(hba->dev, desc->irq, hba); } msi_unlock_descs(hba->dev); - platform_msi_domain_free_irqs(hba->dev); + platform_device_msi_free_irqs_all(hba->dev); } else { if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 && host->hw_ver.step == 0) @@ -1818,7 +1818,7 @@ static void ufs_qcom_remove(struct platform_device *pdev) pm_runtime_get_sync(&(pdev)->dev); ufshcd_remove(hba); - platform_msi_domain_free_irqs(hba->dev); + platform_device_msi_free_irqs_all(hba->dev); } static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = { diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c index aeca902ab6cc..fd1beb10bba7 100644 --- a/drivers/usb/cdns3/cdns3-gadget.c +++ b/drivers/usb/cdns3/cdns3-gadget.c @@ -828,7 +828,11 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep, return; } - if (request->complete) { + /* + * zlp request is appended by driver, needn't call usb_gadget_giveback_request() to notify + * gadget composite driver. + */ + if (request->complete && request->buf != priv_dev->zlp_buf) { spin_unlock(&priv_dev->lock); usb_gadget_giveback_request(&priv_ep->endpoint, request); @@ -2540,11 +2544,11 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep) while (!list_empty(&priv_ep->wa2_descmiss_req_list)) { priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list); + list_del_init(&priv_req->list); kfree(priv_req->request.buf); cdns3_gadget_ep_free_request(&priv_ep->endpoint, &priv_req->request); - list_del_init(&priv_req->list); --priv_ep->wa2_counter; } diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c index 33548771a0d3..465e9267b49c 100644 --- a/drivers/usb/cdns3/core.c +++ b/drivers/usb/cdns3/core.c @@ -395,7 +395,6 @@ pm_put: return ret; } - /** * cdns_wakeup_irq - interrupt handler for wakeup events * @irq: irq number for cdns3/cdnsp core device diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c index 04b6d12f2b9a..ee917f1b091c 100644 --- a/drivers/usb/cdns3/drd.c +++ b/drivers/usb/cdns3/drd.c @@ -156,7 +156,8 @@ bool cdns_is_device(struct cdns *cdns) */ static void cdns_otg_disable_irq(struct cdns *cdns) { - writel(0, &cdns->otg_irq_regs->ien); + if (cdns->version) + writel(0, &cdns->otg_irq_regs->ien); } /** @@ -422,15 +423,20 @@ int cdns_drd_init(struct cdns *cdns) cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd; - if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) { + state = readl(&cdns->otg_cdnsp_regs->did); + + if (OTG_CDNSP_CHECK_DID(state)) { cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *) &cdns->otg_cdnsp_regs->ien; cdns->version = CDNSP_CONTROLLER_V2; - } else { + } else if (OTG_CDNS3_CHECK_DID(state)) { cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *) &cdns->otg_v1_regs->ien; writel(1, &cdns->otg_v1_regs->simulate); cdns->version = CDNS3_CONTROLLER_V1; + } else { + dev_err(cdns->dev, "not supporte DID=0x%08x\n", state); + return -EINVAL; } dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n", @@ -483,7 +489,6 @@ int cdns_drd_exit(struct cdns *cdns) return 0; } - /* Indicate the cdns3 core was power lost before */ bool cdns_power_is_lost(struct 
cdns *cdns) { diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h index cbdf94f73ed9..d72370c321d3 100644 --- a/drivers/usb/cdns3/drd.h +++ b/drivers/usb/cdns3/drd.h @@ -79,7 +79,11 @@ struct cdnsp_otg_regs { __le32 susp_timing_ctrl; }; -#define OTG_CDNSP_DID 0x0004034E +/* CDNSP driver supports 0x000403xx Cadence USB controller family. */ +#define OTG_CDNSP_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040300) + +/* CDNS3 driver supports 0x000402xx Cadence USB controller family. */ +#define OTG_CDNS3_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040200) /* * Common registers interface for both CDNS3 and CDNSP version of DRD. diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c index 6164fc4c96a4..ceca4d839dfd 100644 --- a/drivers/usb/cdns3/host.c +++ b/drivers/usb/cdns3/host.c @@ -18,6 +18,11 @@ #include "../host/xhci.h" #include "../host/xhci-plat.h" +/* + * The XECP_PORT_CAP_REG and XECP_AUX_CTRL_REG1 exist only + * in Cadence USB3 dual-role controller, so it can't be used + * with Cadence CDNSP dual-role controller. + */ #define XECP_PORT_CAP_REG 0x8000 #define XECP_AUX_CTRL_REG1 0x8120 @@ -57,6 +62,8 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = { .resume_quirk = xhci_cdns3_resume_quirk, }; +static const struct xhci_plat_priv xhci_plat_cdnsp_xhci; + static int __cdns_host_init(struct cdns *cdns) { struct platform_device *xhci; @@ -81,8 +88,13 @@ static int __cdns_host_init(struct cdns *cdns) goto err1; } - cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci, - sizeof(struct xhci_plat_priv), GFP_KERNEL); + if (cdns->version < CDNSP_CONTROLLER_V2) + cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci, + sizeof(struct xhci_plat_priv), GFP_KERNEL); + else + cdns->xhci_plat_data = kmemdup(&xhci_plat_cdnsp_xhci, + sizeof(struct xhci_plat_priv), GFP_KERNEL); + if (!cdns->xhci_plat_data) { ret = -ENOMEM; goto err1; diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h index d9bb3d3f026e..2a38e1eb6546 100644 --- a/drivers/usb/chipidea/ci.h +++ b/drivers/usb/chipidea/ci.h @@ -176,6 +176,7 @@ struct hw_bank { * @enabled_otg_timer_bits: bits of enabled otg timers * @next_otg_timer: next nearest enabled timer to be expired * @work: work for role changing + * @power_lost_work: work for power lost handling * @wq: workqueue thread * @qh_pool: allocation pool for queue heads * @td_pool: allocation pool for transfer descriptors @@ -226,6 +227,7 @@ struct ci_hdrc { enum otg_fsm_timer next_otg_timer; struct usb_role_switch *role_switch; struct work_struct work; + struct work_struct power_lost_work; struct workqueue_struct *wq; struct dma_pool *qh_pool; diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 41014f93cfdf..835bf2428dc6 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -856,6 +856,27 @@ static int ci_extcon_register(struct ci_hdrc *ci) return 0; } +static void ci_power_lost_work(struct work_struct *work) +{ + struct ci_hdrc *ci = container_of(work, struct ci_hdrc, power_lost_work); + enum ci_role role; + + disable_irq_nosync(ci->irq); + pm_runtime_get_sync(ci->dev); + if (!ci_otg_is_fsm_mode(ci)) { + role = ci_get_role(ci); + + if (ci->role != role) { + ci_handle_id_switch(ci); + } else if (role == CI_ROLE_GADGET) { + if (ci->is_otg && hw_read_otgsc(ci, OTGSC_BSV)) + usb_gadget_vbus_connect(&ci->gadget); + } + } + pm_runtime_put_sync(ci->dev); + enable_irq(ci->irq); +} + static DEFINE_IDA(ci_ida); struct platform_device *ci_hdrc_add_device(struct device *dev, @@ -1045,6 +1066,8 @@ 
static int ci_hdrc_probe(struct platform_device *pdev) spin_lock_init(&ci->lock); mutex_init(&ci->mutex); + INIT_WORK(&ci->power_lost_work, ci_power_lost_work); + ci->dev = dev; ci->platdata = dev_get_platdata(dev); ci->imx28_write_fix = !!(ci->platdata->flags & @@ -1396,25 +1419,6 @@ static int ci_suspend(struct device *dev) return 0; } -static void ci_handle_power_lost(struct ci_hdrc *ci) -{ - enum ci_role role; - - disable_irq_nosync(ci->irq); - if (!ci_otg_is_fsm_mode(ci)) { - role = ci_get_role(ci); - - if (ci->role != role) { - ci_handle_id_switch(ci); - } else if (role == CI_ROLE_GADGET) { - if (ci->is_otg && hw_read_otgsc(ci, OTGSC_BSV)) - usb_gadget_vbus_connect(&ci->gadget); - } - } - - enable_irq(ci->irq); -} - static int ci_resume(struct device *dev) { struct ci_hdrc *ci = dev_get_drvdata(dev); @@ -1446,7 +1450,7 @@ static int ci_resume(struct device *dev) ci_role(ci)->resume(ci, power_lost); if (power_lost) - ci_handle_power_lost(ci); + queue_work(system_freezable_wq, &ci->power_lost_work); if (ci->supports_runtime_pm) { pm_runtime_disable(dev); diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index 84d91b1c1eed..0886b19d2e1c 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -301,7 +301,7 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi) return ret; } - root = debugfs_create_dir(dev_name(dev), ulpi_root); + root = debugfs_create_dir(dev_name(&ulpi->dev), ulpi_root); debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops); dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n", diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 12b6dfeaf658..edf74458474a 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1664,9 +1664,10 @@ static void __usb_hcd_giveback_urb(struct urb *urb) usb_put_urb(urb); } -static void usb_giveback_urb_bh(struct tasklet_struct *t) +static void usb_giveback_urb_bh(struct work_struct *work) { - struct giveback_urb_bh *bh = from_tasklet(bh, t, bh); + struct giveback_urb_bh *bh = + container_of(work, struct giveback_urb_bh, bh); struct list_head local_list; spin_lock_irq(&bh->lock); @@ -1691,9 +1692,9 @@ static void usb_giveback_urb_bh(struct tasklet_struct *t) spin_lock_irq(&bh->lock); if (!list_empty(&bh->head)) { if (bh->high_prio) - tasklet_hi_schedule(&bh->bh); + queue_work(system_bh_highpri_wq, &bh->bh); else - tasklet_schedule(&bh->bh); + queue_work(system_bh_wq, &bh->bh); } bh->running = false; spin_unlock_irq(&bh->lock); @@ -1706,7 +1707,7 @@ static void usb_giveback_urb_bh(struct tasklet_struct *t) * @status: completion status code for the URB. * * Context: atomic. The completion callback is invoked in caller's context. - * For HCDs with HCD_BH flag set, the completion callback is invoked in tasklet + * For HCDs with HCD_BH flag set, the completion callback is invoked in BH * context (except for URBs submitted to the root hub which always complete in * caller's context). 
* @@ -1725,7 +1726,7 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status) struct giveback_urb_bh *bh; bool running; - /* pass status to tasklet via unlinked */ + /* pass status to BH via unlinked */ if (likely(!urb->unlinked)) urb->unlinked = status; @@ -1747,9 +1748,9 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status) if (running) ; else if (bh->high_prio) - tasklet_hi_schedule(&bh->bh); + queue_work(system_bh_highpri_wq, &bh->bh); else - tasklet_schedule(&bh->bh); + queue_work(system_bh_wq, &bh->bh); } EXPORT_SYMBOL_GPL(usb_hcd_giveback_urb); @@ -2540,7 +2541,7 @@ static void init_giveback_urb_bh(struct giveback_urb_bh *bh) spin_lock_init(&bh->lock); INIT_LIST_HEAD(&bh->head); - tasklet_setup(&bh->bh, usb_giveback_urb_bh); + INIT_WORK(&bh->bh, usb_giveback_urb_bh); } struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver, @@ -2926,7 +2927,7 @@ int usb_add_hcd(struct usb_hcd *hcd, && device_can_wakeup(&hcd->self.root_hub->dev)) dev_dbg(hcd->self.controller, "supports USB remote wakeup\n"); - /* initialize tasklets */ + /* initialize BHs */ init_giveback_urb_bh(&hcd->high_prio_bh); hcd->high_prio_bh.high_prio = true; init_giveback_urb_bh(&hcd->low_prio_bh); @@ -3036,7 +3037,7 @@ void usb_remove_hcd(struct usb_hcd *hcd) mutex_unlock(&usb_bus_idr_lock); /* - * tasklet_kill() isn't needed here because: + * flush_work() isn't needed here because: * - driver's disconnect() called from usb_disconnect() should * make sure its URBs are completed during the disconnect() * callback diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index ffd7c99e24a3..e38a4124f610 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2053,9 +2053,19 @@ static void update_port_device_state(struct usb_device *udev) if (udev->parent) { hub = usb_hub_to_struct_hub(udev->parent); - port_dev = hub->ports[udev->portnum - 1]; - WRITE_ONCE(port_dev->state, udev->state); - sysfs_notify_dirent(port_dev->state_kn); + + /* + * The Link Layer Validation System Driver (lvstest) + * has a test step to unbind the hub before running the + * rest of the procedure. This triggers hub_disconnect + * which will set the hub's maxchild to 0, further + * resulting in usb_hub_to_struct_hub returning NULL. 
+ */ + if (hub) { + port_dev = hub->ports[udev->portnum - 1]; + WRITE_ONCE(port_dev->state, udev->state); + sysfs_notify_dirent(port_dev->state_kn); + } } } @@ -2388,17 +2398,25 @@ static int usb_enumerate_device_otg(struct usb_device *udev) } } else if (desc->bLength == sizeof (struct usb_otg_descriptor)) { - /* Set a_alt_hnp_support for legacy otg device */ - err = usb_control_msg(udev, - usb_sndctrlpipe(udev, 0), - USB_REQ_SET_FEATURE, 0, - USB_DEVICE_A_ALT_HNP_SUPPORT, - 0, NULL, 0, - USB_CTRL_SET_TIMEOUT); - if (err < 0) - dev_err(&udev->dev, - "set a_alt_hnp_support failed: %d\n", - err); + /* + * We are operating on a legacy OTG device. + * It should be told that it is operating + * on the wrong port if we have another port that does + * support HNP. + */ + if (bus->otg_port != 0) { + /* Set a_alt_hnp_support for legacy otg device */ + err = usb_control_msg(udev, + usb_sndctrlpipe(udev, 0), + USB_REQ_SET_FEATURE, 0, + USB_DEVICE_A_ALT_HNP_SUPPORT, + 0, NULL, 0, + USB_CTRL_SET_TIMEOUT); + if (err < 0) + dev_err(&udev->dev, + "set a_alt_hnp_support failed: %d\n", + err); + } } } #endif diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index c628c1abc907..4d63496f98b6 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c @@ -573,7 +573,7 @@ static int match_location(struct usb_device *peer_hdev, void *p) struct usb_hub *peer_hub = usb_hub_to_struct_hub(peer_hdev); struct usb_device *hdev = to_usb_device(port_dev->dev.parent->parent); - if (!peer_hub) + if (!peer_hub || port_dev->connect_type == USB_PORT_NOT_USED) return 0; hcd = bus_to_hcd(hdev->bus); @@ -584,7 +584,8 @@ static int match_location(struct usb_device *peer_hdev, void *p) for (port1 = 1; port1 <= peer_hdev->maxchild; port1++) { peer = peer_hub->ports[port1 - 1]; - if (peer && peer->location == port_dev->location) { + if (peer && peer->connect_type != USB_PORT_NOT_USED && + peer->location == port_dev->location) { link_peers_report(port_dev, peer); return 1; /* done */ } diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index e3eea965e57b..e120611a5174 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -376,7 +376,6 @@ /* Global HWPARAMS4 Register */ #define DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(n) (((n) & (0x0f << 13)) >> 13) #define DWC3_MAX_HIBER_SCRATCHBUFS 15 -#define DWC3_EXT_BUFF_CONTROL BIT(21) /* Global HWPARAMS6 Register */ #define DWC3_GHWPARAMS6_BCSUPPORT BIT(14) diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 6604845c397c..39564e17f3b0 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -51,6 +51,8 @@ #define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1 #define PCI_DEVICE_ID_INTEL_MTLS 0x7f6f #define PCI_DEVICE_ID_INTEL_MTL 0x7e7e +#define PCI_DEVICE_ID_INTEL_ARLH 0x7ec1 +#define PCI_DEVICE_ID_INTEL_ARLH_PCH 0x777e #define PCI_DEVICE_ID_INTEL_TGL 0x9a15 #define PCI_DEVICE_ID_AMD_MR 0x163a @@ -421,6 +423,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE_DATA(INTEL, MTLP, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, MTL, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, MTLS, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, ARLH, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, ARLH_PCH, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, TGL, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(AMD, NL_USB, &dwc3_pci_amd_swnode) }, diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 019368f8e9c4..28f49400f3e8 100644 --- a/drivers/usb/dwc3/gadget.c +++
b/drivers/usb/dwc3/gadget.c @@ -673,12 +673,6 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action) params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1); } - if (dep->endpoint.fifo_mode) { - if (!(dwc->hwparams.hwparams4 & DWC3_EXT_BUFF_CONTROL)) - return -EINVAL; - params.param1 |= DWC3_DEPCFG_EBC_HWO_NOWB | DWC3_DEPCFG_USE_EBC; - } - return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params); } @@ -2656,6 +2650,11 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc) int ret; spin_lock_irqsave(&dwc->lock, flags); + if (!dwc->pullups_connected) { + spin_unlock_irqrestore(&dwc->lock, flags); + return 0; + } + dwc->connected = false; /* @@ -4709,15 +4708,13 @@ int dwc3_gadget_suspend(struct dwc3 *dwc) unsigned long flags; int ret; - if (!dwc->gadget_driver) - return 0; - ret = dwc3_gadget_soft_disconnect(dwc); if (ret) goto err; spin_lock_irqsave(&dwc->lock, flags); - dwc3_disconnect_gadget(dwc); + if (dwc->gadget_driver) + dwc3_disconnect_gadget(dwc); spin_unlock_irqrestore(&dwc->lock, flags); return 0; diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h index fd7a4e94397e..55a56cf67d73 100644 --- a/drivers/usb/dwc3/gadget.h +++ b/drivers/usb/dwc3/gadget.h @@ -26,8 +26,6 @@ struct dwc3; #define DWC3_DEPCFG_XFER_NOT_READY_EN BIT(10) #define DWC3_DEPCFG_FIFO_ERROR_EN BIT(11) #define DWC3_DEPCFG_STREAM_EVENT_EN BIT(13) -#define DWC3_DEPCFG_EBC_HWO_NOWB BIT(14) -#define DWC3_DEPCFG_USE_EBC BIT(15) #define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16) #define DWC3_DEPCFG_STREAM_CAPABLE BIT(24) #define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25) diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index 61f57fe5bb78..43230915323c 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -61,7 +61,7 @@ out: int dwc3_host_init(struct dwc3 *dwc) { - struct property_entry props[4]; + struct property_entry props[5]; struct platform_device *xhci; int ret, irq; int prop_idx = 0; @@ -89,6 +89,8 @@ int dwc3_host_init(struct dwc3 *dwc) memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props)); + props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-sg-trb-cache-size-quirk"); + if (dwc->usb3_lpm_capable) props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable"); diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 722a3ab2b337..c265a1f62fc1 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -545,21 +545,37 @@ static int start_transfer(struct fsg_dev *fsg, struct usb_ep *ep, static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh) { + int rc; + if (!fsg_is_set(common)) return false; bh->state = BUF_STATE_SENDING; - if (start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq)) + rc = start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq); + if (rc) { bh->state = BUF_STATE_EMPTY; + if (rc == -ESHUTDOWN) { + common->running = 0; + return false; + } + } return true; } static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh) { + int rc; + if (!fsg_is_set(common)) return false; bh->state = BUF_STATE_RECEIVING; - if (start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq)) + rc = start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq); + if (rc) { bh->state = BUF_STATE_FULL; + if (rc == -ESHUTDOWN) { + common->running = 0; + return false; + } + } return true; } diff --git a/drivers/usb/gadget/function/f_ncm.c
b/drivers/usb/gadget/function/f_ncm.c index a1575a0ca568..28f4e6552e84 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -105,8 +105,8 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f) /* * Although max mtu as dictated by u_ether is 15412 bytes, setting - * max_segment_sizeto 15426 would not be efficient. If user chooses segment - * size to be (>= 8192), then we can't aggregate more than one buffer in each + * max_segment_size to 15426 would not be efficient. If user chooses segment + * size to be (>= 8192), then we can't aggregate more than one buffer in each * NTB (assuming each packet coming from network layer is >= 8192 bytes) as ep * maxpacket limit is 16384. So let max_segment_size be limited to 8000 to allow * at least 2 packets to be aggregated reducing wastage of NTB buffer space @@ -1338,7 +1338,15 @@ parse_ntb: "Parsed NTB with %d frames\n", dgram_counter); to_process -= block_len; - if (to_process != 0) { + + /* + * Windows NCM driver avoids USB ZLPs by adding a 1-byte + * zero pad as needed. + */ + if (to_process == 1 && + (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) { + to_process--; + } else if ((to_process > 0) && (block_len != 0)) { ntb_ptr = (unsigned char *)(ntb_ptr + block_len); goto parse_ntb; } @@ -1489,7 +1497,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) ncm_data_intf.bInterfaceNumber = status; ncm_union_desc.bSlaveInterface0 = status; - ecm_desc.wMaxSegmentSize = ncm_opts->max_segment_size; + ecm_desc.wMaxSegmentSize = cpu_to_le16(ncm_opts->max_segment_size); status = -ENODEV; @@ -1685,7 +1693,7 @@ static struct usb_function_instance *ncm_alloc_inst(void) kfree(opts); return ERR_CAST(net); } - opts->max_segment_size = cpu_to_le16(ETH_FRAME_LEN); + opts->max_segment_size = ETH_FRAME_LEN; INIT_LIST_HEAD(&opts->ncm_os_desc.ext_prop); descs[0] = &opts->ncm_os_desc; diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c index 10c5d7f726a1..f90eeecf27de 100644 --- a/drivers/usb/gadget/udc/omap_udc.c +++ b/drivers/usb/gadget/udc/omap_udc.c @@ -2036,7 +2036,8 @@ static irqreturn_t omap_udc_iso_irq(int irq, void *_dev) static inline int machine_without_vbus_sense(void) { - return machine_is_omap_osk() || machine_is_sx1(); + return machine_is_omap_osk() || machine_is_omap_palmte() || + machine_is_sx1(); } static int omap_udc_start(struct usb_gadget *g, diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c index 4f8617210d85..169f72665739 100644 --- a/drivers/usb/gadget/udc/pch_udc.c +++ b/drivers/usb/gadget/udc/pch_udc.c @@ -274,7 +274,6 @@ struct pch_udc_cfg_data { * @td_data: for data request * @dev: reference to device struct * @offset_addr: offset address of ep register - * @desc: for this ep * @queue: queue for requests * @num: endpoint number * @in: endpoint is IN diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c index ac3fc5970315..cfebb833668e 100644 --- a/drivers/usb/host/uhci-grlib.c +++ b/drivers/usb/host/uhci-grlib.c @@ -22,6 +22,7 @@ #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> static int uhci_grlib_init(struct usb_hcd *hcd) { diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 4460fa7e9fab..a7716202a8dd 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1861,14 +1861,14 @@ void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrup 
struct xhci_hcd *xhci = hcd_to_xhci(hcd); unsigned int intr_num; + spin_lock_irq(&xhci->lock); + /* interrupter 0 is primary interrupter, don't touch it */ - if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters) + if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters) { xhci_dbg(xhci, "Invalid secondary interrupter, can't remove\n"); - - /* fixme, should we check xhci->interrupter[intr_num] == ir */ - /* fixme locking */ - - spin_lock_irq(&xhci->lock); + spin_unlock_irq(&xhci->lock); + return; + } intr_num = ir->intr_num; @@ -2322,7 +2322,7 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, u64 erst_base; u32 erst_size; - if (intr_num > xhci->max_interrupters) { + if (intr_num >= xhci->max_interrupters) { xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n", intr_num, xhci->max_interrupters); return -EINVAL; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index f04fde19f551..3d071b875308 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -253,6 +253,9 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s if (device_property_read_bool(tmpdev, "quirk-broken-port-ped")) xhci->quirks |= XHCI_BROKEN_PORT_PED; + if (device_property_read_bool(tmpdev, "xhci-sg-trb-cache-size-quirk")) + xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK; + device_property_read_u32(tmpdev, "imod-interval-ns", &xhci->imod_interval); } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 33806ae966f9..4f64b814d4aa 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -326,7 +326,13 @@ static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhc /* how many trbs will be queued past the enqueue segment? */ trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1); - if (trbs_past_seg <= 0) + /* + * Consider expanding the ring already if num_trbs fills the current + * segment (i.e. trbs_past_seg == 0), not only when num_trbs goes into + * the next segment. 
Avoids confusing a full ring with the special empty ring + * case below + */ + if (trbs_past_seg < 0) return 0; /* Empty ring special case, enqueue stuck on link trb while dequeue advanced */ @@ -2376,6 +2382,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, /* handle completion code */ switch (trb_comp_code) { case COMP_SUCCESS: + /* Don't overwrite status if TD had an error, see xHCI 4.9.1 */ + if (td->error_mid_td) + break; if (remaining) { frame->status = short_framestatus; if (xhci->quirks & XHCI_TRUST_TX_LENGTH) @@ -2391,9 +2400,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, case COMP_BANDWIDTH_OVERRUN_ERROR: frame->status = -ECOMM; break; - case COMP_ISOCH_BUFFER_OVERRUN: case COMP_BABBLE_DETECTED_ERROR: + sum_trbs_for_length = true; + fallthrough; + case COMP_ISOCH_BUFFER_OVERRUN: frame->status = -EOVERFLOW; + if (ep_trb != td->last_trb) + td->error_mid_td = true; break; case COMP_INCOMPATIBLE_DEVICE_ERROR: case COMP_STALL_ERROR: @@ -2401,8 +2414,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, case COMP_USB_TRANSACTION_ERROR: frame->status = -EPROTO; + sum_trbs_for_length = true; if (ep_trb != td->last_trb) - return 0; + td->error_mid_td = true; break; case COMP_STOPPED: sum_trbs_for_length = true; @@ -2422,6 +2436,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, break; } + if (td->urb_length_set) + goto finish_td; + if (sum_trbs_for_length) frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) + ep_trb_len - remaining; @@ -2430,6 +2447,14 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, td->urb->actual_length += frame->actual_length; +finish_td: + /* Don't give back TD yet if we encountered an error mid TD */ + if (td->error_mid_td && ep_trb != td->last_trb) { + xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n"); + td->urb_length_set = true; + return 0; + } + return finish_td(xhci, ep, ep_ring, td, trb_comp_code); } @@ -2808,17 +2833,51 @@ static int handle_tx_event(struct xhci_hcd *xhci, } if (!ep_seg) { - if (!ep->skip || - !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { - /* Some host controllers give a spurious - * successful event after a short transfer. - * Ignore it. - */ - if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && - ep_ring->last_td_was_short) { - ep_ring->last_td_was_short = false; - goto cleanup; + + if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { + skip_isoc_td(xhci, td, ep, status); + goto cleanup; + } + + /* + * Some hosts give a spurious success event after a short + * transfer. Ignore it. + */ + if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && + ep_ring->last_td_was_short) { + ep_ring->last_td_was_short = false; + goto cleanup; + } + + /* + * xhci 4.10.2 states isoc endpoints should continue + * processing the next TD if there was an error mid TD. + * So hosts like NEC don't generate an event for the last + * isoc TRB even if the IOC flag is set. + * xhci 4.9.1 states that if there are errors in multi-TRB + * TDs the xHC should generate an error for that TRB, and if the xHC + * proceeds to the next TD it should generate an event for + * any TRB with the IOC flag on the way. Other hosts follow this. + * So this event might be for the next TD.
+ */ + if (td->error_mid_td && + !list_is_last(&td->td_list, &ep_ring->td_list)) { + struct xhci_td *td_next = list_next_entry(td, td_list); + + ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb, + td_next->last_trb, ep_trb_dma, false); + if (ep_seg) { + /* give back previous TD, start handling new */ + xhci_dbg(xhci, "Missing TD completion event after mid TD error\n"); + ep_ring->dequeue = td->last_trb; + ep_ring->deq_seg = td->last_trb_seg; + inc_deq(xhci, ep_ring); + xhci_td_cleanup(xhci, td, ep_ring, td->status); + td = td_next; } + } + + if (!ep_seg) { /* HC is busted, give up! */ xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not " @@ -2830,9 +2889,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep_trb_dma, true); return -ESHUTDOWN; } - - skip_isoc_td(xhci, td, ep, status); - goto cleanup; } if (trb_comp_code == COMP_SHORT_PACKET) ep_ring->last_td_was_short = true; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index a5c72a634e6a..6f82d404883f 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1549,6 +1549,7 @@ struct xhci_td { struct xhci_segment *bounce_seg; /* actual_length of the URB has already been set */ bool urb_length_set; + bool error_mid_td; unsigned int num_trbs; }; diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c index ae41578bd014..70165dd86b5d 100644 --- a/drivers/usb/roles/class.c +++ b/drivers/usb/roles/class.c @@ -21,7 +21,9 @@ static const struct class role_class = { struct usb_role_switch { struct device dev; struct mutex lock; /* device lock*/ + struct module *module; /* the module this device depends on */ enum usb_role role; + bool registered; /* From descriptor */ struct device *usb2_port; @@ -48,6 +50,9 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role) if (IS_ERR_OR_NULL(sw)) return 0; + if (!sw->registered) + return -EOPNOTSUPP; + mutex_lock(&sw->lock); ret = sw->set(sw, role); @@ -73,7 +78,7 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw) { enum usb_role role; - if (IS_ERR_OR_NULL(sw)) + if (IS_ERR_OR_NULL(sw) || !sw->registered) return USB_ROLE_NONE; mutex_lock(&sw->lock); @@ -135,7 +140,7 @@ struct usb_role_switch *usb_role_switch_get(struct device *dev) usb_role_switch_match); if (!IS_ERR_OR_NULL(sw)) - WARN_ON(!try_module_get(sw->dev.parent->driver->owner)); + WARN_ON(!try_module_get(sw->module)); return sw; } @@ -157,7 +162,7 @@ struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *fwnode) sw = fwnode_connection_find_match(fwnode, "usb-role-switch", NULL, usb_role_switch_match); if (!IS_ERR_OR_NULL(sw)) - WARN_ON(!try_module_get(sw->dev.parent->driver->owner)); + WARN_ON(!try_module_get(sw->module)); return sw; } @@ -172,7 +177,7 @@ EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get); void usb_role_switch_put(struct usb_role_switch *sw) { if (!IS_ERR_OR_NULL(sw)) { - module_put(sw->dev.parent->driver->owner); + module_put(sw->module); put_device(&sw->dev); } } @@ -189,15 +194,18 @@ struct usb_role_switch * usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode) { struct device *dev; + struct usb_role_switch *sw = NULL; if (!fwnode) return NULL; dev = class_find_device_by_fwnode(&role_class, fwnode); - if (dev) - WARN_ON(!try_module_get(dev->parent->driver->owner)); + if (dev) { + sw = to_role_switch(dev); + WARN_ON(!try_module_get(sw->module)); + } - return dev ? 
to_role_switch(dev) : NULL; + return sw; } EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode); @@ -338,6 +346,7 @@ usb_role_switch_register(struct device *parent, sw->set = desc->set; sw->get = desc->get; + sw->module = parent->driver->owner; sw->dev.parent = parent; sw->dev.fwnode = desc->fwnode; sw->dev.class = &role_class; @@ -352,6 +361,8 @@ usb_role_switch_register(struct device *parent, return ERR_PTR(ret); } + sw->registered = true; + /* TODO: Symlinks for the host port and the device controller. */ return sw; @@ -366,8 +377,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_register); */ void usb_role_switch_unregister(struct usb_role_switch *sw) { - if (!IS_ERR_OR_NULL(sw)) + if (!IS_ERR_OR_NULL(sw)) { + sw->registered = false; device_unregister(&sw->dev); + } } EXPORT_SYMBOL_GPL(usb_role_switch_unregister); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 1e61fe043171..923e0ed85444 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -146,6 +146,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ + { USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */ { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */ { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 72390dbf0769..2ae124c49d44 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -2269,6 +2269,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */ + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */ .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */ diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index b1e844bf31f8..703a9c563557 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -184,6 +184,8 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */ {DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */ + {DEVICE_SWI(0x413c, 0x8217)}, /* Dell Wireless DW5826e */ + {DEVICE_SWI(0x413c, 0x8218)}, /* Dell Wireless DW5826e QDL */ /* Huawei devices */ {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c index 4e0eef1440b7..300aeef160e7 100644 --- a/drivers/usb/storage/isd200.c +++ b/drivers/usb/storage/isd200.c @@ -1105,7 +1105,7 @@ static void isd200_dump_driveid(struct us_data *us, u16 *id) static int isd200_get_inquiry_data( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; - int retStatus = ISD200_GOOD; + int retStatus; u16 *id = info->id; usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n"); @@ -1137,6 +1137,13 @@ static int isd200_get_inquiry_data( struct 
us_data *us ) isd200_fix_driveid(id); isd200_dump_driveid(us, id); + /* Prevent division by 0 in isd200_scsi_to_ata() */ + if (id[ATA_ID_HEADS] == 0 || id[ATA_ID_SECTORS] == 0) { + usb_stor_dbg(us, " Invalid ATA Identify data\n"); + retStatus = ISD200_ERROR; + goto Done; + } + memset(&info->InquiryData, 0, sizeof(info->InquiryData)); /* Standard IDE interface only supports disks */ @@ -1202,6 +1209,7 @@ static int isd200_get_inquiry_data( struct us_data *us ) } } + Done: usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus); return(retStatus); @@ -1481,22 +1489,27 @@ static int isd200_init_info(struct us_data *us) static int isd200_Initialization(struct us_data *us) { + int rc = 0; + usb_stor_dbg(us, "ISD200 Initialization...\n"); /* Initialize ISD200 info struct */ - if (isd200_init_info(us) == ISD200_ERROR) { + if (isd200_init_info(us) < 0) { usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n"); + rc = -ENOMEM; } else { /* Get device specific data */ - if (isd200_get_inquiry_data(us) != ISD200_GOOD) + if (isd200_get_inquiry_data(us) != ISD200_GOOD) { usb_stor_dbg(us, "ISD200 Initialization Failure\n"); - else + rc = -EINVAL; + } else { usb_stor_dbg(us, "ISD200 Initialization complete\n"); + } } - return 0; + return rc; } diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index c54e9805da53..12cf9940e5b6 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -180,6 +180,13 @@ static int slave_configure(struct scsi_device *sdev) sdev->use_192_bytes_for_3f = 1; /* + * Some devices report generic values until the media has been + * accessed. Force a READ(10) prior to querying device + * characteristics. + */ + sdev->read_before_ms = 1; + + /* * Some devices don't like MODE SENSE with page=0x3f, * which is the command used for checking if a device * is write-protected. Now that we tell the sd driver diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 9707f53cfda9..71ace274761f 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -879,6 +879,13 @@ static int uas_slave_configure(struct scsi_device *sdev) sdev->guess_capacity = 1; /* + * Some devices report generic values until the media has been + * accessed. Force a READ(10) prior to querying device + * characteristics. + */ + sdev->read_before_ms = 1; + + /* * Some devices don't like MODE SENSE with page=0x3f, * which is the command used for checking if a device * is write-protected. 
Now that we tell the sd driver diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index f81bec0c7b86..f8ea3054be54 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -559,16 +559,21 @@ static ssize_t hpd_show(struct device *dev, struct device_attribute *attr, char } static DEVICE_ATTR_RO(hpd); -static struct attribute *dp_altmode_attrs[] = { +static struct attribute *displayport_attrs[] = { &dev_attr_configuration.attr, &dev_attr_pin_assignment.attr, &dev_attr_hpd.attr, NULL }; -static const struct attribute_group dp_altmode_group = { +static const struct attribute_group displayport_group = { .name = "displayport", - .attrs = dp_altmode_attrs, + .attrs = displayport_attrs, +}; + +static const struct attribute_group *displayport_groups[] = { + &displayport_group, + NULL, }; int dp_altmode_probe(struct typec_altmode *alt) @@ -576,7 +581,6 @@ int dp_altmode_probe(struct typec_altmode *alt) const struct typec_altmode *port = typec_altmode_get_partner(alt); struct fwnode_handle *fwnode; struct dp_altmode *dp; - int ret; /* FIXME: Port can only be DFP_U. */ @@ -587,10 +591,6 @@ int dp_altmode_probe(struct typec_altmode *alt) DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo))) return -ENODEV; - ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group); - if (ret) - return ret; - dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL); if (!dp) return -ENOMEM; @@ -624,7 +624,6 @@ void dp_altmode_remove(struct typec_altmode *alt) { struct dp_altmode *dp = typec_altmode_get_drvdata(alt); - sysfs_remove_group(&alt->dev.kobj, &dp_altmode_group); cancel_work_sync(&dp->work); if (dp->connector_fwnode) { @@ -649,6 +648,7 @@ static struct typec_altmode_driver dp_altmode_driver = { .driver = { .name = "typec_displayport", .owner = THIS_MODULE, + .dev_groups = displayport_groups, }, }; module_typec_altmode_driver(dp_altmode_driver); diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 5945e3a2b0f7..096597231027 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -3743,9 +3743,6 @@ static void tcpm_detach(struct tcpm_port *port) if (tcpm_port_is_disconnected(port)) port->hard_reset_count = 0; - port->try_src_count = 0; - port->try_snk_count = 0; - if (!port->attached) return; @@ -4876,8 +4873,11 @@ static void run_state_machine(struct tcpm_port *port) break; case PORT_RESET: tcpm_reset_port(port); - tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ? - TYPEC_CC_RD : tcpm_rp_cc(port)); + if (port->self_powered) + tcpm_set_cc(port, TYPEC_CC_OPEN); + else + tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ? 
+ TYPEC_CC_RD : tcpm_rp_cc(port)); tcpm_set_state(port, PORT_RESET_WAIT_OFF, PD_T_ERROR_RECOVERY); break; @@ -6848,7 +6848,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) if (err) goto out_role_sw_put; - port->typec_caps.pd = port->pds[0]; + if (port->pds) + port->typec_caps.pd = port->pds[0]; port->typec_port = typec_register_port(port->dev, &port->typec_caps); if (IS_ERR(port->typec_port)) { diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 5392ec698959..14f5a7bfae2e 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -938,7 +938,9 @@ static void ucsi_handle_connector_change(struct work_struct *work) clear_bit(EVENT_PENDING, &con->ucsi->flags); + mutex_lock(&ucsi->ppm_lock); ret = ucsi_acknowledge_connector_change(ucsi); + mutex_unlock(&ucsi->ppm_lock); if (ret) dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret); diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index 6bbf490ac401..928eacbeb21a 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -25,6 +25,8 @@ struct ucsi_acpi { unsigned long flags; guid_t guid; u64 cmd; + bool dell_quirk_probed; + bool dell_quirk_active; }; static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func) @@ -73,9 +75,13 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset, const void *val, size_t val_len) { struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi); + bool ack = UCSI_COMMAND(*(u64 *)val) == UCSI_ACK_CC_CI; int ret; - set_bit(COMMAND_PENDING, &ua->flags); + if (ack) + set_bit(ACK_PENDING, &ua->flags); + else + set_bit(COMMAND_PENDING, &ua->flags); ret = ucsi_acpi_async_write(ucsi, offset, val, val_len); if (ret) @@ -85,7 +91,10 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset, ret = -ETIMEDOUT; out_clear_bit: - clear_bit(COMMAND_PENDING, &ua->flags); + if (ack) + clear_bit(ACK_PENDING, &ua->flags); + else + clear_bit(COMMAND_PENDING, &ua->flags); return ret; } @@ -119,12 +128,73 @@ static const struct ucsi_operations ucsi_zenbook_ops = { .async_write = ucsi_acpi_async_write }; -static const struct dmi_system_id zenbook_dmi_id[] = { +/* + * Some Dell laptops expect that an ACK command with the + * UCSI_ACK_CONNECTOR_CHANGE bit set is followed by a (separate) + * ACK command that only has the UCSI_ACK_COMMAND_COMPLETE bit set. + * If this is not done, events are not delivered to OSPM and + * subsequent commands will time out.
+ */ +static int +ucsi_dell_sync_write(struct ucsi *ucsi, unsigned int offset, + const void *val, size_t val_len) +{ + struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi); + u64 cmd = *(u64 *)val, ack = 0; + int ret; + + if (UCSI_COMMAND(cmd) == UCSI_ACK_CC_CI && + cmd & UCSI_ACK_CONNECTOR_CHANGE) + ack = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE; + + ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len); + if (ret != 0) + return ret; + if (ack == 0) + return ret; + + if (!ua->dell_quirk_probed) { + ua->dell_quirk_probed = true; + + cmd = UCSI_GET_CAPABILITY; + ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd, + sizeof(cmd)); + if (ret == 0) + return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, + &ack, sizeof(ack)); + if (ret != -ETIMEDOUT) + return ret; + + ua->dell_quirk_active = true; + dev_err(ua->dev, "Firmware bug: Additional ACK required after ACKing a connector change.\n"); + dev_err(ua->dev, "Firmware bug: Enabling workaround\n"); + } + + if (!ua->dell_quirk_active) + return ret; + + return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &ack, sizeof(ack)); +} + +static const struct ucsi_operations ucsi_dell_ops = { + .read = ucsi_acpi_read, + .sync_write = ucsi_dell_sync_write, + .async_write = ucsi_acpi_async_write +}; + +static const struct dmi_system_id ucsi_acpi_quirks[] = { { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"), }, + .driver_data = (void *)&ucsi_zenbook_ops, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + }, + .driver_data = (void *)&ucsi_dell_ops, }, { } }; @@ -142,8 +212,10 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data) if (UCSI_CCI_CONNECTOR(cci)) ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci)); - if (test_bit(COMMAND_PENDING, &ua->flags) && - cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE)) + if (cci & UCSI_CCI_ACK_COMPLETE && test_bit(ACK_PENDING, &ua->flags)) + complete(&ua->complete); + if (cci & UCSI_CCI_COMMAND_COMPLETE && + test_bit(COMMAND_PENDING, &ua->flags)) complete(&ua->complete); } @@ -151,6 +223,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev) { struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); const struct ucsi_operations *ops = &ucsi_acpi_ops; + const struct dmi_system_id *id; struct ucsi_acpi *ua; struct resource *res; acpi_status status; @@ -180,8 +253,9 @@ static int ucsi_acpi_probe(struct platform_device *pdev) init_completion(&ua->complete); ua->dev = &pdev->dev; - if (dmi_check_system(zenbook_dmi_id)) - ops = &ucsi_zenbook_ops; + id = dmi_first_match(ucsi_acpi_quirks); + if (id) + ops = id->driver_data; ua->ucsi = ucsi_create(&pdev->dev, ops); if (IS_ERR(ua->ucsi)) diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c index 53a7ede8556d..faccc942b381 100644 --- a/drivers/usb/typec/ucsi/ucsi_glink.c +++ b/drivers/usb/typec/ucsi/ucsi_glink.c @@ -301,6 +301,7 @@ static const struct of_device_id pmic_glink_ucsi_of_quirks[] = { { .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, }, { .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, }, { .compatible = "qcom,sm8350-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, }, + { .compatible = "qcom,sm8550-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, }, {} }; diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 1183e7a871f8..46823c2e2ba1 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ 
-2399,11 +2399,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount, struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; int resize, ret, old_userfont, old_width, old_height, old_charcount; - char *old_data = NULL; + u8 *old_data = vc->vc_font.data; resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); - if (p->userfont) - old_data = vc->vc_font.data; vc->vc_font.data = (void *)(p->fontdata = data); old_userfont = p->userfont; if ((p->userfont = userfont)) @@ -2437,13 +2435,13 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount, update_screen(vc); } - if (old_data && (--REFCOUNT(old_data) == 0)) + if (old_userfont && (--REFCOUNT(old_data) == 0)) kfree(old_data - FONT_EXTRA_WORDS * sizeof(int)); return 0; err_out: p->fontdata = old_data; - vc->vc_font.data = (void *)old_data; + vc->vc_font.data = old_data; if (userfont) { p->userfont = old_userfont; diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index c26ee6fd73c9..8fdccf033b2d 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -1010,8 +1010,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) goto getmem_done; } pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n"); - } else { - goto err1; } /* diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 7d22051b15a2..d78fe7137799 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -512,7 +512,6 @@ config S3C2410_WATCHDOG tristate "S3C6410/S5Pv210/Exynos Watchdog" depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST select WATCHDOG_CORE - select MFD_SYSCON if ARCH_EXYNOS help Watchdog timer block in the Samsung S3C64xx, S5Pv210 and Exynos SoCs. 
This will reboot the system when the timer expires with diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 349d30462c8c..686cf544d0ae 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -24,9 +24,9 @@ #include <linux/slab.h> #include <linux/err.h> #include <linux/of.h> -#include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <linux/delay.h> +#include <linux/soc/samsung/exynos-pmu.h> #define S3C2410_WTCON 0x00 #define S3C2410_WTDAT 0x04 @@ -699,11 +699,11 @@ static int s3c2410wdt_probe(struct platform_device *pdev) return ret; if (wdt->drv_data->quirks & QUIRKS_HAVE_PMUREG) { - wdt->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node, - "samsung,syscon-phandle"); + wdt->pmureg = exynos_get_pmu_regmap_by_phandle(dev->of_node, + "samsung,syscon-phandle"); if (IS_ERR(wdt->pmureg)) return dev_err_probe(dev, PTR_ERR(wdt->pmureg), - "syscon regmap lookup failed.\n"); + "PMU regmap lookup failed.\n"); } wdt_irq = platform_get_irq(pdev, 0); diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index b8cfea7812d6..2faa4bf78c7a 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -923,8 +923,8 @@ static void shutdown_pirq(struct irq_data *data) return; do_mask(info, EVT_MASK_REASON_EXPLICIT); - xen_evtchn_close(evtchn); xen_irq_info_cleanup(info); + xen_evtchn_close(evtchn); } static void enable_pirq(struct irq_data *data) @@ -956,6 +956,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi); static void __unbind_from_irq(struct irq_info *info, unsigned int irq) { evtchn_port_t evtchn; + bool close_evtchn = false; if (!info) { xen_irq_free_desc(irq); @@ -975,7 +976,7 @@ static void __unbind_from_irq(struct irq_info *info, unsigned int irq) struct xenbus_device *dev; if (!info->is_static) - xen_evtchn_close(evtchn); + close_evtchn = true; switch (info->type) { case IRQT_VIRQ: @@ -995,6 +996,9 @@ static void __unbind_from_irq(struct irq_info *info, unsigned int irq) } xen_irq_info_cleanup(info); + + if (close_evtchn) + xen_evtchn_close(evtchn); } xen_free_irq(info); @@ -2216,7 +2220,7 @@ static __init void xen_alloc_callback_vector(void) return; pr_info("Xen HVM callback vector for event delivery is enabled\n"); - alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback); + sysvec_install(HYPERVISOR_CALLBACK_VECTOR, sysvec_xen_hvm_callback); } #else void xen_setup_callback_vector(void) {} diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c index 26ffb8755ffb..f93f73ecefee 100644 --- a/drivers/xen/gntalloc.c +++ b/drivers/xen/gntalloc.c @@ -317,7 +317,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv, rc = -EFAULT; goto out_free; } - if (copy_to_user(arg->gref_ids, gref_ids, + if (copy_to_user(arg->gref_ids_flex, gref_ids, sizeof(gref_ids[0]) * op.count)) { rc = -EFAULT; goto out_free; diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c index 508655273145..c63f317e3df3 100644 --- a/drivers/xen/pcpu.c +++ b/drivers/xen/pcpu.c @@ -65,7 +65,7 @@ struct pcpu { uint32_t flags; }; -static struct bus_type xen_pcpu_subsys = { +static const struct bus_type xen_pcpu_subsys = { .name = "xen_cpu", .dev_name = "xen_cpu", }; diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 35b6e306026a..67dfa4778864 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -1223,18 +1223,13 @@ struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd) kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0])); 
mmap_write_unlock(mm); - size = sizeof(*ports) * kioreq->vcpus; - ports = kzalloc(size, GFP_KERNEL); - if (!ports) { - ret = -ENOMEM; + ports = memdup_array_user(u64_to_user_ptr(ioeventfd->ports), + kioreq->vcpus, sizeof(*ports)); + if (IS_ERR(ports)) { + ret = PTR_ERR(ports); goto error_kfree; } - if (copy_from_user(ports, u64_to_user_ptr(ioeventfd->ports), size)) { - ret = -EFAULT; - goto error_kfree_ports; - } - for (i = 0; i < kioreq->vcpus; i++) { kioreq->ports[i].vcpu = i; kioreq->ports[i].port = ports[i]; @@ -1256,7 +1251,7 @@ struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd) error_unbind: while (--i >= 0) unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]); -error_kfree_ports: + kfree(ports); error_kfree: kfree(kioreq); diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c index 8cd583db20b1..b293d7652f15 100644 --- a/drivers/xen/xen-balloon.c +++ b/drivers/xen/xen-balloon.c @@ -237,7 +237,7 @@ static const struct attribute_group *balloon_groups[] = { NULL }; -static struct bus_type balloon_subsys = { +static const struct bus_type balloon_subsys = { .name = BALLOON_CLASS_NAME, .dev_name = BALLOON_CLASS_NAME, }; diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 32835b4b9bc5..51b3124b0d56 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -116,14 +116,15 @@ EXPORT_SYMBOL_GPL(xenbus_strstate); * @dev: xenbus device * @path: path to watch * @watch: watch to register + * @will_handle: callback to determine whether an event should be queued * @callback: callback to register * * Register a @watch on the given path, using the given xenbus_watch structure - * for storage, and the given @callback function as the callback. On success, - * the given @path will be saved as @watch->node, and remains the - * caller's to free. On error, @watch->node will - * be NULL, the device will switch to %XenbusStateClosing, and the error will - * be saved in the store. + * for storage, @will_handle function as the callback to determine if each + * event needs to be queued, and the given @callback function as the callback. + * On success, the given @path will be saved as @watch->node, and remains the + * caller's to free. On error, @watch->node will be NULL, the device will + * switch to %XenbusStateClosing, and the error will be saved in the store. * * Returns: %0 on success or -errno on error */ @@ -158,11 +159,13 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path); * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path * @dev: xenbus device * @watch: watch to register + * @will_handle: callback to determine whether an event should be queued * @callback: callback to register * @pathfmt: format of path to watch * * Register a watch on the given @path, using the given xenbus_watch - * structure for storage, and the given @callback function as the + * structure for storage, @will_handle function as the callback to determine if + * each event needs to be queued, and the given @callback function as the * callback. On success, the watched path (@path/@path2) will be saved * as @watch->node, and becomes the caller's to kfree().
* On error, watch->node will be NULL, so the caller has nothing to diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c index 025edfccedcf..f49d19977e82 100644 --- a/drivers/zorro/zorro-driver.c +++ b/drivers/zorro/zorro-driver.c @@ -150,7 +150,7 @@ static int zorro_uevent(const struct device *dev, struct kobj_uevent_env *env) return 0; } -struct bus_type zorro_bus_type = { +const struct bus_type zorro_bus_type = { .name = "zorro", .dev_name = "zorro", .dev_groups = zorro_device_attribute_groups, diff --git a/drivers/zorro/zorro.h b/drivers/zorro/zorro.h index f84df9fb4c20..df44e35203fd 100644 --- a/drivers/zorro/zorro.h +++ b/drivers/zorro/zorro.h @@ -4,7 +4,7 @@ * Zorro bus */ -extern struct bus_type zorro_bus_type; +extern const struct bus_type zorro_bus_type; #ifdef CONFIG_ZORRO_NAMES
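
As an aside on the f_ncm hunk above: the parse loop now treats a lone trailing 0x00 byte as the zero pad the Windows NCM driver appends to avoid a ZLP, instead of trying to parse it as another NTB block. A minimal userspace sketch of that control flow follows; it is not the driver code — the one-byte length-prefixed framing, the buffer contents and the function name are invented here purely for illustration.

#include <stdio.h>
#include <stddef.h>

/*
 * Toy model of the parse loop: consume one block at a time and stop
 * cleanly when only a single 0x00 pad byte remains, rather than
 * treating that byte as a (malformed) block.
 * Each "block" here is a 1-byte length followed by that many bytes.
 */
static int parse_blocks(const unsigned char *buf, size_t len)
{
	const unsigned char *p = buf;
	size_t to_process = len;
	int frames = 0;

	while (to_process > 0) {
		size_t block_len;

		/* Lone trailing zero byte: Windows-style pad, not a block */
		if (to_process == 1 && p[0] == 0x00)
			break;

		block_len = 1 + (size_t)p[0];	/* length byte + payload */
		if (block_len > to_process)
			return -1;		/* malformed input */

		frames++;
		p += block_len;
		to_process -= block_len;
	}

	return frames;
}

int main(void)
{
	/* two 3-byte payloads followed by a single zero pad byte */
	static const unsigned char ntb[] = { 3, 'a', 'b', 'c', 3, 'd', 'e', 'f', 0x00 };

	printf("parsed %d frames\n", parse_blocks(ntb, sizeof(ntb)));
	return 0;
}

The real driver walks NTH/NDP headers and datagram tables rather than this toy framing; only the handling of the final pad byte is meant to carry over.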