Diffstat (limited to 'drivers')
370 files changed, 8879 insertions, 5420 deletions
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c index 9711e8fc979d..3a89644f087c 100644 --- a/drivers/accel/habanalabs/common/device.c +++ b/drivers/accel/habanalabs/common/device.c @@ -2044,7 +2044,7 @@ static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 notifier_event->events_mask |= event_mask; if (notifier_event->eventfd) - eventfd_signal(notifier_event->eventfd, 1); + eventfd_signal(notifier_event->eventfd); mutex_unlock(&notifier_event->lock); } diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c index 5036e58e7235..1405623b03e4 100644 --- a/drivers/accel/qaic/mhi_controller.c +++ b/drivers/accel/qaic/mhi_controller.c @@ -404,8 +404,21 @@ static struct mhi_controller_config aic100_config = { static int mhi_read_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 *out) { - u32 tmp = readl_relaxed(addr); + u32 tmp; + /* + * SOC_HW_VERSION quirk + * The SOC_HW_VERSION register (offset 0x224) is not reliable and + * may contain uninitialized values, including 0xFFFFFFFF. This could + * cause a false positive link down error. Instead, intercept any + * reads and provide the correct value of the register. + */ + if (addr - mhi_cntrl->regs == 0x224) { + *out = 0x60110200; + return 0; + } + + tmp = readl_relaxed(addr); if (tmp == U32_MAX) return -EIO; diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index 4a8e43a7a6a4..24e886f857d5 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -451,7 +451,7 @@ static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 s * later */ buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE; - max_order = min(MAX_ORDER - 1, get_order(size)); + max_order = min(MAX_PAGE_ORDER - 1, get_order(size)); } else { /* allocate a single page for book keeping */ nr_pages = 1; @@ -777,7 +777,6 @@ struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_ struct dma_buf_attachment *attach; struct drm_gem_object *obj; struct qaic_bo *bo; - size_t size; int ret; bo = qaic_alloc_init_bo(); @@ -795,13 +794,12 @@ struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_ goto attach_fail; } - size = PAGE_ALIGN(attach->dmabuf->size); - if (size == 0) { + if (!attach->dmabuf->size) { ret = -EINVAL; goto size_align_fail; } - drm_gem_private_object_init(dev, obj, size); + drm_gem_private_object_init(dev, obj, attach->dmabuf->size); /* * skipping dma_buf_map_attachment() as we do not know the direction * just yet. Once the direction is known in the subsequent IOCTL to diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index f819e760ff19..6f2bfcf7645c 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -61,6 +61,10 @@ config ACPI_CCA_REQUIRED config ACPI_TABLE_LIB bool +config ACPI_THERMAL_LIB + depends on THERMAL + bool + config ACPI_DEBUGGER bool "AML debugger interface" select ACPI_DEBUG @@ -327,6 +331,7 @@ config ACPI_THERMAL tristate "Thermal Zone" depends on ACPI_PROCESSOR select THERMAL + select ACPI_THERMAL_LIB default y help This driver supports ACPI thermal zones. 
Most mobile and diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index eaa09bf52f17..12ef8180d272 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -37,7 +37,7 @@ acpi-$(CONFIG_ACPI_SLEEP) += proc.o # ACPI Bus and Device Drivers # acpi-y += bus.o glue.o -acpi-y += scan.o +acpi-y += scan.o mipi-disco-img.o acpi-y += resource.o acpi-y += acpi_processor.o acpi-y += processor_core.o @@ -89,6 +89,7 @@ obj-$(CONFIG_ACPI_TAD) += acpi_tad.o obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o obj-$(CONFIG_ACPI_PROCESSOR) += processor.o obj-$(CONFIG_ACPI) += container.o +obj-$(CONFIG_ACPI_THERMAL_LIB) += thermal_lib.o obj-$(CONFIG_ACPI_THERMAL) += thermal.o obj-$(CONFIG_ACPI_PLATFORM_PROFILE) += platform_profile.o obj-$(CONFIG_ACPI_NFIT) += nfit/ diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c index e120a96e1eae..ca87a0939135 100644 --- a/drivers/acpi/acpi_extlog.c +++ b/drivers/acpi/acpi_extlog.c @@ -145,9 +145,14 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, static u32 err_seq; estatus = extlog_elog_entry_check(cpu, bank); - if (estatus == NULL || (mce->kflags & MCE_HANDLED_CEC)) + if (!estatus) return NOTIFY_DONE; + if (mce->kflags & MCE_HANDLED_CEC) { + estatus->block_status = 0; + return NOTIFY_DONE; + } + memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN); /* clear record status to enable BIOS to update it again */ estatus->block_status = 0; @@ -303,9 +308,10 @@ err: static void __exit extlog_exit(void) { mce_unregister_decode_chain(&extlog_mce_dec); - ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN; - if (extlog_l1_addr) + if (extlog_l1_addr) { + ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN; acpi_os_unmap_iomem(extlog_l1_addr, l1_size); + } if (elog_addr) acpi_os_unmap_iomem(elog_addr, elog_size); release_mem_region(elog_base, elog_size); diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c index c5598b6d5db8..794962c5c88e 100644 --- a/drivers/acpi/acpi_lpit.c +++ b/drivers/acpi/acpi_lpit.c @@ -105,7 +105,7 @@ static void lpit_update_residency(struct lpit_residency_info *info, return; info->frequency = lpit_native->counter_frequency ? 
- lpit_native->counter_frequency : tsc_khz * 1000; + lpit_native->counter_frequency : mul_u32_u32(tsc_khz, 1000U); if (!info->frequency) info->frequency = 1; diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 875de44961bf..04e273167e92 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -167,13 +167,9 @@ static struct pwm_lookup byt_pwm_lookup[] = { static void byt_pwm_setup(struct lpss_private_data *pdata) { - u64 uid; - /* Only call pwm_add_table for the first PWM controller */ - if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1) - return; - - pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); + if (acpi_dev_uid_match(pdata->adev, 1)) + pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); } #define LPSS_I2C_ENABLE 0x6c @@ -218,13 +214,9 @@ static struct pwm_lookup bsw_pwm_lookup[] = { static void bsw_pwm_setup(struct lpss_private_data *pdata) { - u64 uid; - /* Only call pwm_add_table for the first PWM controller */ - if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1) - return; - - pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); + if (acpi_dev_uid_match(pdata->adev, 1)) + pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); } static const struct property_entry lpt_spi_properties[] = { @@ -461,8 +453,9 @@ static int register_device_clock(struct acpi_device *adev, if (!clk_name) return -ENOMEM; clk = clk_register_fractional_divider(NULL, clk_name, parent, + 0, prv_base, 1, 15, 16, 15, CLK_FRAC_DIVIDER_POWER_OF_TWO_PS, - prv_base, 1, 15, 16, 15, 0, NULL); + NULL); parent = clk_name; clk_name = kasprintf(GFP_KERNEL, "%s-update", devname); @@ -570,34 +563,6 @@ static struct device *acpi_lpss_find_device(const char *hid, const char *uid) return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid); } -static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle) -{ - struct acpi_handle_list dep_devices; - acpi_status status; - bool ret = false; - int i; - - if (!acpi_has_method(adev->handle, "_DEP")) - return false; - - status = acpi_evaluate_reference(adev->handle, "_DEP", NULL, - &dep_devices); - if (ACPI_FAILURE(status)) { - dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n"); - return false; - } - - for (i = 0; i < dep_devices.count; i++) { - if (dep_devices.handles[i] == handle) { - ret = true; - break; - } - } - - acpi_handle_list_free(&dep_devices); - return ret; -} - static void acpi_lpss_link_consumer(struct device *dev1, const struct lpss_device_links *link) { @@ -608,7 +573,7 @@ static void acpi_lpss_link_consumer(struct device *dev1, return; if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids)) - || acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1))) + || acpi_device_dep(ACPI_HANDLE(dev2), ACPI_HANDLE(dev1))) device_link_add(dev2, dev1, link->flags); put_device(dev2); @@ -624,7 +589,7 @@ static void acpi_lpss_link_supplier(struct device *dev1, return; if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids)) - || acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2))) + || acpi_device_dep(ACPI_HANDLE(dev1), ACPI_HANDLE(dev2))) device_link_add(dev1, dev2, link->flags); put_device(dev2); diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 6cee536c229a..4afdda9db019 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -67,7 +67,7 @@ MODULE_PARM_DESC(hw_changes_brightness, static bool device_id_scheme = false; module_param(device_id_scheme, bool, 0444); -static int only_lcd = -1; +static int only_lcd; 
module_param(only_lcd, int, 0444); static bool may_report_brightness_keys; @@ -500,6 +500,15 @@ static const struct dmi_system_id video_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"), }, }, + { + .callback = video_set_report_key_events, + .driver_data = (void *)((uintptr_t)REPORT_BRIGHTNESS_KEY_EVENTS), + .ident = "COLORFUL X15 AT 23", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "COLORFUL"), + DMI_MATCH(DMI_PRODUCT_NAME, "X15 AT 23"), + }, + }, /* * Some machines change the brightness themselves when a brightness * hotkey gets pressed, despite us telling them not to. In this case @@ -1713,12 +1722,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device) return; count++; - acpi_get_parent(device->dev->handle, &acpi_parent); - - pdev = acpi_get_pci_dev(acpi_parent); - if (pdev) { - parent = &pdev->dev; - pci_dev_put(pdev); + if (ACPI_SUCCESS(acpi_get_parent(device->dev->handle, &acpi_parent))) { + pdev = acpi_get_pci_dev(acpi_parent); + if (pdev) { + parent = &pdev->dev; + pci_dev_put(pdev); + } } memset(&props, 0, sizeof(struct backlight_properties)); @@ -2137,57 +2146,6 @@ static int __init intel_opregion_present(void) return opregion; } -/* Check if the chassis-type indicates there is no builtin LCD panel */ -static bool dmi_is_desktop(void) -{ - const char *chassis_type; - unsigned long type; - - chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); - if (!chassis_type) - return false; - - if (kstrtoul(chassis_type, 10, &type) != 0) - return false; - - switch (type) { - case 0x03: /* Desktop */ - case 0x04: /* Low Profile Desktop */ - case 0x05: /* Pizza Box */ - case 0x06: /* Mini Tower */ - case 0x07: /* Tower */ - case 0x10: /* Lunch Box */ - case 0x11: /* Main Server Chassis */ - return true; - } - - return false; -} - -/* - * We're seeing a lot of bogus backlight interfaces on newer machines - * without a LCD such as desktops, servers and HDMI sticks. Checking the - * lcd flag fixes this, enable this by default on any machines which are: - * 1. Win8 ready (where we also prefer the native backlight driver, so - * normally the acpi_video code should not register there anyways); *and* - * 2.1 Report a desktop/server DMI chassis-type, or - * 2.2 Are an ACPI-reduced-hardware platform (and thus won't use the EC for - backlight control) - */ -static bool should_check_lcd_flag(void) -{ - if (!acpi_osi_is_win8()) - return false; - - if (dmi_is_desktop()) - return true; - - if (acpi_reduced_hardware()) - return true; - - return false; -} - int acpi_video_register(void) { int ret = 0; @@ -2201,9 +2159,6 @@ int acpi_video_register(void) goto leave; } - if (only_lcd == -1) - only_lcd = should_check_lcd_flag(); - dmi_check_system(video_dmi_table); ret = acpi_bus_register_driver(&acpi_video_bus); diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index ca28183f4d13..8e9e001da38f 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -81,7 +81,7 @@ static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void) return wdat; } -/** +/* * Returns true if this system should prefer ACPI based watchdog instead of * the native one (which are typically the same hardware). 
*/ diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index 013eb621dc92..89fb9331c611 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c @@ -73,6 +73,7 @@ static u32 notrigger; static u32 vendor_flags; static struct debugfs_blob_wrapper vendor_blob; +static struct debugfs_blob_wrapper vendor_errors; static char vendor_dev[64]; /* @@ -182,6 +183,21 @@ static int einj_timedout(u64 *t) return 0; } +static void get_oem_vendor_struct(u64 paddr, int offset, + struct vendor_error_type_extension *v) +{ + unsigned long vendor_size; + u64 target_pa = paddr + offset + sizeof(struct vendor_error_type_extension); + + vendor_size = v->length - sizeof(struct vendor_error_type_extension); + + if (vendor_size) + vendor_errors.data = acpi_os_map_memory(target_pa, vendor_size); + + if (vendor_errors.data) + vendor_errors.size = vendor_size; +} + static void check_vendor_extension(u64 paddr, struct set_error_type_with_address *v5param) { @@ -194,6 +210,7 @@ static void check_vendor_extension(u64 paddr, v = acpi_os_map_iomem(paddr + offset, sizeof(*v)); if (!v) return; + get_oem_vendor_struct(paddr, offset, v); sbdf = v->pcie_sbdf; sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n", sbdf >> 24, (sbdf >> 16) & 0xff, @@ -577,38 +594,40 @@ static u64 error_param2; static u64 error_param3; static u64 error_param4; static struct dentry *einj_debug_dir; -static const char * const einj_error_type_string[] = { - "0x00000001\tProcessor Correctable\n", - "0x00000002\tProcessor Uncorrectable non-fatal\n", - "0x00000004\tProcessor Uncorrectable fatal\n", - "0x00000008\tMemory Correctable\n", - "0x00000010\tMemory Uncorrectable non-fatal\n", - "0x00000020\tMemory Uncorrectable fatal\n", - "0x00000040\tPCI Express Correctable\n", - "0x00000080\tPCI Express Uncorrectable non-fatal\n", - "0x00000100\tPCI Express Uncorrectable fatal\n", - "0x00000200\tPlatform Correctable\n", - "0x00000400\tPlatform Uncorrectable non-fatal\n", - "0x00000800\tPlatform Uncorrectable fatal\n", - "0x00001000\tCXL.cache Protocol Correctable\n", - "0x00002000\tCXL.cache Protocol Uncorrectable non-fatal\n", - "0x00004000\tCXL.cache Protocol Uncorrectable fatal\n", - "0x00008000\tCXL.mem Protocol Correctable\n", - "0x00010000\tCXL.mem Protocol Uncorrectable non-fatal\n", - "0x00020000\tCXL.mem Protocol Uncorrectable fatal\n", +static struct { u32 mask; const char *str; } const einj_error_type_string[] = { + { BIT(0), "Processor Correctable" }, + { BIT(1), "Processor Uncorrectable non-fatal" }, + { BIT(2), "Processor Uncorrectable fatal" }, + { BIT(3), "Memory Correctable" }, + { BIT(4), "Memory Uncorrectable non-fatal" }, + { BIT(5), "Memory Uncorrectable fatal" }, + { BIT(6), "PCI Express Correctable" }, + { BIT(7), "PCI Express Uncorrectable non-fatal" }, + { BIT(8), "PCI Express Uncorrectable fatal" }, + { BIT(9), "Platform Correctable" }, + { BIT(10), "Platform Uncorrectable non-fatal" }, + { BIT(11), "Platform Uncorrectable fatal"}, + { BIT(12), "CXL.cache Protocol Correctable" }, + { BIT(13), "CXL.cache Protocol Uncorrectable non-fatal" }, + { BIT(14), "CXL.cache Protocol Uncorrectable fatal" }, + { BIT(15), "CXL.mem Protocol Correctable" }, + { BIT(16), "CXL.mem Protocol Uncorrectable non-fatal" }, + { BIT(17), "CXL.mem Protocol Uncorrectable fatal" }, + { BIT(31), "Vendor Defined Error Types" }, }; static int available_error_type_show(struct seq_file *m, void *v) { int rc; - u32 available_error_type = 0; + u32 error_type = 0; - rc = einj_get_available_error_type(&available_error_type); + rc 
= einj_get_available_error_type(&error_type); if (rc) return rc; for (int pos = 0; pos < ARRAY_SIZE(einj_error_type_string); pos++) - if (available_error_type & BIT(pos)) - seq_puts(m, einj_error_type_string[pos]); + if (error_type & einj_error_type_string[pos].mask) + seq_printf(m, "0x%08x\t%s\n", einj_error_type_string[pos].mask, + einj_error_type_string[pos].str); return 0; } @@ -767,6 +786,10 @@ static int __init einj_init(void) einj_debug_dir, &vendor_flags); } + if (vendor_errors.size) + debugfs_create_blob("oem_error", 0600, einj_debug_dir, + &vendor_errors); + pr_info("Error INJection is initialized.\n"); return 0; @@ -792,6 +815,8 @@ static void __exit einj_exit(void) sizeof(struct einj_parameter); acpi_os_unmap_iomem(einj_param, size); + if (vendor_errors.size) + acpi_os_unmap_memory(vendor_errors.data, vendor_errors.size); } einj_exec_ctx_init(&ctx); apei_exec_post_unmap_gars(&ctx); diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 63ad0541db38..ab2a82cb1b0b 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -102,6 +102,20 @@ static inline bool is_hest_type_generic_v2(struct ghes *ghes) } /* + * A platform may describe one error source for the handling of synchronous + * errors (e.g. MCE or SEA), or for handling asynchronous errors (e.g. SCI + * or External Interrupt). On x86, the HEST notifications are always + * asynchronous, so only SEA on ARM is delivered as a synchronous + * notification. + */ +static inline bool is_hest_sync_notify(struct ghes *ghes) +{ + u8 notify_type = ghes->generic->notify.type; + + return notify_type == ACPI_HEST_NOTIFY_SEA; +} + +/* * This driver isn't really modular, however for the time being, * continuing to use module_param is the easiest way to remain * compatible with existing boot arg use cases. @@ -489,7 +503,7 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags) } static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, - int sev) + int sev, bool sync) { int flags = -1; int sec_sev = ghes_severity(gdata->error_severity); @@ -503,7 +517,7 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED)) flags = MF_SOFT_OFFLINE; if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE) - flags = 0; + flags = sync ? MF_ACTION_REQUIRED : 0; if (flags != -1) return ghes_do_memory_failure(mem_err->physical_addr, flags); @@ -511,9 +525,11 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, return false; } -static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int sev) +static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, + int sev, bool sync) { struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata); + int flags = sync ? MF_ACTION_REQUIRED : 0; bool queued = false; int sec_sev, i; char *p; @@ -538,7 +554,7 @@ static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int s * and don't filter out 'corrected' error here. 
*/ if (is_cache && has_pa) { - queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0); + queued = ghes_do_memory_failure(err_info->physical_fault_addr, flags); p += err_info->length; continue; } @@ -666,6 +682,7 @@ static bool ghes_do_proc(struct ghes *ghes, const guid_t *fru_id = &guid_null; char *fru_text = ""; bool queued = false; + bool sync = is_hest_sync_notify(ghes); sev = ghes_severity(estatus->error_severity); apei_estatus_for_each_section(estatus, gdata) { @@ -683,13 +700,13 @@ static bool ghes_do_proc(struct ghes *ghes, atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err); arch_apei_report_mem_error(sev, mem_err); - queued = ghes_handle_memory_failure(gdata, sev); + queued = ghes_handle_memory_failure(gdata, sev, sync); } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { ghes_handle_aer(gdata); } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) { - queued = ghes_handle_arm_hw_error(gdata, sev); + queued = ghes_handle_arm_hw_error(gdata, sev, sync); } else { void *err = acpi_hest_get_payload(gdata); diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index 143debc1ba4a..726944648c9b 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_ACPI_GTDT) += gtdt.o obj-$(CONFIG_ACPI_APMT) += apmt.o obj-$(CONFIG_ARM_AMBA) += amba.o obj-y += dma.o init.o +obj-y += thermal_cpufreq.o diff --git a/drivers/acpi/arm64/thermal_cpufreq.c b/drivers/acpi/arm64/thermal_cpufreq.c new file mode 100644 index 000000000000..582854914c5c --- /dev/null +++ b/drivers/acpi/arm64/thermal_cpufreq.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/acpi.h> +#include <linux/export.h> + +#include "../internal.h" + +#define SMCCC_SOC_ID_T241 0x036b0241 + +int acpi_arch_thermal_cpufreq_pctg(void) +{ + s32 soc_id = arm_smccc_get_soc_id_version(); + + /* + * Check JEP106 code for NVIDIA Tegra241 chip (036b:0241) and + * reduce the CPUFREQ Thermal reduction percentage to 5%. + */ + if (soc_id == SMCCC_SOC_ID_T241) + return 5; + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_arch_thermal_cpufreq_pctg); diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 72e64c0718c9..569bd15f211b 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -408,7 +408,7 @@ static void acpi_bus_decode_usb_osc(const char *msg, u32 bits) static u8 sb_usb_uuid_str[] = "23A0D13A-26AB-486C-9C5F-0FFA525A575A"; static void acpi_bus_osc_negotiate_usb_control(void) { - u32 capbuf[3]; + u32 capbuf[3], *capbuf_ret; struct acpi_osc_context context = { .uuid_str = sb_usb_uuid_str, .rev = 1, @@ -428,7 +428,12 @@ static void acpi_bus_osc_negotiate_usb_control(void) control = OSC_USB_USB3_TUNNELING | OSC_USB_DP_TUNNELING | OSC_USB_PCIE_TUNNELING | OSC_USB_XDOMAIN; - capbuf[OSC_QUERY_DWORD] = 0; + /* + * Run _OSC first with query bit set, trying to get control over + * all tunneling. The platform can then clear out bits in the + * control dword that it does not want to grant to the OS. + */ + capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE; capbuf[OSC_SUPPORT_DWORD] = 0; capbuf[OSC_CONTROL_DWORD] = control; @@ -441,8 +446,29 @@ static void acpi_bus_osc_negotiate_usb_control(void) goto out_free; } + /* + * Run _OSC again now with query bit clear and the control dword + * matching what the platform granted (which may not have all + * the control bits set). 
+ */ + capbuf_ret = context.ret.pointer; + + capbuf[OSC_QUERY_DWORD] = 0; + capbuf[OSC_CONTROL_DWORD] = capbuf_ret[OSC_CONTROL_DWORD]; + + kfree(context.ret.pointer); + + status = acpi_run_osc(handle, &context); + if (ACPI_FAILURE(status)) + return; + + if (context.ret.length != sizeof(capbuf)) { + pr_info("USB4 _OSC: returned invalid length buffer\n"); + goto out_free; + } + osc_sb_native_usb4_control = - control & acpi_osc_ctx_get_pci_control(&context); + control & acpi_osc_ctx_get_pci_control(&context); acpi_bus_decode_usb_osc("USB4 _OSC: OS supports", control); acpi_bus_decode_usb_osc("USB4 _OSC: OS controls", diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 1e76a64cce0a..cc61020756be 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -480,6 +480,7 @@ static int acpi_button_suspend(struct device *dev) static int acpi_button_resume(struct device *dev) { + struct input_dev *input; struct acpi_device *device = to_acpi_device(dev); struct acpi_button *button = acpi_driver_data(device); @@ -489,6 +490,14 @@ static int acpi_button_resume(struct device *dev) button->last_time = ktime_get(); acpi_lid_initialize_state(device); } + + if (button->type == ACPI_BUTTON_TYPE_POWER) { + input = button->input; + input_report_key(input, KEY_WAKEUP, 1); + input_sync(input); + input_report_key(input, KEY_WAKEUP, 0); + input_sync(input); + } return 0; } #endif @@ -579,6 +588,7 @@ static int acpi_button_add(struct acpi_device *device) switch (button->type) { case ACPI_BUTTON_TYPE_POWER: input_set_capability(input, EV_KEY, KEY_POWER); + input_set_capability(input, EV_KEY, KEY_WAKEUP); break; case ACPI_BUTTON_TYPE_SLEEP: diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 7ff269a78c20..d155a86a8614 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -39,6 +39,9 @@ #include <linux/rwsem.h> #include <linux/wait.h> #include <linux/topology.h> +#include <linux/dmi.h> +#include <linux/units.h> +#include <asm/unaligned.h> #include <acpi/cppc_acpi.h> @@ -1760,3 +1763,104 @@ unsigned int cppc_get_transition_latency(int cpu_num) return latency_ns; } EXPORT_SYMBOL_GPL(cppc_get_transition_latency); + +/* Minimum struct length needed for the DMI processor entry we want */ +#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48 + +/* Offset in the DMI processor structure for the max frequency */ +#define DMI_PROCESSOR_MAX_SPEED 0x14 + +/* Callback function used to retrieve the max frequency from DMI */ +static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) +{ + const u8 *dmi_data = (const u8 *)dm; + u16 *mhz = (u16 *)private; + + if (dm->type == DMI_ENTRY_PROCESSOR && + dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) { + u16 val = (u16)get_unaligned((const u16 *) + (dmi_data + DMI_PROCESSOR_MAX_SPEED)); + *mhz = val > *mhz ? val : *mhz; + } +} + +/* Look up the max frequency in DMI */ +static u64 cppc_get_dmi_max_khz(void) +{ + u16 mhz = 0; + + dmi_walk(cppc_find_dmi_mhz, &mhz); + + /* + * Real stupid fallback value, just in case there is no + * actual value set. + */ + mhz = mhz ? mhz : 1; + + return KHZ_PER_MHZ * mhz; +} + +/* + * If CPPC lowest_freq and nominal_freq registers are exposed then we can + * use them to convert perf to freq and vice versa. 
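For illustration, here is a minimal, self-contained userspace sketch of the affine perf-to-frequency mapping that cppc_perf_to_khz() below implements. The demo_* names and all numbers are invented for the example; the real values come from the _CPC capability registers.

#include <stdint.h>
#include <stdio.h>

#define KHZ_PER_MHZ 1000ULL

/* Hypothetical capability values; the driver reads these from _CPC. */
struct demo_caps {
	uint64_t lowest_perf, nominal_perf;	/* abstract perf levels */
	uint64_t lowest_freq, nominal_freq;	/* MHz */
};

static uint64_t demo_perf_to_khz(const struct demo_caps *c, uint64_t perf)
{
	/* Line through (lowest_perf, lowest_freq) and (nominal_perf, nominal_freq). */
	uint64_t mul = (c->nominal_freq - c->lowest_freq) * KHZ_PER_MHZ;
	uint64_t div = c->nominal_perf - c->lowest_perf;
	int64_t offset = (int64_t)(c->nominal_freq * KHZ_PER_MHZ) -
			 (int64_t)(c->nominal_perf * mul / div);

	return offset + perf * mul / div;
}

int main(void)
{
	/* Say perf 10 maps to 800 MHz and perf 40 to 2000 MHz ... */
	struct demo_caps c = {
		.lowest_perf = 10, .nominal_perf = 40,
		.lowest_freq = 800, .nominal_freq = 2000,
	};

	/* ... then perf 25, halfway between the two points, yields 1400000 kHz. */
	printf("%llu kHz\n", (unsigned long long)demo_perf_to_khz(&c, 25));
	return 0;
}

The patch's own implementation, continuing below, uses div64_u64() so the arithmetic is safe on 32-bit builds and falls back to a DMI-derived maximum frequency when the frequency registers are absent.
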
The conversion is + * extrapolated as an affine function passing by the 2 points: + * - (Low perf, Low freq) + * - (Nominal perf, Nominal freq) + */ +unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf) +{ + s64 retval, offset = 0; + static u64 max_khz; + u64 mul, div; + + if (caps->lowest_freq && caps->nominal_freq) { + mul = caps->nominal_freq - caps->lowest_freq; + mul *= KHZ_PER_MHZ; + div = caps->nominal_perf - caps->lowest_perf; + offset = caps->nominal_freq * KHZ_PER_MHZ - + div64_u64(caps->nominal_perf * mul, div); + } else { + if (!max_khz) + max_khz = cppc_get_dmi_max_khz(); + mul = max_khz; + div = caps->highest_perf; + } + + retval = offset + div64_u64(perf * mul, div); + if (retval >= 0) + return retval; + return 0; +} +EXPORT_SYMBOL_GPL(cppc_perf_to_khz); + +unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq) +{ + s64 retval, offset = 0; + static u64 max_khz; + u64 mul, div; + + if (caps->lowest_freq && caps->nominal_freq) { + mul = caps->nominal_perf - caps->lowest_perf; + div = caps->nominal_freq - caps->lowest_freq; + /* + * We don't need to convert to kHz for computing offset and can + * directly use nominal_freq and lowest_freq as the div64_u64 + * will remove the frequency unit. + */ + offset = caps->nominal_perf - + div64_u64(caps->nominal_freq * mul, div); + /* But we need it for computing the perf level. */ + div *= KHZ_PER_MHZ; + } else { + if (!max_khz) + max_khz = cppc_get_dmi_max_khz(); + mul = caps->highest_perf; + div = max_khz; + } + + retval = offset + div64_u64(freq * mul, div); + if (retval >= 0) + return retval; + return 0; +} +EXPORT_SYMBOL_GPL(cppc_khz_to_perf); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index a59c11df7375..dbdee2924594 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -525,12 +525,10 @@ static void acpi_ec_clear(struct acpi_ec *ec) static void acpi_ec_enable_event(struct acpi_ec *ec) { - unsigned long flags; - - spin_lock_irqsave(&ec->lock, flags); + spin_lock(&ec->lock); if (acpi_ec_started(ec)) __acpi_ec_enable_event(ec); - spin_unlock_irqrestore(&ec->lock, flags); + spin_unlock(&ec->lock); /* Drain additional events if hardware requires that */ if (EC_FLAGS_CLEAR_ON_RESUME) @@ -546,11 +544,9 @@ static void __acpi_ec_flush_work(void) static void acpi_ec_disable_event(struct acpi_ec *ec) { - unsigned long flags; - - spin_lock_irqsave(&ec->lock, flags); + spin_lock(&ec->lock); __acpi_ec_disable_event(ec); - spin_unlock_irqrestore(&ec->lock, flags); + spin_unlock(&ec->lock); /* * When ec_freeze_events is true, we need to flush events in @@ -571,10 +567,9 @@ void acpi_ec_flush_work(void) static bool acpi_ec_guard_event(struct acpi_ec *ec) { - unsigned long flags; bool guarded; - spin_lock_irqsave(&ec->lock, flags); + spin_lock(&ec->lock); /* * If firmware SCI_EVT clearing timing is "event", we actually * don't know when the SCI_EVT will be cleared by firmware after @@ -590,31 +585,29 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec) guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && ec->event_state != EC_EVENT_READY && (!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY); - spin_unlock_irqrestore(&ec->lock, flags); + spin_unlock(&ec->lock); return guarded; } static int ec_transaction_polled(struct acpi_ec *ec) { - unsigned long flags; int ret = 0; - spin_lock_irqsave(&ec->lock, flags); + spin_lock(&ec->lock); if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL)) ret = 1; - spin_unlock_irqrestore(&ec->lock, flags); + spin_unlock(&ec->lock); return ret; 
} static int ec_transaction_completed(struct acpi_ec *ec) { - unsigned long flags; int ret = 0; - spin_lock_irqsave(&ec->lock, flags); + spin_lock(&ec->lock); if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) ret = 1; - spin_unlock_irqrestore(&ec->lock, flags); + spin_unlock(&ec->lock); return ret; } @@ -756,7 +749,6 @@ static int ec_guard(struct acpi_ec *ec) static int ec_poll(struct acpi_ec *ec) { - unsigned long flags; int repeat = 5; /* number of command restarts */ while (repeat--) { @@ -765,14 +757,14 @@ static int ec_poll(struct acpi_ec *ec) do { if (!ec_guard(ec)) return 0; - spin_lock_irqsave(&ec->lock, flags); + spin_lock(&ec->lock); advance_transaction(ec, false); - spin_unlock_irqrestore(&ec->lock, flags); + spin_unlock(&ec->lock); } while (time_before(jiffies, delay)); pr_debug("controller reset, restart transaction\n"); - spin_lock_irqsave(&ec->lock, flags); + spin_lock(&ec->lock); start_transaction(ec); - spin_unlock_irqrestore(&ec->lock, flags); + spin_unlock(&ec->lock); } return -ETIME; } @@ -780,11 +772,10 @@ static int ec_poll(struct acpi_ec *ec) static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, struct transaction *t) { - unsigned long tmp; int ret = 0; /* start transaction */ - spin_lock_irqsave(&ec->lock, tmp); + spin_lock(&ec->lock); /* Enable GPE for command processing (IBF=0/OBF=1) */ if (!acpi_ec_submit_flushable_request(ec)) { ret = -EINVAL; @@ -795,11 +786,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, ec->curr = t; ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command)); start_transaction(ec); - spin_unlock_irqrestore(&ec->lock, tmp); + spin_unlock(&ec->lock); ret = ec_poll(ec); - spin_lock_irqsave(&ec->lock, tmp); + spin_lock(&ec->lock); if (t->irq_count == ec_storm_threshold) acpi_ec_unmask_events(ec); ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command)); @@ -808,7 +799,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, acpi_ec_complete_request(ec); ec_dbg_ref(ec, "Decrease command"); unlock: - spin_unlock_irqrestore(&ec->lock, tmp); + spin_unlock(&ec->lock); return ret; } @@ -936,9 +927,7 @@ EXPORT_SYMBOL(ec_get_handle); static void acpi_ec_start(struct acpi_ec *ec, bool resuming) { - unsigned long flags; - - spin_lock_irqsave(&ec->lock, flags); + spin_lock(&ec->lock); if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) { ec_dbg_drv("Starting EC"); /* Enable GPE for event processing (SCI_EVT=1) */ @@ -948,31 +937,28 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming) } ec_log_drv("EC started"); } - spin_unlock_irqrestore(&ec->lock, flags); + spin_unlock(&ec->lock); } static bool acpi_ec_stopped(struct acpi_ec *ec) { - unsigned long flags; bool flushed; - spin_lock_irqsave(&ec->lock, flags); + spin_lock(&ec->lock); flushed = acpi_ec_flushed(ec); - spin_unlock_irqrestore(&ec->lock, flags); + spin_unlock(&ec->lock); return flushed; } static void acpi_ec_stop(struct acpi_ec *ec, bool suspending) { - unsigned long flags; - - spin_lock_irqsave(&ec->lock, flags); + spin_lock(&ec->lock); if (acpi_ec_started(ec)) { ec_dbg_drv("Stopping EC"); set_bit(EC_FLAGS_STOPPED, &ec->flags); - spin_unlock_irqrestore(&ec->lock, flags); + spin_unlock(&ec->lock); wait_event(ec->wait, acpi_ec_stopped(ec)); - spin_lock_irqsave(&ec->lock, flags); + spin_lock(&ec->lock); /* Disable GPE for event processing (SCI_EVT=1) */ if (!suspending) { acpi_ec_complete_request(ec); @@ -983,29 +969,25 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending) clear_bit(EC_FLAGS_STOPPED, &ec->flags); 
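A note on the spin_lock_irqsave()-to-spin_lock() conversions running through this ec.c diff: both the dedicated EC GPIO interrupt (install_gpio_irq_event_handler() further below) and the SCI itself (osl.c near the end of this series) are switched to request_threaded_irq(), so the handlers run in kernel threads rather than hard-IRQ context, which appears to be what justifies dropping the interrupt-disabling lock variants. A minimal sketch of the resulting pattern, with hypothetical demo_* names:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* illustrative lock, like ec->lock */

/* The *threaded* half of the IRQ: runs in process context. */
static irqreturn_t demo_irq_thread(int irq, void *dev_id)
{
	spin_lock(&demo_lock);		/* plain lock: never taken in hard-IRQ context */
	/* ... process the event ... */
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}

static int demo_setup_irq(unsigned int irq, void *dev_id)
{
	/*
	 * NULL hard handler plus IRQF_ONESHOT: the default primary handler
	 * wakes the thread and keeps the line masked until it finishes.
	 */
	return request_threaded_irq(irq, NULL, demo_irq_thread,
				    IRQF_SHARED | IRQF_ONESHOT, "demo", dev_id);
}
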
ec_log_drv("EC stopped"); } - spin_unlock_irqrestore(&ec->lock, flags); + spin_unlock(&ec->lock); } static void acpi_ec_enter_noirq(struct acpi_ec *ec) { - unsigned long flags; - - spin_lock_irqsave(&ec->lock, flags); + spin_lock(&ec->lock); ec->busy_polling = true; ec->polling_guard = 0; ec_log_drv("interrupt blocked"); - spin_unlock_irqrestore(&ec->lock, flags); + spin_unlock(&ec->lock); } static void acpi_ec_leave_noirq(struct acpi_ec *ec) { - unsigned long flags; - - spin_lock_irqsave(&ec->lock, flags); + spin_lock(&ec->lock); ec->busy_polling = ec_busy_polling; ec->polling_guard = ec_polling_guard; ec_log_drv("interrupt unblocked"); - spin_unlock_irqrestore(&ec->lock, flags); + spin_unlock(&ec->lock); } void acpi_ec_block_transactions(void) @@ -1137,9 +1119,9 @@ static void acpi_ec_event_processor(struct work_struct *work) ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit); - spin_lock_irq(&ec->lock); + spin_lock(&ec->lock); ec->queries_in_progress--; - spin_unlock_irq(&ec->lock); + spin_unlock(&ec->lock); acpi_ec_put_query_handler(handler); kfree(q); @@ -1202,12 +1184,12 @@ static int acpi_ec_submit_query(struct acpi_ec *ec) */ ec_dbg_evt("Query(0x%02x) scheduled", value); - spin_lock_irq(&ec->lock); + spin_lock(&ec->lock); ec->queries_in_progress++; queue_work(ec_query_wq, &q->work); - spin_unlock_irq(&ec->lock); + spin_unlock(&ec->lock); return 0; @@ -1223,14 +1205,14 @@ static void acpi_ec_event_handler(struct work_struct *work) ec_dbg_evt("Event started"); - spin_lock_irq(&ec->lock); + spin_lock(&ec->lock); while (ec->events_to_process) { - spin_unlock_irq(&ec->lock); + spin_unlock(&ec->lock); acpi_ec_submit_query(ec); - spin_lock_irq(&ec->lock); + spin_lock(&ec->lock); ec->events_to_process--; } @@ -1247,11 +1229,11 @@ static void acpi_ec_event_handler(struct work_struct *work) ec_dbg_evt("Event stopped"); - spin_unlock_irq(&ec->lock); + spin_unlock(&ec->lock); guard_timeout = !!ec_guard(ec); - spin_lock_irq(&ec->lock); + spin_lock(&ec->lock); /* Take care of SCI_EVT unless someone else is doing that. */ if (guard_timeout && !ec->curr) @@ -1264,7 +1246,7 @@ static void acpi_ec_event_handler(struct work_struct *work) ec->events_in_progress--; - spin_unlock_irq(&ec->lock); + spin_unlock(&ec->lock); } static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt) @@ -1289,13 +1271,11 @@ static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt static void acpi_ec_handle_interrupt(struct acpi_ec *ec) { - unsigned long flags; - - spin_lock_irqsave(&ec->lock, flags); + spin_lock(&ec->lock); clear_gpe_and_advance_transaction(ec, true); - spin_unlock_irqrestore(&ec->lock, flags); + spin_unlock(&ec->lock); } static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, @@ -1458,8 +1438,8 @@ static bool install_gpe_event_handler(struct acpi_ec *ec) static bool install_gpio_irq_event_handler(struct acpi_ec *ec) { - return request_irq(ec->irq, acpi_ec_irq_handler, IRQF_SHARED, - "ACPI EC", ec) >= 0; + return request_threaded_irq(ec->irq, NULL, acpi_ec_irq_handler, + IRQF_SHARED | IRQF_ONESHOT, "ACPI EC", ec) >= 0; } /** @@ -2105,7 +2085,7 @@ bool acpi_ec_dispatch_gpe(void) * Dispatch the EC GPE in-band, but do not report wakeup in any case * to allow the caller to process events properly after that. 
*/ - spin_lock_irq(&first_ec->lock); + spin_lock(&first_ec->lock); if (acpi_ec_gpe_status_set(first_ec)) { pm_pr_dbg("ACPI EC GPE status set\n"); @@ -2114,7 +2094,7 @@ bool acpi_ec_dispatch_gpe(void) work_in_progress = acpi_ec_work_in_progress(first_ec); } - spin_unlock_irq(&first_ec->lock); + spin_unlock(&first_ec->lock); if (!work_in_progress) return false; @@ -2127,11 +2107,11 @@ bool acpi_ec_dispatch_gpe(void) pm_pr_dbg("ACPI EC work flushed\n"); - spin_lock_irq(&first_ec->lock); + spin_lock(&first_ec->lock); work_in_progress = acpi_ec_work_in_progress(first_ec); - spin_unlock_irq(&first_ec->lock); + spin_unlock(&first_ec->lock); } while (work_in_progress && !pm_wakeup_pending()); return false; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 866c7c4ed233..6588525c45ef 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -85,6 +85,20 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent); acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context); void acpi_scan_table_notify(void); +int acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp); +int acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp); +int acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp); +int acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp); + +#ifdef CONFIG_ARM64 +int acpi_arch_thermal_cpufreq_pctg(void); +#else +static inline int acpi_arch_thermal_cpufreq_pctg(void) +{ + return 0; +} +#endif + /* -------------------------------------------------------------------------- Device Node Initialization / Removal -------------------------------------------------------------------------- */ @@ -148,8 +162,11 @@ int acpi_wakeup_device_init(void); #ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC void acpi_early_processor_control_setup(void); void acpi_early_processor_set_pdc(void); - +#ifdef CONFIG_X86 void acpi_proc_quirk_mwait_check(void); +#else +static inline void acpi_proc_quirk_mwait_check(void) {} +#endif bool processor_physically_present(acpi_handle handle); #else static inline void acpi_early_processor_control_setup(void) {} @@ -276,4 +293,13 @@ void acpi_init_lpit(void); static inline void acpi_init_lpit(void) { } #endif +/*-------------------------------------------------------------------------- + ACPI _CRS CSI-2 and MIPI DisCo for Imaging + -------------------------------------------------------------------------- */ + +void acpi_mipi_check_crs_csi2(acpi_handle handle); +void acpi_mipi_scan_crs_csi2(void); +void acpi_mipi_init_crs_csi2_swnodes(void); +void acpi_mipi_crs_csi2_cleanup(void); + #endif /* _ACPI_INTERNAL_H_ */ diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c new file mode 100644 index 000000000000..7286cf4579bc --- /dev/null +++ b/drivers/acpi/mipi-disco-img.c @@ -0,0 +1,725 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * MIPI DisCo for Imaging support. + * + * Copyright (C) 2023 Intel Corporation + * + * Support MIPI DisCo for Imaging by parsing ACPI _CRS CSI-2 records defined in + * Section 6.4.3.8.2.4 "Camera Serial Interface (CSI-2) Connection Resource + * Descriptor" of ACPI 6.5 and using device properties defined by the MIPI DisCo + * for Imaging specification. + * + * The implementation looks for the information in the ACPI namespace (CSI-2 + * resource descriptors in _CRS) and constructs software nodes compatible with + * Documentation/firmware-guide/acpi/dsd/graph.rst to represent the CSI-2 + * connection graph. 
The software nodes are then populated with the data + * extracted from the _CRS CSI-2 resource descriptors and the MIPI DisCo + * for Imaging device properties present in _DSD for the ACPI device objects + * with CSI-2 connections. + */ + +#include <linux/acpi.h> +#include <linux/limits.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/overflow.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include <media/v4l2-fwnode.h> + +#include "internal.h" + +static LIST_HEAD(acpi_mipi_crs_csi2_list); + +static void acpi_mipi_data_tag(acpi_handle handle, void *context) +{ +} + +/* Connection data extracted from one _CRS CSI-2 resource descriptor. */ +struct crs_csi2_connection { + struct list_head entry; + struct acpi_resource_csi2_serialbus csi2_data; + acpi_handle remote_handle; + char remote_name[]; +}; + +/* Data extracted from _CRS CSI-2 resource descriptors for one device. */ +struct crs_csi2 { + struct list_head entry; + acpi_handle handle; + struct acpi_device_software_nodes *swnodes; + struct list_head connections; + u32 port_count; +}; + +struct csi2_resources_walk_data { + acpi_handle handle; + struct list_head connections; +}; + +static acpi_status parse_csi2_resource(struct acpi_resource *res, void *context) +{ + struct csi2_resources_walk_data *crwd = context; + struct acpi_resource_csi2_serialbus *csi2_res; + struct acpi_resource_source *csi2_res_src; + u16 csi2_res_src_length; + struct crs_csi2_connection *conn; + acpi_handle remote_handle; + + if (res->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) + return AE_OK; + + csi2_res = &res->data.csi2_serial_bus; + + if (csi2_res->type != ACPI_RESOURCE_SERIAL_TYPE_CSI2) + return AE_OK; + + csi2_res_src = &csi2_res->resource_source; + if (ACPI_FAILURE(acpi_get_handle(NULL, csi2_res_src->string_ptr, + &remote_handle))) { + acpi_handle_debug(crwd->handle, + "unable to find resource source\n"); + return AE_OK; + } + csi2_res_src_length = csi2_res_src->string_length; + if (!csi2_res_src_length) { + acpi_handle_debug(crwd->handle, + "invalid resource source string length\n"); + return AE_OK; + } + + conn = kmalloc(struct_size(conn, remote_name, csi2_res_src_length + 1), + GFP_KERNEL); + if (!conn) + return AE_OK; + + conn->csi2_data = *csi2_res; + strscpy(conn->remote_name, csi2_res_src->string_ptr, csi2_res_src_length); + conn->csi2_data.resource_source.string_ptr = conn->remote_name; + conn->remote_handle = remote_handle; + + list_add(&conn->entry, &crwd->connections); + + return AE_OK; +} + +static struct crs_csi2 *acpi_mipi_add_crs_csi2(acpi_handle handle, + struct list_head *list) +{ + struct crs_csi2 *csi2; + + csi2 = kzalloc(sizeof(*csi2), GFP_KERNEL); + if (!csi2) + return NULL; + + csi2->handle = handle; + INIT_LIST_HEAD(&csi2->connections); + csi2->port_count = 1; + + if (ACPI_FAILURE(acpi_attach_data(handle, acpi_mipi_data_tag, csi2))) { + kfree(csi2); + return NULL; + } + + list_add(&csi2->entry, list); + + return csi2; +} + +static struct crs_csi2 *acpi_mipi_get_crs_csi2(acpi_handle handle) +{ + struct crs_csi2 *csi2; + + if (ACPI_FAILURE(acpi_get_data_full(handle, acpi_mipi_data_tag, + (void **)&csi2, NULL))) + return NULL; + + return csi2; +} + +static void csi_csr2_release_connections(struct list_head *list) +{ + struct crs_csi2_connection *conn, *conn_tmp; + + list_for_each_entry_safe(conn, conn_tmp, list, entry) { + list_del(&conn->entry); + kfree(conn); + } +} + +static void acpi_mipi_del_crs_csi2(struct crs_csi2 *csi2) +{ + list_del(&csi2->entry); + 
acpi_detach_data(csi2->handle, acpi_mipi_data_tag); + kfree(csi2->swnodes); + csi_csr2_release_connections(&csi2->connections); + kfree(csi2); +} + +/** + * acpi_mipi_check_crs_csi2 - Look for CSI-2 resources in _CRS + * @handle: Device object handle to evaluate _CRS for. + * + * Find all CSI-2 resource descriptors in the given device's _CRS + * and collect them into a list. + */ +void acpi_mipi_check_crs_csi2(acpi_handle handle) +{ + struct csi2_resources_walk_data crwd = { + .handle = handle, + .connections = LIST_HEAD_INIT(crwd.connections), + }; + struct crs_csi2 *csi2; + + /* + * Avoid allocating _CRS CSI-2 objects for devices without any CSI-2 + * resource descriptions in _CRS to reduce overhead. + */ + acpi_walk_resources(handle, METHOD_NAME__CRS, parse_csi2_resource, &crwd); + if (list_empty(&crwd.connections)) + return; + + /* + * Create a _CRS CSI-2 entry to store the extracted connection + * information and add it to the global list. + */ + csi2 = acpi_mipi_add_crs_csi2(handle, &acpi_mipi_crs_csi2_list); + if (!csi2) { + csi_csr2_release_connections(&crwd.connections); + return; /* Nothing really can be done about this. */ + } + + list_replace(&crwd.connections, &csi2->connections); +} + +#define NO_CSI2_PORT (UINT_MAX - 1) + +static void alloc_crs_csi2_swnodes(struct crs_csi2 *csi2) +{ + size_t port_count = csi2->port_count; + struct acpi_device_software_nodes *swnodes; + size_t alloc_size; + unsigned int i; + + /* + * Allocate memory for ports, node pointers (number of nodes + + * 1 (guardian), nodes (root + number of ports * 2 (because for + * every port there is an endpoint)). + */ + if (check_mul_overflow(sizeof(*swnodes->ports) + + sizeof(*swnodes->nodes) * 2 + + sizeof(*swnodes->nodeptrs) * 2, + port_count, &alloc_size) || + check_add_overflow(sizeof(*swnodes) + + sizeof(*swnodes->nodes) + + sizeof(*swnodes->nodeptrs) * 2, + alloc_size, &alloc_size)) { + acpi_handle_info(csi2->handle, + "too many _CRS CSI-2 resource handles (%zu)", + port_count); + return; + } + + swnodes = kmalloc(alloc_size, GFP_KERNEL); + if (!swnodes) + return; + + swnodes->ports = (struct acpi_device_software_node_port *)(swnodes + 1); + swnodes->nodes = (struct software_node *)(swnodes->ports + port_count); + swnodes->nodeptrs = (const struct software_node **)(swnodes->nodes + 1 + + 2 * port_count); + swnodes->num_ports = port_count; + + for (i = 0; i < 2 * port_count + 1; i++) + swnodes->nodeptrs[i] = &swnodes->nodes[i]; + + swnodes->nodeptrs[i] = NULL; + + for (i = 0; i < port_count; i++) + swnodes->ports[i].port_nr = NO_CSI2_PORT; + + csi2->swnodes = swnodes; +} + +#define ACPI_CRS_CSI2_PHY_TYPE_C 0 +#define ACPI_CRS_CSI2_PHY_TYPE_D 1 + +static unsigned int next_csi2_port_index(struct acpi_device_software_nodes *swnodes, + unsigned int port_nr) +{ + unsigned int i; + + for (i = 0; i < swnodes->num_ports; i++) { + struct acpi_device_software_node_port *port = &swnodes->ports[i]; + + if (port->port_nr == port_nr) + return i; + + if (port->port_nr == NO_CSI2_PORT) { + port->port_nr = port_nr; + return i; + } + } + + return NO_CSI2_PORT; +} + +/* Print graph port name into a buffer, return non-zero on failure. 
*/ +#define GRAPH_PORT_NAME(var, num) \ + (snprintf((var), sizeof(var), SWNODE_GRAPH_PORT_NAME_FMT, (num)) >= \ + sizeof(var)) + +static void extract_crs_csi2_conn_info(acpi_handle local_handle, + struct acpi_device_software_nodes *local_swnodes, + struct crs_csi2_connection *conn) +{ + struct crs_csi2 *remote_csi2 = acpi_mipi_get_crs_csi2(conn->remote_handle); + struct acpi_device_software_nodes *remote_swnodes; + struct acpi_device_software_node_port *local_port, *remote_port; + struct software_node *local_node, *remote_node; + unsigned int local_index, remote_index; + unsigned int bus_type; + + /* + * If the previous steps have failed to make room for a _CRS CSI-2 + * representation for the remote end of the given connection, skip it. + */ + if (!remote_csi2) + return; + + remote_swnodes = remote_csi2->swnodes; + if (!remote_swnodes) + return; + + switch (conn->csi2_data.phy_type) { + case ACPI_CRS_CSI2_PHY_TYPE_C: + bus_type = V4L2_FWNODE_BUS_TYPE_CSI2_CPHY; + break; + + case ACPI_CRS_CSI2_PHY_TYPE_D: + bus_type = V4L2_FWNODE_BUS_TYPE_CSI2_DPHY; + break; + + default: + acpi_handle_info(local_handle, "unknown CSI-2 PHY type %u\n", + conn->csi2_data.phy_type); + return; + } + + local_index = next_csi2_port_index(local_swnodes, + conn->csi2_data.local_port_instance); + if (WARN_ON_ONCE(local_index >= local_swnodes->num_ports)) + return; + + remote_index = next_csi2_port_index(remote_swnodes, + conn->csi2_data.resource_source.index); + if (WARN_ON_ONCE(remote_index >= remote_swnodes->num_ports)) + return; + + local_port = &local_swnodes->ports[local_index]; + local_node = &local_swnodes->nodes[ACPI_DEVICE_SWNODE_EP(local_index)]; + local_port->crs_csi2_local = true; + + remote_port = &remote_swnodes->ports[remote_index]; + remote_node = &remote_swnodes->nodes[ACPI_DEVICE_SWNODE_EP(remote_index)]; + + local_port->remote_ep[0] = SOFTWARE_NODE_REFERENCE(remote_node); + remote_port->remote_ep[0] = SOFTWARE_NODE_REFERENCE(local_node); + + local_port->ep_props[ACPI_DEVICE_SWNODE_EP_REMOTE_EP] = + PROPERTY_ENTRY_REF_ARRAY("remote-endpoint", + local_port->remote_ep); + + local_port->ep_props[ACPI_DEVICE_SWNODE_EP_BUS_TYPE] = + PROPERTY_ENTRY_U32("bus-type", bus_type); + + local_port->ep_props[ACPI_DEVICE_SWNODE_EP_REG] = + PROPERTY_ENTRY_U32("reg", 0); + + local_port->port_props[ACPI_DEVICE_SWNODE_PORT_REG] = + PROPERTY_ENTRY_U32("reg", conn->csi2_data.local_port_instance); + + if (GRAPH_PORT_NAME(local_port->port_name, + conn->csi2_data.local_port_instance)) + acpi_handle_info(local_handle, "local port %u name too long", + conn->csi2_data.local_port_instance); + + remote_port->ep_props[ACPI_DEVICE_SWNODE_EP_REMOTE_EP] = + PROPERTY_ENTRY_REF_ARRAY("remote-endpoint", + remote_port->remote_ep); + + remote_port->ep_props[ACPI_DEVICE_SWNODE_EP_BUS_TYPE] = + PROPERTY_ENTRY_U32("bus-type", bus_type); + + remote_port->ep_props[ACPI_DEVICE_SWNODE_EP_REG] = + PROPERTY_ENTRY_U32("reg", 0); + + remote_port->port_props[ACPI_DEVICE_SWNODE_PORT_REG] = + PROPERTY_ENTRY_U32("reg", conn->csi2_data.resource_source.index); + + if (GRAPH_PORT_NAME(remote_port->port_name, + conn->csi2_data.resource_source.index)) + acpi_handle_info(local_handle, "remote port %u name too long", + conn->csi2_data.resource_source.index); +} + +static void prepare_crs_csi2_swnodes(struct crs_csi2 *csi2) +{ + struct acpi_device_software_nodes *local_swnodes = csi2->swnodes; + acpi_handle local_handle = csi2->handle; + struct crs_csi2_connection *conn; + + /* Bail out if the allocation of swnodes has failed. 
*/ + if (!local_swnodes) + return; + + list_for_each_entry(conn, &csi2->connections, entry) + extract_crs_csi2_conn_info(local_handle, local_swnodes, conn); +} + +/** + * acpi_mipi_scan_crs_csi2 - Create ACPI _CRS CSI-2 software nodes + * + * Note that this function must be called before any struct acpi_device objects + * are bound to any ACPI drivers or scan handlers, so it cannot assume the + * existence of struct acpi_device objects for every device present in the ACPI + * namespace. + * + * acpi_scan_lock in scan.c must be held when calling this function. + */ +void acpi_mipi_scan_crs_csi2(void) +{ + struct crs_csi2 *csi2; + LIST_HEAD(aux_list); + + /* Count references to each ACPI handle in the CSI-2 connection graph. */ + list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry) { + struct crs_csi2_connection *conn; + + list_for_each_entry(conn, &csi2->connections, entry) { + struct crs_csi2 *remote_csi2; + + csi2->port_count++; + + remote_csi2 = acpi_mipi_get_crs_csi2(conn->remote_handle); + if (remote_csi2) { + remote_csi2->port_count++; + continue; + } + /* + * The remote endpoint has no _CRS CSI-2 list entry yet, + * so create one for it and add it to the list. + */ + acpi_mipi_add_crs_csi2(conn->remote_handle, &aux_list); + } + } + list_splice(&aux_list, &acpi_mipi_crs_csi2_list); + + /* + * Allocate software nodes for representing the CSI-2 information. + * + * This needs to be done for all of the list entries in one go, because + * they may point to each other without restrictions and the next step + * relies on the availability of swnodes memory for each list entry. + */ + list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry) + alloc_crs_csi2_swnodes(csi2); + + /* + * Set up software node properties using data from _CRS CSI-2 resource + * descriptors. + */ + list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry) + prepare_crs_csi2_swnodes(csi2); +} + +/* + * Get the index of the next property in the property array, with a given + * maximum value. + */ +#define NEXT_PROPERTY(index, max) \ + (WARN_ON((index) > ACPI_DEVICE_SWNODE_##max) ? 
\ + ACPI_DEVICE_SWNODE_##max : (index)++) + +static void init_csi2_port_local(struct acpi_device *adev, + struct acpi_device_software_node_port *port, + struct fwnode_handle *port_fwnode, + unsigned int index) +{ + acpi_handle handle = acpi_device_handle(adev); + unsigned int num_link_freqs; + int ret; + + ret = fwnode_property_count_u64(port_fwnode, "mipi-img-link-frequencies"); + if (ret <= 0) + return; + + num_link_freqs = ret; + if (num_link_freqs > ACPI_DEVICE_CSI2_DATA_LANES) { + acpi_handle_info(handle, "Too many link frequencies: %u\n", + num_link_freqs); + num_link_freqs = ACPI_DEVICE_CSI2_DATA_LANES; + } + + ret = fwnode_property_read_u64_array(port_fwnode, + "mipi-img-link-frequencies", + port->link_frequencies, + num_link_freqs); + if (ret) { + acpi_handle_info(handle, "Unable to get link frequencies (%d)\n", + ret); + return; + } + + port->ep_props[NEXT_PROPERTY(index, EP_LINK_FREQUENCIES)] = + PROPERTY_ENTRY_U64_ARRAY_LEN("link-frequencies", + port->link_frequencies, + num_link_freqs); +} + +static void init_csi2_port(struct acpi_device *adev, + struct acpi_device_software_nodes *swnodes, + struct acpi_device_software_node_port *port, + struct fwnode_handle *port_fwnode, + unsigned int port_index) +{ + unsigned int ep_prop_index = ACPI_DEVICE_SWNODE_EP_CLOCK_LANES; + acpi_handle handle = acpi_device_handle(adev); + u8 val[ACPI_DEVICE_CSI2_DATA_LANES]; + int num_lanes = 0; + int ret; + + if (GRAPH_PORT_NAME(port->port_name, port->port_nr)) + return; + + swnodes->nodes[ACPI_DEVICE_SWNODE_PORT(port_index)] = + SOFTWARE_NODE(port->port_name, port->port_props, + &swnodes->nodes[ACPI_DEVICE_SWNODE_ROOT]); + + ret = fwnode_property_read_u8(port_fwnode, "mipi-img-clock-lane", val); + if (!ret) + port->ep_props[NEXT_PROPERTY(ep_prop_index, EP_CLOCK_LANES)] = + PROPERTY_ENTRY_U32("clock-lanes", val[0]); + + ret = fwnode_property_count_u8(port_fwnode, "mipi-img-data-lanes"); + if (ret > 0) { + num_lanes = ret; + + if (num_lanes > ACPI_DEVICE_CSI2_DATA_LANES) { + acpi_handle_info(handle, "Too many data lanes: %u\n", + num_lanes); + num_lanes = ACPI_DEVICE_CSI2_DATA_LANES; + } + + ret = fwnode_property_read_u8_array(port_fwnode, + "mipi-img-data-lanes", + val, num_lanes); + if (!ret) { + unsigned int i; + + for (i = 0; i < num_lanes; i++) + port->data_lanes[i] = val[i]; + + port->ep_props[NEXT_PROPERTY(ep_prop_index, EP_DATA_LANES)] = + PROPERTY_ENTRY_U32_ARRAY_LEN("data-lanes", + port->data_lanes, + num_lanes); + } + } + + ret = fwnode_property_count_u8(port_fwnode, "mipi-img-lane-polarities"); + if (ret < 0) { + acpi_handle_debug(handle, "Lane polarity bytes missing\n"); + } else if (ret * BITS_PER_TYPE(u8) < num_lanes + 1) { + acpi_handle_info(handle, "Too few lane polarity bits (%zu vs. %d)\n", + ret * BITS_PER_TYPE(u8), num_lanes + 1); + } else { + unsigned long mask = 0; + int byte_count = ret; + unsigned int i; + + /* + * The total number of lanes is ACPI_DEVICE_CSI2_DATA_LANES + 1 + * (data lanes + clock lane). It is not expected to ever be + * greater than the number of bits in an unsigned long + * variable, but ensure that this is the case. 
+ */ + BUILD_BUG_ON(BITS_PER_TYPE(unsigned long) <= ACPI_DEVICE_CSI2_DATA_LANES); + + if (byte_count > sizeof(mask)) { + acpi_handle_info(handle, "Too many lane polarities: %d\n", + byte_count); + byte_count = sizeof(mask); + } + fwnode_property_read_u8_array(port_fwnode, "mipi-img-lane-polarities", + val, byte_count); + + for (i = 0; i < byte_count; i++) + mask |= (unsigned long)val[i] << BITS_PER_TYPE(u8) * i; + + for (i = 0; i <= num_lanes; i++) + port->lane_polarities[i] = test_bit(i, &mask); + + port->ep_props[NEXT_PROPERTY(ep_prop_index, EP_LANE_POLARITIES)] = + PROPERTY_ENTRY_U32_ARRAY_LEN("lane-polarities", + port->lane_polarities, + num_lanes + 1); + } + + swnodes->nodes[ACPI_DEVICE_SWNODE_EP(port_index)] = + SOFTWARE_NODE("endpoint@0", swnodes->ports[port_index].ep_props, + &swnodes->nodes[ACPI_DEVICE_SWNODE_PORT(port_index)]); + + if (port->crs_csi2_local) + init_csi2_port_local(adev, port, port_fwnode, ep_prop_index); +} + +#define MIPI_IMG_PORT_PREFIX "mipi-img-port-" + +static struct fwnode_handle *get_mipi_port_handle(struct fwnode_handle *adev_fwnode, + unsigned int port_nr) +{ + char port_name[sizeof(MIPI_IMG_PORT_PREFIX) + 2]; + + if (snprintf(port_name, sizeof(port_name), "%s%u", + MIPI_IMG_PORT_PREFIX, port_nr) >= sizeof(port_name)) + return NULL; + + return fwnode_get_named_child_node(adev_fwnode, port_name); +} + +static void init_crs_csi2_swnodes(struct crs_csi2 *csi2) +{ + struct acpi_buffer buffer = { .length = ACPI_ALLOCATE_BUFFER }; + struct acpi_device_software_nodes *swnodes = csi2->swnodes; + acpi_handle handle = csi2->handle; + unsigned int prop_index = 0; + struct fwnode_handle *adev_fwnode; + struct acpi_device *adev; + acpi_status status; + unsigned int i; + u32 val; + int ret; + + /* + * Bail out if the swnodes are not available (either they have not been + * allocated or they have been assigned to the device already). + */ + if (!swnodes) + return; + + adev = acpi_fetch_acpi_dev(handle); + if (!adev) + return; + + adev_fwnode = acpi_fwnode_handle(adev); + + /* + * If the "rotation" property is not present, but _PLD is there, + * evaluate it to get the "rotation" value. 
+ */ + if (!fwnode_property_present(adev_fwnode, "rotation")) { + struct acpi_pld_info *pld; + + status = acpi_get_physical_device_location(handle, &pld); + if (ACPI_SUCCESS(status)) { + swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_ROTATION)] = + PROPERTY_ENTRY_U32("rotation", + pld->rotation * 45U); + kfree(pld); + } + } + + if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-clock-frequency", &val)) + swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_CLOCK_FREQUENCY)] = + PROPERTY_ENTRY_U32("clock-frequency", val); + + if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-led-max-current", &val)) + swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_LED_MAX_MICROAMP)] = + PROPERTY_ENTRY_U32("led-max-microamp", val); + + if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-flash-max-current", &val)) + swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_FLASH_MAX_MICROAMP)] = + PROPERTY_ENTRY_U32("flash-max-microamp", val); + + if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-flash-max-timeout-us", &val)) + swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_FLASH_MAX_TIMEOUT_US)] = + PROPERTY_ENTRY_U32("flash-max-timeout-us", val); + + status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); + if (ACPI_FAILURE(status)) { + acpi_handle_info(handle, "Unable to get the path name\n"); + return; + } + + swnodes->nodes[ACPI_DEVICE_SWNODE_ROOT] = + SOFTWARE_NODE(buffer.pointer, swnodes->dev_props, NULL); + + for (i = 0; i < swnodes->num_ports; i++) { + struct acpi_device_software_node_port *port = &swnodes->ports[i]; + struct fwnode_handle *port_fwnode; + + /* + * The MIPI DisCo for Imaging specification defines _DSD device + * properties for providing CSI-2 port parameters that can be + * accessed through the generic device properties framework. To + * access them, it is first necessary to find the data node + * representing the port under the given ACPI device object. + */ + port_fwnode = get_mipi_port_handle(adev_fwnode, port->port_nr); + if (!port_fwnode) { + acpi_handle_info(handle, + "MIPI port name too long for port %u\n", + port->port_nr); + continue; + } + + init_csi2_port(adev, swnodes, port, port_fwnode, i); + + fwnode_handle_put(port_fwnode); + } + + ret = software_node_register_node_group(swnodes->nodeptrs); + if (ret < 0) { + acpi_handle_info(handle, + "Unable to register software nodes (%d)\n", ret); + return; + } + + adev->swnodes = swnodes; + adev_fwnode->secondary = software_node_fwnode(swnodes->nodes); + + /* + * Prevents the swnodes from this csi2 entry from being assigned again + * or freed prematurely. + */ + csi2->swnodes = NULL; +} + +/** + * acpi_mipi_init_crs_csi2_swnodes - Initialize _CRS CSI-2 software nodes + * + * Use MIPI DisCo for Imaging device properties to finalize the initialization + * of CSI-2 software nodes for all ACPI device objects that have been already + * enumerated. 
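+ *
+ * Note that this relies on the CSI-2 connection graph having been
+ * extracted from the _CRS resource descriptors beforehand; see the
+ * acpi_mipi_scan_crs_csi2() call preceding this one in acpi_bus_scan().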
+ */ +void acpi_mipi_init_crs_csi2_swnodes(void) +{ + struct crs_csi2 *csi2, *csi2_tmp; + + list_for_each_entry_safe(csi2, csi2_tmp, &acpi_mipi_crs_csi2_list, entry) + init_crs_csi2_swnodes(csi2); +} + +/** + * acpi_mipi_crs_csi2_cleanup - Free _CRS CSI-2 temporary data + */ +void acpi_mipi_crs_csi2_cleanup(void) +{ + struct crs_csi2 *csi2, *csi2_tmp; + + list_for_each_entry_safe(csi2, csi2_tmp, &acpi_mipi_crs_csi2_list, entry) + acpi_mipi_del_crs_csi2(csi2); +} diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index 12f330b0eac0..0214518fc582 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -67,9 +67,9 @@ int acpi_map_pxm_to_node(int pxm) node = pxm_to_node_map[pxm]; if (node == NUMA_NO_NODE) { - if (nodes_weight(nodes_found_map) >= MAX_NUMNODES) - return NUMA_NO_NODE; node = first_unset_node(nodes_found_map); + if (node >= MAX_NUMNODES) + return NUMA_NO_NODE; __acpi_map_pxm_to_node(pxm, node); node_set(node, nodes_found_map); } @@ -183,7 +183,7 @@ static int __init slit_valid(struct acpi_table_slit *slit) int i, j; int d = slit->locality_count; for (i = 0; i < d; i++) { - for (j = 0; j < d; j++) { + for (j = 0; j < d; j++) { u8 val = slit->entry[d*i + j]; if (i == j) { if (val != LOCAL_DISTANCE) @@ -430,7 +430,7 @@ acpi_parse_gi_affinity(union acpi_subtable_headers *header, return -EINVAL; node = acpi_map_pxm_to_node(gi_affinity->proximity_domain); - if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { + if (node == NUMA_NO_NODE) { pr_err("SRAT: Too many proximity domains.\n"); return -EINVAL; } @@ -532,7 +532,7 @@ int __init acpi_numa_init(void) */ /* fake_pxm is the next unused PXM value after SRAT parsing */ - for (i = 0, fake_pxm = -1; i < MAX_NUMNODES - 1; i++) { + for (i = 0, fake_pxm = -1; i < MAX_NUMNODES; i++) { if (node_to_pxm_map[i] > fake_pxm) fake_pxm = node_to_pxm_map[i]; } diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index c09cc3c68633..70af3fbbebe5 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -544,11 +544,7 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val, static irqreturn_t acpi_irq(int irq, void *dev_id) { - u32 handled; - - handled = (*acpi_irq_handler) (acpi_irq_context); - - if (handled) { + if ((*acpi_irq_handler)(acpi_irq_context)) { acpi_irq_handled++; return IRQ_HANDLED; } else { @@ -582,7 +578,8 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler, acpi_irq_handler = handler; acpi_irq_context = context; - if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) { + if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED | IRQF_ONESHOT, + "acpi", acpi_irq)) { pr_err("SCI (IRQ%d) allocation failed\n", irq); acpi_irq_handler = NULL; return AE_NOT_ACQUIRED; @@ -1063,9 +1060,7 @@ int __init acpi_debugger_init(void) acpi_status acpi_os_execute(acpi_execute_type type, acpi_osd_exec_callback function, void *context) { - acpi_status status = AE_OK; struct acpi_os_dpc *dpc; - struct workqueue_struct *queue; int ret; ACPI_DEBUG_PRINT((ACPI_DB_EXEC, @@ -1076,9 +1071,9 @@ acpi_status acpi_os_execute(acpi_execute_type type, ret = acpi_debugger_create_thread(function, context); if (ret) { pr_err("Kernel thread creation failed\n"); - status = AE_ERROR; + return AE_ERROR; } - goto out_thread; + return AE_OK; } /* @@ -1096,43 +1091,41 @@ acpi_status acpi_os_execute(acpi_execute_type type, dpc->function = function; dpc->context = context; + INIT_WORK(&dpc->work, acpi_os_execute_deferred); /* * To prevent lockdep from complaining unnecessarily, make sure that * there is 
a different static lockdep key for each workqueue by using * INIT_WORK() for each of them separately. */ - if (type == OSL_NOTIFY_HANDLER) { - queue = kacpi_notify_wq; - INIT_WORK(&dpc->work, acpi_os_execute_deferred); - } else if (type == OSL_GPE_HANDLER) { - queue = kacpid_wq; - INIT_WORK(&dpc->work, acpi_os_execute_deferred); - } else { + switch (type) { + case OSL_NOTIFY_HANDLER: + ret = queue_work(kacpi_notify_wq, &dpc->work); + break; + case OSL_GPE_HANDLER: + /* + * On some machines, a software-initiated SMI causes corruption + * unless the SMI runs on CPU 0. An SMI can be initiated by + * any AML, but typically it's done in GPE-related methods that + * are run via workqueues, so we can avoid the known corruption + * cases by always queueing on CPU 0. + */ + ret = queue_work_on(0, kacpid_wq, &dpc->work); + break; + default: pr_err("Unsupported os_execute type %d.\n", type); - status = AE_ERROR; + goto err; } - - if (ACPI_FAILURE(status)) - goto err_workqueue; - - /* - * On some machines, a software-initiated SMI causes corruption unless - * the SMI runs on CPU 0. An SMI can be initiated by any AML, but - * typically it's done in GPE-related methods that are run via - * workqueues, so we can avoid the known corruption cases by always - * queueing on CPU 0. - */ - ret = queue_work_on(0, queue, &dpc->work); if (!ret) { pr_err("Unable to queue work\n"); - status = AE_ERROR; + goto err; } -err_workqueue: - if (ACPI_FAILURE(status)) - kfree(dpc); -out_thread: - return status; + + return AE_OK; + +err: + kfree(dpc); + return AE_ERROR; } EXPORT_SYMBOL(acpi_os_execute); @@ -1522,20 +1515,18 @@ void acpi_os_delete_lock(acpi_spinlock handle) acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp) __acquires(lockp) { - acpi_cpu_flags flags; - - spin_lock_irqsave(lockp, flags); - return flags; + spin_lock(lockp); + return 0; } /* * Release a spinlock. See above. */ -void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags) +void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags not_used) __releases(lockp) { - spin_unlock_irqrestore(lockp, flags); + spin_unlock(lockp); } #ifndef ACPI_USE_LOCAL_CACHE @@ -1672,7 +1663,7 @@ acpi_status __init acpi_os_initialize(void) acpi_status __init acpi_os_initialize1(void) { kacpid_wq = alloc_workqueue("kacpid", 0, 1); - kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); + kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 0); kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0); BUG_ON(!kacpid_wq); BUG_ON(!kacpi_notify_wq); diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index b7c6287eccca..1219adb11ab9 100644 --- a/drivers/acpi/processor_thermal.c +++ b/drivers/acpi/processor_thermal.c @@ -17,6 +17,8 @@ #include <acpi/processor.h> #include <linux/uaccess.h> +#include "internal.h" + #ifdef CONFIG_CPU_FREQ /* If a passive cooling situation is detected, primarily CPUfreq is used, as it @@ -26,12 +28,21 @@ */ #define CPUFREQ_THERMAL_MIN_STEP 0 -#define CPUFREQ_THERMAL_MAX_STEP 3 -static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg); +static int cpufreq_thermal_max_step __read_mostly = 3; + +/* + * Minimum throttle percentage for processor_thermal cooling device. + * The processor_thermal driver uses it to calculate the percentage amount by + * which cpu frequency must be reduced for each cooling state. This is also used + * to calculate the maximum number of throttling steps or cooling states. 
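+ *
+ * Worked example with the default of 20: acpi_thermal_cpufreq_config()
+ * computes (100 / 20) - 2 = 3 as the maximum step, and cooling state 2
+ * caps the frequency at max_freq * (100 - 2 * 20) / 100, i.e. 60% of
+ * the maximum frequency.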
+ */ +static int cpufreq_thermal_reduction_pctg __read_mostly = 20; -#define reduction_pctg(cpu) \ - per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu)) +static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_step); + +#define reduction_step(cpu) \ + per_cpu(cpufreq_thermal_reduction_step, phys_package_first_cpu(cpu)) /* * Emulate "per package data" using per cpu data (which should really be @@ -71,7 +82,7 @@ static int cpufreq_get_max_state(unsigned int cpu) if (!cpu_has_cpufreq(cpu)) return 0; - return CPUFREQ_THERMAL_MAX_STEP; + return cpufreq_thermal_max_step; } static int cpufreq_get_cur_state(unsigned int cpu) @@ -79,7 +90,7 @@ static int cpufreq_get_cur_state(unsigned int cpu) if (!cpu_has_cpufreq(cpu)) return 0; - return reduction_pctg(cpu); + return reduction_step(cpu); } static int cpufreq_set_cur_state(unsigned int cpu, int state) @@ -92,7 +103,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state) if (!cpu_has_cpufreq(cpu)) return 0; - reduction_pctg(cpu) = state; + reduction_step(cpu) = state; /* * Update all the CPUs in the same package because they all @@ -113,7 +124,8 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state) if (!policy) return -EINVAL; - max_freq = (policy->cpuinfo.max_freq * (100 - reduction_pctg(i) * 20)) / 100; + max_freq = (policy->cpuinfo.max_freq * + (100 - reduction_step(i) * cpufreq_thermal_reduction_pctg)) / 100; cpufreq_cpu_put(policy); @@ -126,10 +138,29 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state) return 0; } +static void acpi_thermal_cpufreq_config(void) +{ + int cpufreq_pctg = acpi_arch_thermal_cpufreq_pctg(); + + if (!cpufreq_pctg) + return; + + cpufreq_thermal_reduction_pctg = cpufreq_pctg; + + /* + * Derive the MAX_STEP from minimum throttle percentage so that the reduction + * percentage doesn't end up becoming negative. Also, cap the MAX_STEP so that + * the CPU performance doesn't become 0. + */ + cpufreq_thermal_max_step = (100 / cpufreq_pctg) - 2; +} + void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy) { unsigned int cpu; + acpi_thermal_cpufreq_config(); + for_each_cpu(cpu, policy->related_cpus) { struct acpi_processor *pr = per_cpu(processors, cpu); int ret; @@ -190,7 +221,7 @@ static int acpi_processor_max_state(struct acpi_processor *pr) /* * There exists four states according to - * cpufreq_thermal_reduction_pctg. 0, 1, 2, 3 + * cpufreq_thermal_reduction_step. 0, 1, 2, 3 */ max_state += cpufreq_get_max_state(pr->id); if (pr->flags.throttling) diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 6979a3f9f90a..07d76fb740b6 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -2,14 +2,17 @@ /* * ACPI device specific properties support. * - * Copyright (C) 2014, Intel Corporation + * Copyright (C) 2014 - 2023, Intel Corporation * All rights reserved. * * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> * Darren Hart <dvhart@linux.intel.com> * Rafael J. Wysocki <rafael.j.wysocki@intel.com> + * Sakari Ailus <sakari.ailus@linux.intel.com> */ +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/acpi.h> #include <linux/device.h> #include <linux/export.h> @@ -801,27 +804,15 @@ static int acpi_get_ref_args(struct fwnode_reference_args *args, u32 nargs = 0, i; /* - * Find the referred data extension node under the - * referred device node. 
- */ - for (; *element < end && (*element)->type == ACPI_TYPE_STRING; - (*element)++) { - const char *child_name = (*element)->string.pointer; - - ref_fwnode = acpi_fwnode_get_named_child_node(ref_fwnode, child_name); - if (!ref_fwnode) - return -EINVAL; - } - - /* * Assume the following integer elements are all args. Stop counting on - * the first reference or end of the package arguments. In case of - * neither reference, nor integer, return an error, we can't parse it. + * the first reference (possibly represented as a string) or end of the + * package arguments. In case of neither reference, nor integer, return + * an error, we can't parse it. */ for (i = 0; (*element) + i < end && i < num_args; i++) { acpi_object_type type = (*element)[i].type; - if (type == ACPI_TYPE_LOCAL_REFERENCE) + if (type == ACPI_TYPE_LOCAL_REFERENCE || type == ACPI_TYPE_STRING) break; if (type == ACPI_TYPE_INTEGER) @@ -845,6 +836,44 @@ static int acpi_get_ref_args(struct fwnode_reference_args *args, return 0; } +static struct fwnode_handle *acpi_parse_string_ref(const struct fwnode_handle *fwnode, + const char *refstring) +{ + acpi_handle scope, handle; + struct acpi_data_node *dn; + struct acpi_device *device; + acpi_status status; + + if (is_acpi_device_node(fwnode)) { + scope = to_acpi_device_node(fwnode)->handle; + } else if (is_acpi_data_node(fwnode)) { + scope = to_acpi_data_node(fwnode)->handle; + } else { + pr_debug("Bad node type for node %pfw\n", fwnode); + return NULL; + } + + status = acpi_get_handle(scope, refstring, &handle); + if (ACPI_FAILURE(status)) { + acpi_handle_debug(scope, "Unable to get an ACPI handle for %s\n", + refstring); + return NULL; + } + + device = acpi_fetch_acpi_dev(handle); + if (device) + return acpi_fwnode_handle(device); + + status = acpi_get_data_full(handle, acpi_nondev_subnode_tag, + (void **)&dn, NULL); + if (ACPI_FAILURE(status) || !dn) { + acpi_handle_debug(handle, "Subnode not found\n"); + return NULL; + } + + return &dn->fwnode; +} + /** * __acpi_node_get_property_reference - returns handle to the referenced object * @fwnode: Firmware node to get the property from @@ -887,6 +916,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, const union acpi_object *element, *end; const union acpi_object *obj; const struct acpi_device_data *data; + struct fwnode_handle *ref_fwnode; struct acpi_device *device; int ret, idx = 0; @@ -910,16 +940,30 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, args->fwnode = acpi_fwnode_handle(device); args->nargs = 0; + + return 0; + case ACPI_TYPE_STRING: + if (index) + return -ENOENT; + + ref_fwnode = acpi_parse_string_ref(fwnode, obj->string.pointer); + if (!ref_fwnode) + return -EINVAL; + + args->fwnode = ref_fwnode; + args->nargs = 0; + return 0; case ACPI_TYPE_PACKAGE: /* * If it is not a single reference, then it is a package of - * references followed by number of ints as follows: + * references, followed by number of ints as follows: * * Package () { REF, INT, REF, INT, INT } * - * The index argument is then used to determine which reference - * the caller wants (along with the arguments). + * Here, REF may be either a local reference or a string. The + * index argument is then used to determine which reference the + * caller wants (along with the arguments). 
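+ * For instance, given a hypothetical
+ * Package () { "ISP0", 1, "ISP1", 2, 3 },
+ * index 0 selects the node named by the string "ISP0" with the single
+ * argument 1, while index 1 selects "ISP1" with the arguments 2 and 3.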
*/ break; default: return -EINVAL; } @@ -951,6 +995,24 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, return 0; break; + case ACPI_TYPE_STRING: + ref_fwnode = acpi_parse_string_ref(fwnode, + element->string.pointer); + if (!ref_fwnode) + return -EINVAL; + + element++; + + ret = acpi_get_ref_args(idx == index ? args : NULL, + ref_fwnode, &element, end, + num_args); + if (ret < 0) + return ret; + + if (idx == index) + return 0; + + break; case ACPI_TYPE_INTEGER: if (idx == index) return -ENOENT; diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 9bd9f79cd409..0e2c397b1399 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -511,6 +511,13 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = { }, }, { + /* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."), + DMI_MATCH(DMI_BOARD_NAME, "RP-15"), + }, + }, + { /* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */ .matches = { DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"), @@ -548,6 +555,18 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = { DMI_MATCH(DMI_BOARD_NAME, "GM6BG0Q"), }, }, + { + /* Infinity E15-5A165-BM */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM5RG1E0009COM"), + }, + }, + { + /* Infinity E15-5A305-1M */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM5RGEE0016COM"), + }, + }, { } }; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 02bb2cce423f..950d3b02a2a9 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1981,10 +1981,9 @@ static void acpi_scan_init_hotplug(struct acpi_device *adev) } } -static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep) +static u32 acpi_scan_check_dep(acpi_handle handle) { struct acpi_handle_list dep_devices; - acpi_status status; u32 count; int i; @@ -1994,12 +1993,10 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep) * 2. ACPI nodes describing USB ports. * Still, checking for _HID catches more than just these cases ... */ - if (!check_dep || !acpi_has_method(handle, "_DEP") || - !acpi_has_method(handle, "_HID")) + if (!acpi_has_method(handle, "_DEP") || !acpi_has_method(handle, "_HID")) return 0; - status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices); - if (ACPI_FAILURE(status)) { + if (!acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices)) { acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); return 0; } @@ -2008,6 +2005,7 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep) struct acpi_device_info *info; struct acpi_dep_data *dep; bool skip, honor_dep; + acpi_status status; status = acpi_get_object_info(dep_devices.handles[i], &info); if (ACPI_FAILURE(status)) { @@ -2041,7 +2039,13 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep) return count; } -static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, +static acpi_status acpi_scan_check_crs_csi2_cb(acpi_handle handle, u32 a, void *b, void **c) +{ + acpi_mipi_check_crs_csi2(handle); + return AE_OK; +} + +static acpi_status acpi_bus_check_add(acpi_handle handle, bool first_pass, struct acpi_device **adev_p) { struct acpi_device *device = acpi_fetch_acpi_dev(handle); @@ -2059,9 +2063,25 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, if (acpi_device_should_be_hidden(handle)) return AE_OK; - /* Bail out if there are dependencies. 
*/ - if (acpi_scan_check_dep(handle, check_dep) > 0) - return AE_CTRL_DEPTH; + if (first_pass) { + acpi_mipi_check_crs_csi2(handle); + + /* Bail out if there are dependencies. */ + if (acpi_scan_check_dep(handle) > 0) { + /* + * The entire CSI-2 connection graph needs to be + * extracted before any drivers or scan handlers + * are bound to struct device objects, so scan + * _CRS CSI-2 resource descriptors for all + * devices below the current handle. + */ + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, + ACPI_UINT32_MAX, + acpi_scan_check_crs_csi2_cb, + NULL, NULL, NULL); + return AE_CTRL_DEPTH; + } + } fallthrough; case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */ @@ -2084,10 +2104,10 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, } /* - * If check_dep is true at this point, the device has no dependencies, + * If first_pass is true at this point, the device has no dependencies, * or the creation of the device object would have been postponed above. */ - acpi_add_single_object(&device, handle, type, !check_dep); + acpi_add_single_object(&device, handle, type, !first_pass); if (!device) return AE_CTRL_DEPTH; @@ -2431,6 +2451,13 @@ static void acpi_scan_postponed_branch(acpi_handle handle) acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, acpi_bus_check_add_2, NULL, NULL, (void **)&adev); + + /* + * Populate the ACPI _CRS CSI-2 software nodes for the ACPI devices that + * have been added above. + */ + acpi_mipi_init_crs_csi2_swnodes(); + acpi_bus_attach(adev, NULL); } @@ -2499,12 +2526,22 @@ int acpi_bus_scan(acpi_handle handle) if (!device) return -ENODEV; + /* + * Set up ACPI _CRS CSI-2 software nodes using information extracted + * from the _CRS CSI-2 resource descriptors during the ACPI namespace + * walk above and MIPI DisCo for Imaging device properties. + */ + acpi_mipi_scan_crs_csi2(); + acpi_mipi_init_crs_csi2_swnodes(); + acpi_bus_attach(device, (void *)true); /* Pass 2: Enumerate all of the remaining devices. 
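* These are the devices whose object creation was postponed during pass 1
* because their _DEP dependencies had not been met at that point.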
*/ acpi_scan_postponed(); + acpi_mipi_crs_csi2_cleanup(); + return 0; } EXPORT_SYMBOL(acpi_bus_scan); diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index f74d81abdbfc..4748e8061253 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -31,6 +31,8 @@ #include <linux/uaccess.h> #include <linux/units.h> +#include "internal.h" + #define ACPI_THERMAL_CLASS "thermal_zone" #define ACPI_THERMAL_DEVICE_NAME "Thermal Zone" #define ACPI_THERMAL_NOTIFY_TEMPERATURE 0x80 @@ -90,7 +92,7 @@ struct acpi_thermal_passive { struct acpi_thermal_trip trip; unsigned long tc1; unsigned long tc2; - unsigned long tsp; + unsigned long delay; }; struct acpi_thermal_active { @@ -188,24 +190,19 @@ static int active_trip_index(struct acpi_thermal *tz, static long get_passive_temp(struct acpi_thermal *tz) { - unsigned long long tmp; - acpi_status status; + int temp; - status = acpi_evaluate_integer(tz->device->handle, "_PSV", NULL, &tmp); - if (ACPI_FAILURE(status)) + if (acpi_passive_trip_temp(tz->device, &temp)) return THERMAL_TEMP_INVALID; - return tmp; + return temp; } static long get_active_temp(struct acpi_thermal *tz, int index) { - char method[] = { '_', 'A', 'C', '0' + index, '\0' }; - unsigned long long tmp; - acpi_status status; + int temp; - status = acpi_evaluate_integer(tz->device->handle, method, NULL, &tmp); - if (ACPI_FAILURE(status)) + if (acpi_active_trip_temp(tz->device, index, &temp)) return THERMAL_TEMP_INVALID; /* @@ -215,10 +212,10 @@ static long get_active_temp(struct acpi_thermal *tz, int index) if (act > 0) { unsigned long long override = celsius_to_deci_kelvin(act); - if (tmp > override) - tmp = override; + if (temp > override) + return override; } - return tmp; + return temp; } static void acpi_thermal_update_trip(struct acpi_thermal *tz, @@ -247,7 +244,6 @@ static bool update_trip_devices(struct acpi_thermal *tz, { struct acpi_handle_list devices = { 0 }; char method[] = "_PSL"; - acpi_status status; if (index != ACPI_THERMAL_TRIP_PASSIVE) { method[1] = 'A'; @@ -255,8 +251,7 @@ static bool update_trip_devices(struct acpi_thermal *tz, method[3] = '0' + index; } - status = acpi_evaluate_reference(tz->device->handle, method, NULL, &devices); - if (ACPI_FAILURE(status)) { + if (!acpi_evaluate_reference(tz->device->handle, method, NULL, &devices)) { acpi_handle_info(tz->device->handle, "%s evaluation failure\n", method); return false; } @@ -297,6 +292,7 @@ static int acpi_thermal_adjust_trip(struct thermal_trip *trip, void *data) struct acpi_thermal_trip *acpi_trip = trip->priv; struct adjust_trip_data *atd = data; struct acpi_thermal *tz = atd->tz; + int temp; if (!acpi_trip || !acpi_thermal_trip_valid(acpi_trip)) return 0; @@ -307,9 +303,11 @@ static int acpi_thermal_adjust_trip(struct thermal_trip *trip, void *data) acpi_thermal_update_trip_devices(tz, trip); if (acpi_thermal_trip_valid(acpi_trip)) - trip->temperature = acpi_thermal_temp(tz, acpi_trip->temp_dk); + temp = acpi_thermal_temp(tz, acpi_trip->temp_dk); else - trip->temperature = THERMAL_TEMP_INVALID; + temp = THERMAL_TEMP_INVALID; + + thermal_zone_set_trip_temp(tz->thermal_zone, trip, temp); return 0; } @@ -339,13 +337,12 @@ static void acpi_thermal_trips_update(struct acpi_thermal *tz, u32 event) dev_name(&adev->dev), event, 0); } -static long acpi_thermal_get_critical_trip(struct acpi_thermal *tz) +static int acpi_thermal_get_critical_trip(struct acpi_thermal *tz) { - unsigned long long tmp; - acpi_status status; + int temp; if (crt > 0) { - tmp = celsius_to_deci_kelvin(crt); + temp = 
celsius_to_deci_kelvin(crt); goto set; } if (crt == -1) { @@ -353,38 +350,34 @@ static long acpi_thermal_get_critical_trip(struct acpi_thermal *tz) return THERMAL_TEMP_INVALID; } - status = acpi_evaluate_integer(tz->device->handle, "_CRT", NULL, &tmp); - if (ACPI_FAILURE(status)) { - acpi_handle_debug(tz->device->handle, "No critical threshold\n"); + if (acpi_critical_trip_temp(tz->device, &temp)) return THERMAL_TEMP_INVALID; - } - if (tmp <= 2732) { + + if (temp <= 2732) { /* * Below zero (Celsius) values clearly aren't right for sure, * so discard them as invalid. */ - pr_info(FW_BUG "Invalid critical threshold (%llu)\n", tmp); + pr_info(FW_BUG "Invalid critical threshold (%d)\n", temp); return THERMAL_TEMP_INVALID; } set: - acpi_handle_debug(tz->device->handle, "Critical threshold [%llu]\n", tmp); - return tmp; + acpi_handle_debug(tz->device->handle, "Critical threshold [%d]\n", temp); + return temp; } -static long acpi_thermal_get_hot_trip(struct acpi_thermal *tz) +static int acpi_thermal_get_hot_trip(struct acpi_thermal *tz) { - unsigned long long tmp; - acpi_status status; + int temp; - status = acpi_evaluate_integer(tz->device->handle, "_HOT", NULL, &tmp); - if (ACPI_FAILURE(status)) { + if (acpi_hot_trip_temp(tz->device, &temp) || temp == THERMAL_TEMP_INVALID) { acpi_handle_debug(tz->device->handle, "No hot threshold\n"); return THERMAL_TEMP_INVALID; } - acpi_handle_debug(tz->device->handle, "Hot threshold [%llu]\n", tmp); - return tmp; + acpi_handle_debug(tz->device->handle, "Hot threshold [%d]\n", temp); + return temp; } static bool passive_trip_params_init(struct acpi_thermal *tz) @@ -404,11 +397,17 @@ static bool passive_trip_params_init(struct acpi_thermal *tz) tz->trips.passive.tc2 = tmp; + status = acpi_evaluate_integer(tz->device->handle, "_TFP", NULL, &tmp); + if (ACPI_SUCCESS(status)) { + tz->trips.passive.delay = tmp; + return true; + } + status = acpi_evaluate_integer(tz->device->handle, "_TSP", NULL, &tmp); if (ACPI_FAILURE(status)) return false; - tz->trips.passive.tsp = tmp; + tz->trips.passive.delay = tmp * 100; return true; } @@ -904,7 +903,7 @@ static int acpi_thermal_add(struct acpi_device *device) acpi_trip = &tz->trips.passive.trip; if (acpi_thermal_trip_valid(acpi_trip)) { - passive_delay = tz->trips.passive.tsp * 100; + passive_delay = tz->trips.passive.delay; trip->type = THERMAL_TRIP_PASSIVE; trip->temperature = acpi_thermal_temp(tz, acpi_trip->temp_dk); @@ -1142,6 +1141,7 @@ static void __exit acpi_thermal_exit(void) module_init(acpi_thermal_init); module_exit(acpi_thermal_exit); +MODULE_IMPORT_NS(ACPI_THERMAL); MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI Thermal Zone Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/thermal/thermal_acpi.c b/drivers/acpi/thermal_lib.c index 43eaf0f2ff49..4e0519ca9739 100644 --- a/drivers/thermal/thermal_acpi.c +++ b/drivers/acpi/thermal_lib.c @@ -3,12 +3,13 @@ * Copyright 2023 Linaro Limited * Copyright 2023 Intel Corporation * - * Library routines for populating a generic thermal trip point structure - * with data obtained by evaluating a specific object in the ACPI Namespace. + * Library routines for retrieving trip point temperature values from the + * platform firmware via ACPI. */ #include <linux/acpi.h> #include <linux/units.h> #include <linux/thermal.h> +#include "internal.h" /* * Minimum temperature for full military grade is 218°K (-55°C) and @@ -17,11 +18,11 @@ * firmware. Any values out of these boundaries may be considered * bogus and we can assume the firmware has no data to provide. 
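* (Both limits below are expressed in deci-Kelvin, the unit reported by
* the firmware: TEMP_MIN_DECIK = 2180 corresponds to 218.0 K, roughly
* -55°C, and TEMP_MAX_DECIK = 4480 to 448.0 K.)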
*/ -#define TEMP_MIN_DECIK 2180 -#define TEMP_MAX_DECIK 4480 +#define TEMP_MIN_DECIK 2180ULL +#define TEMP_MAX_DECIK 4480ULL -static int thermal_acpi_trip_temp(struct acpi_device *adev, char *obj_name, - int *ret_temp) +static int acpi_trip_temp(struct acpi_device *adev, char *obj_name, + int *ret_temp) { unsigned long long temp; acpi_status status; @@ -33,7 +34,7 @@ static int thermal_acpi_trip_temp(struct acpi_device *adev, char *obj_name, } if (temp >= TEMP_MIN_DECIK && temp <= TEMP_MAX_DECIK) { - *ret_temp = deci_kelvin_to_millicelsius(temp); + *ret_temp = temp; } else { acpi_handle_debug(adev->handle, "%s result %llu out of range\n", obj_name, temp); @@ -43,6 +44,48 @@ static int thermal_acpi_trip_temp(struct acpi_device *adev, char *obj_name, return 0; } +int acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp) +{ + char obj_name[] = {'_', 'A', 'C', '0' + id, '\0'}; + + if (id < 0 || id > 9) + return -EINVAL; + + return acpi_trip_temp(adev, obj_name, ret_temp); +} +EXPORT_SYMBOL_NS_GPL(acpi_active_trip_temp, ACPI_THERMAL); + +int acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp) +{ + return acpi_trip_temp(adev, "_PSV", ret_temp); +} +EXPORT_SYMBOL_NS_GPL(acpi_passive_trip_temp, ACPI_THERMAL); + +int acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp) +{ + return acpi_trip_temp(adev, "_HOT", ret_temp); +} +EXPORT_SYMBOL_NS_GPL(acpi_hot_trip_temp, ACPI_THERMAL); + +int acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp) +{ + return acpi_trip_temp(adev, "_CRT", ret_temp); +} +EXPORT_SYMBOL_NS_GPL(acpi_critical_trip_temp, ACPI_THERMAL); + +static int thermal_temp(int error, int temp_decik, int *ret_temp) +{ + if (error) + return error; + + if (temp_decik == THERMAL_TEMP_INVALID) + *ret_temp = THERMAL_TEMP_INVALID; + else + *ret_temp = deci_kelvin_to_millicelsius(temp_decik); + + return 0; +} + /** * thermal_acpi_active_trip_temp - Retrieve active trip point temperature * @adev: Target thermal zone ACPI device object. 
@@ -57,12 +100,10 @@ static int thermal_acpi_trip_temp(struct acpi_device *adev, char *obj_name, */ int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp) { - char obj_name[] = {'_', 'A', 'C', '0' + id, '\0'}; - - if (id < 0 || id > 9) - return -EINVAL; + int temp_decik; + int ret = acpi_active_trip_temp(adev, id, &temp_decik); - return thermal_acpi_trip_temp(adev, obj_name, ret_temp); + return thermal_temp(ret, temp_decik, ret_temp); } EXPORT_SYMBOL_GPL(thermal_acpi_active_trip_temp); @@ -78,7 +119,10 @@ EXPORT_SYMBOL_GPL(thermal_acpi_active_trip_temp); */ int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp) { - return thermal_acpi_trip_temp(adev, "_PSV", ret_temp); + int temp_decik; + int ret = acpi_passive_trip_temp(adev, &temp_decik); + + return thermal_temp(ret, temp_decik, ret_temp); } EXPORT_SYMBOL_GPL(thermal_acpi_passive_trip_temp); @@ -95,7 +139,10 @@ EXPORT_SYMBOL_GPL(thermal_acpi_passive_trip_temp); */ int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp) { - return thermal_acpi_trip_temp(adev, "_HOT", ret_temp); + int temp_decik; + int ret = acpi_hot_trip_temp(adev, &temp_decik); + + return thermal_temp(ret, temp_decik, ret_temp); } EXPORT_SYMBOL_GPL(thermal_acpi_hot_trip_temp); @@ -111,6 +158,9 @@ EXPORT_SYMBOL_GPL(thermal_acpi_hot_trip_temp); */ int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp) { - return thermal_acpi_trip_temp(adev, "_CRT", ret_temp); + int temp_decik; + int ret = acpi_critical_trip_temp(adev, &temp_decik); + + return thermal_temp(ret, temp_decik, ret_temp); } EXPORT_SYMBOL_GPL(thermal_acpi_critical_trip_temp); diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 62944e35fcee..abac5cc25477 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -329,21 +329,18 @@ const char *acpi_get_subsystem_id(acpi_handle handle) } EXPORT_SYMBOL_GPL(acpi_get_subsystem_id); -acpi_status -acpi_evaluate_reference(acpi_handle handle, - acpi_string pathname, - struct acpi_object_list *arguments, - struct acpi_handle_list *list) +bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname, + struct acpi_object_list *arguments, + struct acpi_handle_list *list) { - acpi_status status = AE_OK; - union acpi_object *package = NULL; - union acpi_object *element = NULL; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - u32 i = 0; - + union acpi_object *package; + acpi_status status; + bool ret = false; + u32 i; if (!list) - return AE_BAD_PARAMETER; + return false; /* Evaluate object. */ @@ -353,62 +350,47 @@ acpi_evaluate_reference(acpi_handle handle, package = buffer.pointer; - if ((buffer.length == 0) || !package) { - status = AE_BAD_DATA; - acpi_util_eval_error(handle, pathname, status); - goto end; - } - if (package->type != ACPI_TYPE_PACKAGE) { - status = AE_BAD_DATA; - acpi_util_eval_error(handle, pathname, status); - goto end; - } - if (!package->package.count) { - status = AE_BAD_DATA; - acpi_util_eval_error(handle, pathname, status); - goto end; - } + if (buffer.length == 0 || !package || + package->type != ACPI_TYPE_PACKAGE || !package->package.count) + goto err; - list->handles = kcalloc(package->package.count, sizeof(*list->handles), GFP_KERNEL); - if (!list->handles) { - kfree(package); - return AE_NO_MEMORY; - } list->count = package->package.count; + list->handles = kcalloc(list->count, sizeof(*list->handles), GFP_KERNEL); + if (!list->handles) + goto err_clear; /* Extract package data. 
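* Every element must be a local reference with a non-NULL handle; the
* first element that is not fails the whole evaluation.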
*/ for (i = 0; i < list->count; i++) { + union acpi_object *element = &(package->package.elements[i]); - element = &(package->package.elements[i]); + if (element->type != ACPI_TYPE_LOCAL_REFERENCE || + !element->reference.handle) + goto err_free; - if (element->type != ACPI_TYPE_LOCAL_REFERENCE) { - status = AE_BAD_DATA; - acpi_util_eval_error(handle, pathname, status); - break; - } - - if (!element->reference.handle) { - status = AE_NULL_ENTRY; - acpi_util_eval_error(handle, pathname, status); - break; - } /* Get the acpi_handle. */ list->handles[i] = element->reference.handle; acpi_handle_debug(list->handles[i], "Found in reference list\n"); } - if (ACPI_FAILURE(status)) { - list->count = 0; - kfree(list->handles); - list->handles = NULL; - } + ret = true; end: kfree(buffer.pointer); - return status; + return ret; + +err_free: + kfree(list->handles); + list->handles = NULL; + +err_clear: + list->count = 0; + +err: + acpi_util_eval_error(handle, pathname, status); + goto end; } EXPORT_SYMBOL(acpi_evaluate_reference); @@ -426,7 +408,7 @@ bool acpi_handle_list_equal(struct acpi_handle_list *list1, { return list1->count == list2->count && !memcmp(list1->handles, list2->handles, - list1->count * sizeof(acpi_handle)); + list1->count * sizeof(*list1->handles)); } EXPORT_SYMBOL_GPL(acpi_handle_list_equal); @@ -468,6 +450,40 @@ void acpi_handle_list_free(struct acpi_handle_list *list) } EXPORT_SYMBOL_GPL(acpi_handle_list_free); +/** + * acpi_device_dep - Check ACPI device dependency + * @target: ACPI handle of the target ACPI device. + * @match: ACPI handle to look up in the target's _DEP list. + * + * Return true if @match is present in the list returned by _DEP for + * @target or false otherwise. + */ +bool acpi_device_dep(acpi_handle target, acpi_handle match) +{ + struct acpi_handle_list dep_devices; + bool ret = false; + int i; + + if (!acpi_has_method(target, "_DEP")) + return false; + + if (!acpi_evaluate_reference(target, "_DEP", NULL, &dep_devices)) { + acpi_handle_debug(target, "Failed to evaluate _DEP.\n"); + return false; + } + + for (i = 0; i < dep_devices.count; i++) { + if (dep_devices.handles[i] == match) { + ret = true; + break; + } + } + + acpi_handle_list_free(&dep_devices); + return ret; +} +EXPORT_SYMBOL_GPL(acpi_device_dep); + acpi_status acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld) { @@ -825,54 +841,6 @@ bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs) EXPORT_SYMBOL(acpi_check_dsm); /** - * acpi_dev_uid_match - Match device by supplied UID - * @adev: ACPI device to match. - * @uid2: Unique ID of the device. - * - * Matches UID in @adev with given @uid2. - * - * Returns: - * - %true if matches. - * - %false otherwise. - */ -bool acpi_dev_uid_match(struct acpi_device *adev, const char *uid2) -{ - const char *uid1 = acpi_device_uid(adev); - - return uid1 && uid2 && !strcmp(uid1, uid2); -} -EXPORT_SYMBOL_GPL(acpi_dev_uid_match); - -/** - * acpi_dev_hid_uid_match - Match device by supplied HID and UID - * @adev: ACPI device to match. - * @hid2: Hardware ID of the device. - * @uid2: Unique ID of the device, pass NULL to not check _UID. - * - * Matches HID and UID in @adev with given @hid2 and @uid2. Absence of @uid2 - * will be treated as a match. If user wants to validate @uid2, it should be - * done before calling this function. - * - * Returns: - * - %true if matches or @uid2 is NULL. - * - %false otherwise. 
- */ -bool acpi_dev_hid_uid_match(struct acpi_device *adev, - const char *hid2, const char *uid2) -{ - const char *hid1 = acpi_device_hid(adev); - - if (strcmp(hid1, hid2)) - return false; - - if (!uid2) - return true; - - return acpi_dev_uid_match(adev, uid2); -} -EXPORT_SYMBOL(acpi_dev_hid_uid_match); - -/** * acpi_dev_uid_to_integer - treat ACPI device _UID as integer * @adev: ACPI device to get _UID from * @integer: output buffer for integer diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 92128aae2d06..7658103ba760 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -1921,7 +1921,7 @@ static void binder_deferred_fd_close(int fd) if (!twcb) return; init_task_work(&twcb->twork, binder_do_fd_close); - twcb->file = close_fd_get_file(fd); + twcb->file = file_close_fd(fd); if (twcb->file) { // pin it until binder_do_fd_close(); see comments there get_file(twcb->file); diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 138f6d43d13b..f69d30c9f50f 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -234,7 +234,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, if (page->page_ptr) { trace_binder_alloc_lru_start(alloc, index); - on_lru = list_lru_del(&binder_alloc_lru, &page->lru); + on_lru = list_lru_del_obj(&binder_alloc_lru, &page->lru); WARN_ON(!on_lru); trace_binder_alloc_lru_end(alloc, index); @@ -285,7 +285,7 @@ free_range: trace_binder_free_lru_start(alloc, index); - ret = list_lru_add(&binder_alloc_lru, &page->lru); + ret = list_lru_add_obj(&binder_alloc_lru, &page->lru); WARN_ON(!ret); trace_binder_free_lru_end(alloc, index); @@ -848,7 +848,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) if (!alloc->pages[i].page_ptr) continue; - on_lru = list_lru_del(&binder_alloc_lru, + on_lru = list_lru_del_obj(&binder_alloc_lru, &alloc->pages[i].lru); page_addr = alloc->buffer + i * PAGE_SIZE; binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, @@ -1287,4 +1287,3 @@ int binder_alloc_copy_from_buffer(struct binder_alloc *alloc, return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, dest, bytes); } - diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c index eaa31e567d1e..5b59d133b6af 100644 --- a/drivers/base/arch_numa.c +++ b/drivers/base/arch_numa.c @@ -144,7 +144,7 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid) unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); -static int __init early_cpu_to_node(int cpu) +int __init early_cpu_to_node(int cpu) { return cpu_to_node_map[cpu]; } diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index b741b5ba82bd..5aaa0865625d 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/rcupdate.h> #include <linux/sched.h> +#include <linux/units.h> #define CREATE_TRACE_POINTS #include <trace/events/thermal_pressure.h> @@ -26,7 +27,8 @@ static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data); static struct cpumask scale_freq_counters_mask; static bool scale_freq_invariant; -static DEFINE_PER_CPU(u32, freq_factor) = 1; +DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1; +EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref); static bool supports_scale_freq_counters(const struct cpumask *cpus) { @@ -170,9 +172,9 @@ DEFINE_PER_CPU(unsigned long, thermal_pressure); * operating on stale data when hot-plug is used for some CPUs. 
The * @capped_freq reflects the currently allowed max CPUs frequency due to * thermal capping. It might be also a boost frequency value, which is bigger - * than the internal 'freq_factor' max frequency. In such case the pressure - * value should simply be removed, since this is an indication that there is - * no thermal throttling. The @capped_freq must be provided in kHz. + * than the internal 'capacity_freq_ref' max frequency. In such case the + * pressure value should simply be removed, since this is an indication that + * there is no thermal throttling. The @capped_freq must be provided in kHz. */ void topology_update_thermal_pressure(const struct cpumask *cpus, unsigned long capped_freq) @@ -183,10 +185,7 @@ void topology_update_thermal_pressure(const struct cpumask *cpus, cpu = cpumask_first(cpus); max_capacity = arch_scale_cpu_capacity(cpu); - max_freq = per_cpu(freq_factor, cpu); - - /* Convert to MHz scale which is used in 'freq_factor' */ - capped_freq /= 1000; + max_freq = arch_scale_freq_ref(cpu); /* * Handle properly the boost frequencies, which should simply clean @@ -279,13 +278,13 @@ void topology_normalize_cpu_scale(void) capacity_scale = 1; for_each_possible_cpu(cpu) { - capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu); + capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu); capacity_scale = max(capacity, capacity_scale); } pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale); for_each_possible_cpu(cpu) { - capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu); + capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu); capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT, capacity_scale); topology_set_cpu_scale(cpu, capacity); @@ -321,15 +320,15 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) cpu_node, raw_capacity[cpu]); /* - * Update freq_factor for calculating early boot cpu capacities. + * Update capacity_freq_ref for calculating early boot CPU capacities. * For non-clk CPU DVFS mechanism, there's no way to get the * frequency value now, assuming they are running at the same - * frequency (by keeping the initial freq_factor value). + * frequency (by keeping the initial capacity_freq_ref value). 
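+ *
+ * Illustrative numbers only: two CPUs with raw capacities 1024 and 512
+ * and equal capacity_freq_ref values end up, after
+ * topology_normalize_cpu_scale(), with scaled capacities of 1024
+ * (SCHED_CAPACITY_SCALE) and 512, since the largest product is scaled
+ * to SCHED_CAPACITY_SCALE.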
*/ cpu_clk = of_clk_get(cpu_node, 0); if (!PTR_ERR_OR_ZERO(cpu_clk)) { - per_cpu(freq_factor, cpu) = - clk_get_rate(cpu_clk) / 1000; + per_cpu(capacity_freq_ref, cpu) = + clk_get_rate(cpu_clk) / HZ_PER_KHZ; clk_put(cpu_clk); } } else { @@ -345,11 +344,16 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) return !ret; } +void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate) +{ +} + #ifdef CONFIG_ACPI_CPPC_LIB #include <acpi/cppc_acpi.h> void topology_init_cpu_capacity_cppc(void) { + u64 capacity, capacity_scale = 0; struct cppc_perf_caps perf_caps; int cpu; @@ -366,6 +370,10 @@ void topology_init_cpu_capacity_cppc(void) (perf_caps.highest_perf >= perf_caps.nominal_perf) && (perf_caps.highest_perf >= perf_caps.lowest_perf)) { raw_capacity[cpu] = perf_caps.highest_perf; + capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]); + + per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]); + pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n", cpu, raw_capacity[cpu]); continue; @@ -376,7 +384,18 @@ void topology_init_cpu_capacity_cppc(void) goto exit; } - topology_normalize_cpu_scale(); + for_each_possible_cpu(cpu) { + freq_inv_set_max_ratio(cpu, + per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ); + + capacity = raw_capacity[cpu]; + capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT, + capacity_scale); + topology_set_cpu_scale(cpu, capacity); + pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", + cpu, topology_get_cpu_scale(cpu)); + } + schedule_work(&update_topology_flags_work); pr_debug("cpu_capacity: cpu_capacity initialization done\n"); @@ -410,8 +429,11 @@ init_cpu_capacity_callback(struct notifier_block *nb, cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus); - for_each_cpu(cpu, policy->related_cpus) - per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000; + for_each_cpu(cpu, policy->related_cpus) { + per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq; + freq_inv_set_max_ratio(cpu, + per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ); + } if (cpumask_empty(cpus_to_visit)) { topology_normalize_cpu_scale(); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index f85f3515c258..fadcd0379dc2 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -587,7 +587,7 @@ bool dev_pm_skip_resume(struct device *dev) * The driver of @dev will not receive interrupts while this function is being * executed. */ -static int device_resume_noirq(struct device *dev, pm_message_t state, bool async) +static void device_resume_noirq(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -655,7 +655,13 @@ Skip: Out: complete_all(&dev->power.completion); TRACE_RESUME(error); - return error; + + if (error) { + suspend_stats.failed_resume_noirq++; + dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, async ? 
" async noirq" : " noirq", error); + } } static bool is_async(struct device *dev) @@ -669,23 +675,29 @@ static bool dpm_async_fn(struct device *dev, async_func_t func) reinit_completion(&dev->power.completion); if (is_async(dev)) { + dev->power.async_in_progress = true; + get_device(dev); - async_schedule_dev(func, dev); - return true; - } + if (async_schedule_dev_nocall(func, dev)) + return true; + + put_device(dev); + } + /* + * Because async_schedule_dev_nocall() above has returned false or it + * has not been called at all, func() is not running and it is safe to + * update the async_in_progress flag without extra synchronization. + */ + dev->power.async_in_progress = false; return false; } static void async_resume_noirq(void *data, async_cookie_t cookie) { struct device *dev = data; - int error; - - error = device_resume_noirq(dev, pm_transition, true); - if (error) - pm_dev_err(dev, pm_transition, " async", error); + device_resume_noirq(dev, pm_transition, true); put_device(dev); } @@ -699,35 +711,27 @@ static void dpm_noirq_resume_devices(pm_message_t state) pm_transition = state; /* - * Advanced the async threads upfront, - * in case the starting of async threads is - * delayed by non-async resuming devices. + * Trigger the resume of "async" devices upfront so they don't have to + * wait for the "non-async" ones they don't depend on. */ list_for_each_entry(dev, &dpm_noirq_list, power.entry) dpm_async_fn(dev, async_resume_noirq); while (!list_empty(&dpm_noirq_list)) { dev = to_device(dpm_noirq_list.next); - get_device(dev); list_move_tail(&dev->power.entry, &dpm_late_early_list); - mutex_unlock(&dpm_list_mtx); + if (!dev->power.async_in_progress) { + get_device(dev); - if (!is_async(dev)) { - int error; + mutex_unlock(&dpm_list_mtx); - error = device_resume_noirq(dev, state, false); - if (error) { - suspend_stats.failed_resume_noirq++; - dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, state, " noirq", error); - } - } + device_resume_noirq(dev, state, false); - put_device(dev); + put_device(dev); - mutex_lock(&dpm_list_mtx); + mutex_lock(&dpm_list_mtx); + } } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); @@ -758,7 +762,7 @@ void dpm_resume_noirq(pm_message_t state) * * Runtime PM is disabled for @dev while this function is being executed. */ -static int device_resume_early(struct device *dev, pm_message_t state, bool async) +static void device_resume_early(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -811,18 +815,20 @@ Out: pm_runtime_enable(dev); complete_all(&dev->power.completion); - return error; + + if (error) { + suspend_stats.failed_resume_early++; + dpm_save_failed_step(SUSPEND_RESUME_EARLY); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, async ? " async early" : " early", error); + } } static void async_resume_early(void *data, async_cookie_t cookie) { struct device *dev = data; - int error; - - error = device_resume_early(dev, pm_transition, true); - if (error) - pm_dev_err(dev, pm_transition, " async", error); + device_resume_early(dev, pm_transition, true); put_device(dev); } @@ -840,35 +846,27 @@ void dpm_resume_early(pm_message_t state) pm_transition = state; /* - * Advanced the async threads upfront, - * in case the starting of async threads is - * delayed by non-async resuming devices. + * Trigger the resume of "async" devices upfront so they don't have to + * wait for the "non-async" ones they don't depend on. 
*/ list_for_each_entry(dev, &dpm_late_early_list, power.entry) dpm_async_fn(dev, async_resume_early); while (!list_empty(&dpm_late_early_list)) { dev = to_device(dpm_late_early_list.next); - get_device(dev); list_move_tail(&dev->power.entry, &dpm_suspended_list); - mutex_unlock(&dpm_list_mtx); + if (!dev->power.async_in_progress) { + get_device(dev); - if (!is_async(dev)) { - int error; + mutex_unlock(&dpm_list_mtx); - error = device_resume_early(dev, state, false); - if (error) { - suspend_stats.failed_resume_early++; - dpm_save_failed_step(SUSPEND_RESUME_EARLY); - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, state, " early", error); - } - } + device_resume_early(dev, state, false); - put_device(dev); + put_device(dev); - mutex_lock(&dpm_list_mtx); + mutex_lock(&dpm_list_mtx); + } } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); @@ -893,7 +891,7 @@ EXPORT_SYMBOL_GPL(dpm_resume_start); * @state: PM transition of the system being carried out. * @async: If true, the device is being resumed asynchronously. */ -static int device_resume(struct device *dev, pm_message_t state, bool async) +static void device_resume(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -975,17 +973,19 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) TRACE_RESUME(error); - return error; + if (error) { + suspend_stats.failed_resume++; + dpm_save_failed_step(SUSPEND_RESUME); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, async ? " async" : "", error); + } } static void async_resume(void *data, async_cookie_t cookie) { struct device *dev = data; - int error; - error = device_resume(dev, pm_transition, true); - if (error) - pm_dev_err(dev, pm_transition, " async", error); + device_resume(dev, pm_transition, true); put_device(dev); } @@ -1008,27 +1008,26 @@ void dpm_resume(pm_message_t state) pm_transition = state; async_error = 0; + /* + * Trigger the resume of "async" devices upfront so they don't have to + * wait for the "non-async" ones they don't depend on. 
+ */ list_for_each_entry(dev, &dpm_suspended_list, power.entry) dpm_async_fn(dev, async_resume); while (!list_empty(&dpm_suspended_list)) { dev = to_device(dpm_suspended_list.next); + get_device(dev); - if (!is_async(dev)) { - int error; + if (!dev->power.async_in_progress) { mutex_unlock(&dpm_list_mtx); - error = device_resume(dev, state, false); - if (error) { - suspend_stats.failed_resume++; - dpm_save_failed_step(SUSPEND_RESUME); - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, state, "", error); - } + device_resume(dev, state, false); mutex_lock(&dpm_list_mtx); } + if (!list_empty(&dev->power.entry)) list_move_tail(&dev->power.entry, &dpm_prepared_list); diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index 9a9ea514c2d8..583dd5d7d46b 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -318,6 +318,7 @@ struct regmap_ram_data { bool *read; bool *written; enum regmap_endian reg_endian; + bool (*noinc_reg)(struct regmap_ram_data *data, unsigned int reg); }; /* diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index bdd80b73c3e6..fb84cda92a75 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -226,8 +226,8 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from, if (*ppos < 0 || !count) return -EINVAL; - if (count > (PAGE_SIZE << MAX_ORDER)) - count = PAGE_SIZE << MAX_ORDER; + if (count > (PAGE_SIZE << MAX_PAGE_ORDER)) + count = PAGE_SIZE << MAX_PAGE_ORDER; buf = kmalloc(count, GFP_KERNEL); if (!buf) @@ -373,8 +373,8 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file, if (*ppos < 0 || !count) return -EINVAL; - if (count > (PAGE_SIZE << MAX_ORDER)) - count = PAGE_SIZE << MAX_ORDER; + if (count > (PAGE_SIZE << MAX_PAGE_ORDER)) + count = PAGE_SIZE << MAX_PAGE_ORDER; buf = kmalloc(count, GFP_KERNEL); if (!buf) diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c index e14cc03a17f6..026bdcb45127 100644 --- a/drivers/base/regmap/regmap-kunit.c +++ b/drivers/base/regmap/regmap-kunit.c @@ -1186,6 +1186,65 @@ static void raw_write(struct kunit *test) regmap_exit(map); } +static bool reg_zero(struct device *dev, unsigned int reg) +{ + return reg == 0; +} + +static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg) +{ + return reg == 0; +} + +static void raw_noinc_write(struct kunit *test) +{ + struct raw_test_types *t = (struct raw_test_types *)test->param_value; + struct regmap *map; + struct regmap_config config; + struct regmap_ram_data *data; + unsigned int val, val_test, val_last; + u16 val_array[BLOCK_TEST_SIZE]; + + config = raw_regmap_config; + config.volatile_reg = reg_zero; + config.writeable_noinc_reg = reg_zero; + config.readable_noinc_reg = reg_zero; + + map = gen_raw_regmap(&config, t, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + data->noinc_reg = ram_reg_zero; + + get_random_bytes(&val_array, sizeof(val_array)); + + if (config.val_format_endian == REGMAP_ENDIAN_BIG) { + val_test = be16_to_cpu(val_array[1]) + 100; + val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]); + } else { + val_test = le16_to_cpu(val_array[1]) + 100; + val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]); + } + + /* Put some data into the register following the noinc register */ + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test)); + + /* Write some data to the noinc register */ + KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, 
val_array, + sizeof(val_array))); + + /* We should read back the last value written */ + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val)); + KUNIT_ASSERT_EQ(test, val_last, val); + + /* Make sure we didn't touch the register after the noinc register */ + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val)); + KUNIT_ASSERT_EQ(test, val_test, val); + + regmap_exit(map); +} + static void raw_sync(struct kunit *test) { struct raw_test_types *t = (struct raw_test_types *)test->param_value; @@ -1284,6 +1343,7 @@ static struct kunit_case regmap_test_cases[] = { KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params), KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params), KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params), + KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params), KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params), {} }; diff --git a/drivers/base/regmap/regmap-ram.c b/drivers/base/regmap/regmap-ram.c index 85f34a5dee04..192d6b131dff 100644 --- a/drivers/base/regmap/regmap-ram.c +++ b/drivers/base/regmap/regmap-ram.c @@ -65,12 +65,12 @@ struct regmap *__regmap_init_ram(const struct regmap_config *config, return ERR_PTR(-EINVAL); } - data->read = kcalloc(sizeof(bool), config->max_register + 1, + data->read = kcalloc(config->max_register + 1, sizeof(bool), GFP_KERNEL); if (!data->read) return ERR_PTR(-ENOMEM); - data->written = kcalloc(sizeof(bool), config->max_register + 1, + data->written = kcalloc(config->max_register + 1, sizeof(bool), GFP_KERNEL); if (!data->written) return ERR_PTR(-ENOMEM); diff --git a/drivers/base/regmap/regmap-raw-ram.c b/drivers/base/regmap/regmap-raw-ram.c index c9b800885f3b..93ae07b503fd 100644 --- a/drivers/base/regmap/regmap-raw-ram.c +++ b/drivers/base/regmap/regmap-raw-ram.c @@ -41,10 +41,15 @@ static int regmap_raw_ram_gather_write(void *context, return -EINVAL; r = decode_reg(data->reg_endian, reg); - memcpy(&our_buf[r], val, val_len); - - for (i = 0; i < val_len / 2; i++) - data->written[r + i] = true; + if (data->noinc_reg && data->noinc_reg(data, r)) { + memcpy(&our_buf[r], val + val_len - 2, 2); + data->written[r] = true; + } else { + memcpy(&our_buf[r], val, val_len); + + for (i = 0; i < val_len / 2; i++) + data->written[r + i] = true; + } return 0; } @@ -70,10 +75,16 @@ static int regmap_raw_ram_read(void *context, return -EINVAL; r = decode_reg(data->reg_endian, reg); - memcpy(val, &our_buf[r], val_len); - - for (i = 0; i < val_len / 2; i++) - data->read[r + i] = true; + if (data->noinc_reg && data->noinc_reg(data, r)) { + for (i = 0; i < val_len; i += 2) + memcpy(val + i, &our_buf[r], 2); + data->read[r] = true; + } else { + memcpy(val, &our_buf[r], val_len); + + for (i = 0; i < val_len / 2; i++) + data->read[r + i] = true; + } return 0; } @@ -111,12 +122,12 @@ struct regmap *__regmap_init_raw_ram(const struct regmap_config *config, return ERR_PTR(-EINVAL); } - data->read = kcalloc(sizeof(bool), config->max_register + 1, + data->read = kcalloc(config->max_register + 1, sizeof(bool), GFP_KERNEL); if (!data->read) return ERR_PTR(-ENOMEM); - data->written = kcalloc(sizeof(bool), config->max_register + 1, + data->written = kcalloc(config->max_register + 1, sizeof(bool), GFP_KERNEL); if (!data->written) return ERR_PTR(-ENOMEM); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index ea6157747199..6db77d8e45f9 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -2136,7 +2136,7 @@ static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg, } /** - 
* regmap_noinc_write(): Write data from a register without incrementing the + * regmap_noinc_write(): Write data to a register without incrementing the * register number * * @map: Register map to write to diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 11114a5d9e5c..d0e41d52d6a9 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3079,7 +3079,7 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr) } } -#define MAX_LEN (1UL << MAX_ORDER << PAGE_SHIFT) +#define MAX_LEN (1UL << MAX_PAGE_ORDER << PAGE_SHIFT) static int raw_cmd_copyin(int cmd, void __user *param, struct floppy_raw_cmd **rcmd) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 9f2d412fc560..8a8cd4fc9238 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -245,9 +245,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len); - file_start_write(file); bw = vfs_iter_write(file, &i, ppos, 0); - file_end_write(file); if (likely(bw == bvec->bv_len)) return 0; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 83600b45e12a..3eaf02ebeebe 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -893,12 +893,9 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req, */ if (ublk_need_map_req(req)) { struct iov_iter iter; - struct iovec iov; const int dir = ITER_DEST; - import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes, - &iov, &iter); - + import_ubuf(dir, u64_to_user_ptr(io->addr), rq_bytes, &iter); return ublk_copy_user_pages(req, 0, &iter, dir); } return rq_bytes; @@ -915,13 +912,11 @@ static int ublk_unmap_io(const struct ublk_queue *ubq, if (ublk_need_unmap_req(req)) { struct iov_iter iter; - struct iovec iov; const int dir = ITER_SOURCE; WARN_ON_ONCE(io->res > rq_bytes); - import_single_range(dir, u64_to_user_ptr(io->addr), io->res, - &iov, &iter); + import_ubuf(dir, u64_to_user_ptr(io->addr), io->res, &iter); return ublk_copy_user_pages(req, 0, &iter, dir); } return rq_bytes; diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig index 0386b7da02aa..7b29cce60ab2 100644 --- a/drivers/block/zram/Kconfig +++ b/drivers/block/zram/Kconfig @@ -59,8 +59,8 @@ config ZRAM_WRITEBACK bool "Write back incompressible or idle page to backing device" depends on ZRAM help - With incompressible page, there is no memory saving to keep it - in memory. Instead, write it out to backing device. + This lets zram entries (incompressible or idle pages) be written + back to a backing device, helping save memory. For this feature, admin should set up backing device via /sys/block/zramX/backing_dev. @@ -69,9 +69,18 @@ config ZRAM_WRITEBACK See Documentation/admin-guide/blockdev/zram.rst for more information. +config ZRAM_TRACK_ENTRY_ACTIME + bool "Track access time of zram entries" + depends on ZRAM + help + With this feature zram tracks access time of every stored + entry (page), which can be used for a more fine grained IDLE + pages writeback. + config ZRAM_MEMORY_TRACKING bool "Track zRam block status" depends on ZRAM && DEBUG_FS + select ZRAM_TRACK_ENTRY_ACTIME help With this feature, admin can track the state of allocated blocks of zRAM. Admin could see the information via @@ -86,4 +95,4 @@ config ZRAM_MULTI_COMP This will enable multi-compression streams, so that ZRAM can re-compress pages using a potentially slower but more effective compression algorithm. Note, that IDLE page recompression - requires ZRAM_MEMORY_TRACKING. 
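The MAX_ORDER to MAX_PAGE_ORDER renames in this series (floppy above, sev-dev and hisi_acc further below) follow the page allocator's new naming, under which 2^MAX_PAGE_ORDER pages is the largest buddy allocation. A sketch of the resulting byte limit, mirroring the floppy MAX_LEN macro:

#include <linux/mm.h>

static unsigned long max_contig_bytes(void)
{
	return 1UL << MAX_PAGE_ORDER << PAGE_SHIFT;
}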
+ requires ZRAM_TRACK_ENTRY_ACTIME. diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index d77d3664ca08..2b1d82473be8 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -174,6 +174,14 @@ static inline u32 zram_get_priority(struct zram *zram, u32 index) return prio & ZRAM_COMP_PRIORITY_MASK; } +static void zram_accessed(struct zram *zram, u32 index) +{ + zram_clear_flag(zram, index, ZRAM_IDLE); +#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME + zram->table[index].ac_time = ktime_get_boottime(); +#endif +} + static inline void update_used_max(struct zram *zram, const unsigned long pages) { @@ -293,8 +301,9 @@ static void mark_idle(struct zram *zram, ktime_t cutoff) zram_slot_lock(zram, index); if (zram_allocated(zram, index) && !zram_test_flag(zram, index, ZRAM_UNDER_WB)) { -#ifdef CONFIG_ZRAM_MEMORY_TRACKING - is_idle = !cutoff || ktime_after(cutoff, zram->table[index].ac_time); +#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME + is_idle = !cutoff || ktime_after(cutoff, + zram->table[index].ac_time); #endif if (is_idle) zram_set_flag(zram, index, ZRAM_IDLE); @@ -317,7 +326,7 @@ static ssize_t idle_store(struct device *dev, */ u64 age_sec; - if (IS_ENABLED(CONFIG_ZRAM_MEMORY_TRACKING) && !kstrtoull(buf, 0, &age_sec)) + if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) && !kstrtoull(buf, 0, &age_sec)) cutoff_time = ktime_sub(ktime_get_boottime(), ns_to_ktime(age_sec * NSEC_PER_SEC)); else @@ -841,12 +850,6 @@ static void zram_debugfs_destroy(void) debugfs_remove_recursive(zram_debugfs_root); } -static void zram_accessed(struct zram *zram, u32 index) -{ - zram_clear_flag(zram, index, ZRAM_IDLE); - zram->table[index].ac_time = ktime_get_boottime(); -} - static ssize_t read_block_state(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -930,10 +933,6 @@ static void zram_debugfs_unregister(struct zram *zram) #else static void zram_debugfs_create(void) {}; static void zram_debugfs_destroy(void) {}; -static void zram_accessed(struct zram *zram, u32 index) -{ - zram_clear_flag(zram, index, ZRAM_IDLE); -}; static void zram_debugfs_register(struct zram *zram) {}; static void zram_debugfs_unregister(struct zram *zram) {}; #endif @@ -1254,7 +1253,7 @@ static void zram_free_page(struct zram *zram, size_t index) { unsigned long handle; -#ifdef CONFIG_ZRAM_MEMORY_TRACKING +#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME zram->table[index].ac_time = 0; #endif if (zram_test_flag(zram, index, ZRAM_IDLE)) @@ -1322,9 +1321,9 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page, void *mem; value = handle ? 
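The mark_idle() and idle_store() hunks above key the access-time test off the new CONFIG_ZRAM_TRACK_ENTRY_ACTIME instead of the debugfs-bound CONFIG_ZRAM_MEMORY_TRACKING. The cutoff logic itself is unchanged; condensed here into a hypothetical helper:

#include <linux/ktime.h>

static bool entry_is_idle(ktime_t ac_time, ktime_t cutoff)
{
	/* a zero cutoff (e.g. "echo all > idle") marks every entry idle */
	return !cutoff || ktime_after(cutoff, ac_time);
}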
zram_get_element(zram, index) : 0; - mem = kmap_atomic(page); + mem = kmap_local_page(page); zram_fill_page(mem, PAGE_SIZE, value); - kunmap_atomic(mem); + kunmap_local(mem); return 0; } @@ -1337,14 +1336,14 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page, src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); if (size == PAGE_SIZE) { - dst = kmap_atomic(page); + dst = kmap_local_page(page); memcpy(dst, src, PAGE_SIZE); - kunmap_atomic(dst); + kunmap_local(dst); ret = 0; } else { - dst = kmap_atomic(page); + dst = kmap_local_page(page); ret = zcomp_decompress(zstrm, src, size, dst); - kunmap_atomic(dst); + kunmap_local(dst); zcomp_stream_put(zram->comps[prio]); } zs_unmap_object(zram->mem_pool, handle); @@ -1417,21 +1416,21 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) unsigned long element = 0; enum zram_pageflags flags = 0; - mem = kmap_atomic(page); + mem = kmap_local_page(page); if (page_same_filled(mem, &element)) { - kunmap_atomic(mem); + kunmap_local(mem); /* Free memory associated with this sector now. */ flags = ZRAM_SAME; atomic64_inc(&zram->stats.same_pages); goto out; } - kunmap_atomic(mem); + kunmap_local(mem); compress_again: zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]); - src = kmap_atomic(page); + src = kmap_local_page(page); ret = zcomp_compress(zstrm, src, &comp_len); - kunmap_atomic(src); + kunmap_local(src); if (unlikely(ret)) { zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); @@ -1495,10 +1494,10 @@ compress_again: src = zstrm->buffer; if (comp_len == PAGE_SIZE) - src = kmap_atomic(page); + src = kmap_local_page(page); memcpy(dst, src, comp_len); if (comp_len == PAGE_SIZE) - kunmap_atomic(src); + kunmap_local(src); zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); zs_unmap_object(zram->mem_pool, handle); @@ -1615,9 +1614,9 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page, num_recomps++; zstrm = zcomp_stream_get(zram->comps[prio]); - src = kmap_atomic(page); + src = kmap_local_page(page); ret = zcomp_compress(zstrm, src, &comp_len_new); - kunmap_atomic(src); + kunmap_local(src); if (ret) { zcomp_stream_put(zram->comps[prio]); diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index d090753f97be..3b94d12f41b4 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -69,7 +69,7 @@ struct zram_table_entry { unsigned long element; }; unsigned long flags; -#ifdef CONFIG_ZRAM_MEMORY_TRACKING +#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME ktime_t ac_time; #endif }; diff --git a/drivers/char/random.c b/drivers/char/random.c index 4a9c79391dee..456be28ba67c 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1364,7 +1364,6 @@ static void __cold try_to_generate_entropy(void) SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags) { struct iov_iter iter; - struct iovec iov; int ret; if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) @@ -1385,7 +1384,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags return ret; } - ret = import_single_range(ITER_DEST, ubuf, len, &iov, &iter); + ret = import_ubuf(ITER_DEST, ubuf, len, &iter); if (unlikely(ret)) return ret; return get_random_bytes_user(&iter); @@ -1491,7 +1490,6 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return 0; case RNDADDENTROPY: { struct iov_iter iter; - struct iovec iov; ssize_t ret; int len; @@ -1503,7 +1501,7 @@ static long random_ioctl(struct file *f, unsigned int 
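The zram kmap_atomic() to kmap_local_page() conversion above is mechanical, but the semantics differ: local mappings leave preemption and page faults enabled and, when nested, must be released innermost-first. A self-contained sketch under those assumptions (hypothetical helper, not part of the patch):

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_highpage_contents(struct page *dst_page, struct page *src_page)
{
	void *src = kmap_local_page(src_page);
	void *dst = kmap_local_page(dst_page);

	memcpy(dst, src, PAGE_SIZE);

	kunmap_local(dst);	/* reverse order of mapping */
	kunmap_local(src);
}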
cmd, unsigned long arg) return -EINVAL; if (get_user(len, p++)) return -EFAULT; - ret = import_single_range(ITER_SOURCE, p, len, &iov, &iter); + ret = import_ubuf(ITER_SOURCE, p, len, &iter); if (unlikely(ret)) return ret; ret = write_pool_user(&iter); diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 44b19e696176..3d5e6d705fc6 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -108,8 +108,9 @@ static inline void send_msg(struct cn_msg *msg) filter_data[1] = 0; } - cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT, - cn_filter, (void *)filter_data); + if (cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT, + cn_filter, (void *)filter_data) == -ESRCH) + atomic_set(&proc_event_num_listeners, 0); local_unlock(&local_event.lock); } diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c index 8afefdea4d80..ce5a5641b6dd 100644 --- a/drivers/cpufreq/armada-8k-cpufreq.c +++ b/drivers/cpufreq/armada-8k-cpufreq.c @@ -57,7 +57,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk, continue; } - clk = clk_get(cpu_dev, 0); + clk = clk_get(cpu_dev, NULL); if (IS_ERR(clk)) { pr_warn("Cannot get clock for CPU %d\n", cpu); } else { @@ -165,7 +165,7 @@ static int __init armada_8k_cpufreq_init(void) continue; } - clk = clk_get(cpu_dev, 0); + clk = clk_get(cpu_dev, NULL); if (IS_ERR(clk)) { pr_err("Cannot get clock for CPU %d\n", cpu); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index fe08ca419b3d..64420d9cfd1e 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -16,7 +16,6 @@ #include <linux/delay.h> #include <linux/cpu.h> #include <linux/cpufreq.h> -#include <linux/dmi.h> #include <linux/irq_work.h> #include <linux/kthread.h> #include <linux/time.h> @@ -27,12 +26,6 @@ #include <acpi/cppc_acpi.h> -/* Minimum struct length needed for the DMI processor entry we want */ -#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48 - -/* Offset in the DMI processor structure for the max frequency */ -#define DMI_PROCESSOR_MAX_SPEED 0x14 - /* * This list contains information parsed from per CPU ACPI _CPC and _PSD * structures: e.g. the highest and lowest supported performance, capabilities, @@ -291,97 +284,9 @@ static inline void cppc_freq_invariance_exit(void) } #endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */ -/* Callback function used to retrieve the max frequency from DMI */ -static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) -{ - const u8 *dmi_data = (const u8 *)dm; - u16 *mhz = (u16 *)private; - - if (dm->type == DMI_ENTRY_PROCESSOR && - dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) { - u16 val = (u16)get_unaligned((const u16 *) - (dmi_data + DMI_PROCESSOR_MAX_SPEED)); - *mhz = val > *mhz ? val : *mhz; - } -} - -/* Look up the max frequency in DMI */ -static u64 cppc_get_dmi_max_khz(void) -{ - u16 mhz = 0; - - dmi_walk(cppc_find_dmi_mhz, &mhz); - - /* - * Real stupid fallback value, just in case there is no - * actual value set. - */ - mhz = mhz ? mhz : 1; - - return (1000 * mhz); -} - -/* - * If CPPC lowest_freq and nominal_freq registers are exposed then we can - * use them to convert perf to freq and vice versa. 
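random.c here, and ublk above, swap import_single_range() for import_ubuf(), which builds an ITER_UBUF iov_iter directly from a user pointer and drops the caller-owned struct iovec. Minimal usage sketch (hypothetical wrapper):

#include <linux/uio.h>

static int iter_from_user(void __user *ubuf, size_t len, struct iov_iter *iter)
{
	/* 0 on success, -EFAULT if the range fails access_ok() */
	return import_ubuf(ITER_DEST, ubuf, len, iter);
}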
The conversion is - * extrapolated as an affine function passing by the 2 points: - * - (Low perf, Low freq) - * - (Nominal perf, Nominal perf) - */ -static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data, - unsigned int perf) -{ - struct cppc_perf_caps *caps = &cpu_data->perf_caps; - s64 retval, offset = 0; - static u64 max_khz; - u64 mul, div; - - if (caps->lowest_freq && caps->nominal_freq) { - mul = caps->nominal_freq - caps->lowest_freq; - div = caps->nominal_perf - caps->lowest_perf; - offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div); - } else { - if (!max_khz) - max_khz = cppc_get_dmi_max_khz(); - mul = max_khz; - div = caps->highest_perf; - } - - retval = offset + div64_u64(perf * mul, div); - if (retval >= 0) - return retval; - return 0; -} - -static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data, - unsigned int freq) -{ - struct cppc_perf_caps *caps = &cpu_data->perf_caps; - s64 retval, offset = 0; - static u64 max_khz; - u64 mul, div; - - if (caps->lowest_freq && caps->nominal_freq) { - mul = caps->nominal_perf - caps->lowest_perf; - div = caps->nominal_freq - caps->lowest_freq; - offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div); - } else { - if (!max_khz) - max_khz = cppc_get_dmi_max_khz(); - mul = caps->highest_perf; - div = max_khz; - } - - retval = offset + div64_u64(freq * mul, div); - if (retval >= 0) - return retval; - return 0; -} - static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) - { struct cppc_cpudata *cpu_data = policy->driver_data; unsigned int cpu = policy->cpu; @@ -389,7 +294,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, u32 desired_perf; int ret = 0; - desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq); + desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq); /* Return if it is exactly the same perf */ if (desired_perf == cpu_data->perf_ctrls.desired_perf) return ret; @@ -417,7 +322,7 @@ static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy, u32 desired_perf; int ret; - desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq); + desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq); cpu_data->perf_ctrls.desired_perf = desired_perf; ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); @@ -530,7 +435,7 @@ static int cppc_get_cpu_power(struct device *cpu_dev, min_step = min_cap / CPPC_EM_CAP_STEP; max_step = max_cap / CPPC_EM_CAP_STEP; - perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz); + perf_prev = cppc_khz_to_perf(perf_caps, *KHz); step = perf_prev / perf_step; if (step > max_step) @@ -550,8 +455,8 @@ static int cppc_get_cpu_power(struct device *cpu_dev, perf = step * perf_step; } - *KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf); - perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz); + *KHz = cppc_perf_to_khz(perf_caps, perf); + perf_check = cppc_khz_to_perf(perf_caps, *KHz); step_check = perf_check / perf_step; /* @@ -561,8 +466,8 @@ static int cppc_get_cpu_power(struct device *cpu_dev, */ while ((*KHz == prev_freq) || (step_check != step)) { perf++; - *KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf); - perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz); + *KHz = cppc_perf_to_khz(perf_caps, perf); + perf_check = cppc_khz_to_perf(perf_caps, *KHz); step_check = perf_check / perf_step; } @@ -591,7 +496,7 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz, perf_caps = &cpu_data->perf_caps; max_cap = 
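The deleted helpers above (their role is taken over by cppc_perf_to_khz()/cppc_khz_to_perf() in the ACPI CPPC library) extrapolate a line through (lowest_perf, lowest_freq) and (nominal_perf, nominal_freq). A condensed sketch of the perf-to-kHz direction, assuming both frequency registers are populated and ignoring the DMI fallback:

#include <linux/math64.h>
#include <acpi/cppc_acpi.h>

static u64 perf_to_khz(struct cppc_perf_caps *caps, u64 perf)
{
	u64 mul = caps->nominal_freq - caps->lowest_freq;
	u64 div = caps->nominal_perf - caps->lowest_perf;
	s64 offset = caps->nominal_freq -
		     div64_u64(caps->nominal_perf * mul, div);
	s64 khz = offset + div64_u64(perf * mul, div);

	return khz > 0 ? khz : 0;
}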
arch_scale_cpu_capacity(cpu_dev->id); - perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz); + perf_prev = cppc_khz_to_perf(perf_caps, KHz); perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap; step = perf_prev / perf_step; @@ -679,10 +584,6 @@ static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu) goto free_mask; } - /* Convert the lowest and nominal freq from MHz to KHz */ - cpu_data->perf_caps.lowest_freq *= 1000; - cpu_data->perf_caps.nominal_freq *= 1000; - list_add(&cpu_data->node, &cpu_data_list); return cpu_data; @@ -724,20 +625,16 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) * Set min to lowest nonlinear perf to avoid any efficiency penalty (see * Section 8.4.7.1.1.5 of ACPI 6.1 spec) */ - policy->min = cppc_cpufreq_perf_to_khz(cpu_data, - caps->lowest_nonlinear_perf); - policy->max = cppc_cpufreq_perf_to_khz(cpu_data, - caps->nominal_perf); + policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf); + policy->max = cppc_perf_to_khz(caps, caps->nominal_perf); /* * Set cpuinfo.min_freq to Lowest to make the full range of performance * available if userspace wants to use any perf between lowest & lowest * nonlinear perf */ - policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data, - caps->lowest_perf); - policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data, - caps->nominal_perf); + policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf); + policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->nominal_perf); policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu); policy->shared_type = cpu_data->shared_type; @@ -773,7 +670,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) boost_supported = true; /* Set policy->cur to max now. The governors will adjust later. 
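The cpufreq.c hunks above make frequency invariance scale against arch_scale_freq_ref() rather than cpuinfo.max_freq, so the reference no longer moves when boost toggles max_freq. The scale itself remains the usual 1024-based fixed-point ratio; a sketch (SCHED_CAPACITY_SHIFT is 10):

#include <linux/sched.h>

static unsigned long freq_scale(unsigned long cur_khz, unsigned long ref_khz)
{
	return (cur_khz << SCHED_CAPACITY_SHIFT) / ref_khz;
}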
*/ - policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf); + policy->cur = cppc_perf_to_khz(caps, caps->highest_perf); cpu_data->perf_ctrls.desired_perf = caps->highest_perf; ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); @@ -863,7 +760,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0, &fb_ctrs_t1); - return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf); + return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf); } static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state) @@ -878,11 +775,9 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state) } if (state) - policy->max = cppc_cpufreq_perf_to_khz(cpu_data, - caps->highest_perf); + policy->max = cppc_perf_to_khz(caps, caps->highest_perf); else - policy->max = cppc_cpufreq_perf_to_khz(cpu_data, - caps->nominal_perf); + policy->max = cppc_perf_to_khz(caps, caps->nominal_perf); policy->cpuinfo.max_freq = policy->max; ret = freq_qos_update_request(policy->max_freq_req, policy->max); @@ -937,7 +832,7 @@ static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu) if (ret < 0) return -EIO; - return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf); + return cppc_perf_to_khz(&cpu_data->perf_caps, desired_perf); } static void cppc_check_hisi_workaround(void) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 934d35f570b7..44db4f59c4cc 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -454,7 +454,7 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy, arch_set_freq_scale(policy->related_cpus, policy->cur, - policy->cpuinfo.max_freq); + arch_scale_freq_ref(policy->cpu)); spin_lock(&policy->transition_lock); policy->transition_ongoing = false; @@ -2174,7 +2174,7 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, policy->cur = freq; arch_set_freq_scale(policy->related_cpus, freq, - policy->cpuinfo.max_freq); + arch_scale_freq_ref(policy->cpu)); cpufreq_stats_record_transition(policy, freq); if (trace_cpu_frequency_enabled()) { diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index a534a1f7f1ee..2ca70b0b5fdc 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -302,7 +302,10 @@ static bool hwp_forced __read_mostly; static struct cpufreq_driver *intel_pstate_driver __read_mostly; -#define HYBRID_SCALING_FACTOR 78741 +#define HYBRID_SCALING_FACTOR 78741 +#define HYBRID_SCALING_FACTOR_MTL 80000 + +static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR; static inline int core_get_scaling(void) { @@ -422,7 +425,7 @@ static int intel_pstate_cppc_get_scaling(int cpu) */ if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq && cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq) - return HYBRID_SCALING_FACTOR; + return hybrid_scaling_factor; return core_get_scaling(); } @@ -1692,13 +1695,6 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata) cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); /* - * If this CPU gen doesn't call for change in balance_perf - * EPP return. 
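With the intel_hybrid_scaling_factor[] table above, hybrid_scaling_factor expresses how many kHz one HWP performance level is worth on P-cores: 78741 by default, 80000 on Meteor Lake-L. Worked sketch of the conversion (assuming, as in the driver, that the factor already carries kHz-per-level units):

/* e.g. level 30 on MTL: 30 * 80000 = 2,400,000 kHz = 2.4 GHz */
static unsigned int hwp_level_to_khz(int level, int scaling)
{
	return level * scaling;
}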
- */ - if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) - return; - - /* * If the EPP is set by firmware, which means that firmware enabled HWP * - Is equal or less than 0x80 (default balance_perf EPP) * - But less performance oriented than performance EPP @@ -1711,6 +1707,13 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata) } /* + * If this CPU gen doesn't call for change in balance_perf + * EPP return. + */ + if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) + return; + + /* * Use hard coded value per gen to update the balance_perf * and default EPP. */ @@ -1968,7 +1971,7 @@ static int hwp_get_cpu_scaling(int cpu) smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1); /* P-cores have a smaller perf level-to-freqency scaling factor. */ if (cpu_type == 0x40) - return HYBRID_SCALING_FACTOR; + return hybrid_scaling_factor; /* Use default core scaling for E-cores */ if (cpu_type == 0x20) @@ -2406,6 +2409,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { X86_MATCH(ICELAKE_X, core_funcs), X86_MATCH(TIGERLAKE, core_funcs), X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), + X86_MATCH(EMERALDRAPIDS_X, core_funcs), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); @@ -3398,6 +3402,11 @@ static const struct x86_cpu_id intel_epp_balance_perf[] = { {} }; +static const struct x86_cpu_id intel_hybrid_scaling_factor[] = { + X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL), + {} +}; + static int __init intel_pstate_init(void) { static struct cpudata **_all_cpu_data; @@ -3488,9 +3497,16 @@ hwp_cpu_matched: if (hwp_active) { const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf); + const struct x86_cpu_id *hybrid_id = x86_match_cpu(intel_hybrid_scaling_factor); if (id) epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data; + + if (hybrid_id) { + hybrid_scaling_factor = hybrid_id->driver_data; + pr_debug("hybrid scaling factor: %d\n", hybrid_scaling_factor); + } + } mutex_lock(&intel_pstate_driver_lock); diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index c8a7ccc42c16..4ee23f4ebf4a 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -334,8 +334,11 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev) #ifdef CONFIG_COMMON_CLK /* dummy clock provider as needed by OPP if clocks property is used */ - if (of_property_present(dev->of_node, "#clock-cells")) - devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL); + if (of_property_present(dev->of_node, "#clock-cells")) { + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL); + if (ret) + return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__); + } #endif ret = cpufreq_register_driver(&scmi_cpufreq_driver); diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c index e66df22f9695..d8515d5c0853 100644 --- a/drivers/cpuidle/cpuidle-haltpoll.c +++ b/drivers/cpuidle/cpuidle-haltpoll.c @@ -25,13 +25,12 @@ MODULE_PARM_DESC(force, "Load unconditionally"); static struct cpuidle_device __percpu *haltpoll_cpuidle_devices; static enum cpuhp_state haltpoll_hp_state; -static int default_enter_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) +static __cpuidle int default_enter_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) { - if (current_clr_polling_and_test()) { - local_irq_enable(); + if (current_clr_polling_and_test()) return index; - } + 
arch_cpu_idle(); return index; } diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index fcaccd0b5a65..e4d3f45242f6 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -906,7 +906,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) /* * The length of the ID shouldn't be assumed by software since * it may change in the future. The allocation size is limited - * to 1 << (PAGE_SHIFT + MAX_ORDER) by the page allocator. + * to 1 << (PAGE_SHIFT + MAX_PAGE_ORDER) by the page allocator. * If the allocation fails, simply return ENOMEM rather than * warning in the kernel log. */ diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 3df7a256e919..5c1012d7ffa9 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -70,11 +70,11 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, HISI_ACC_SGL_ALIGN_SIZE); /* - * the pool may allocate a block of memory of size PAGE_SIZE * 2^MAX_ORDER, + * the pool may allocate a block of memory of size PAGE_SIZE * 2^MAX_PAGE_ORDER, * block size may exceed 2^31 on ia64, so the max of block size is 2^31 */ - block_size = 1 << (PAGE_SHIFT + MAX_ORDER < 32 ? - PAGE_SHIFT + MAX_ORDER : 31); + block_size = 1 << (PAGE_SHIFT + MAX_PAGE_ORDER < 32 ? + PAGE_SHIFT + MAX_PAGE_ORDER : 31); sgl_num_per_block = block_size / sgl_size; block_num = count / sgl_num_per_block; remain_sgl = count % sgl_num_per_block; diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c index 1659b787b65f..1ff1ab5fa105 100644 --- a/drivers/dax/bus.c +++ b/drivers/dax/bus.c @@ -367,6 +367,7 @@ static ssize_t create_store(struct device *dev, struct device_attribute *attr, .dax_region = dax_region, .size = 0, .id = -1, + .memmap_on_memory = false, }; struct dev_dax *dev_dax = devm_create_dev_dax(&data); @@ -1400,6 +1401,8 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data) dev_dax->align = dax_region->align; ida_init(&dev_dax->ida); + dev_dax->memmap_on_memory = data->memmap_on_memory; + inode = dax_inode(dax_dev); dev->devt = inode->i_rdev; dev->bus = &dax_bus_type; diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h index 1ccd23360124..cbbf64443098 100644 --- a/drivers/dax/bus.h +++ b/drivers/dax/bus.h @@ -23,6 +23,7 @@ struct dev_dax_data { struct dev_pagemap *pgmap; resource_size_t size; int id; + bool memmap_on_memory; }; struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data); diff --git a/drivers/dax/cxl.c b/drivers/dax/cxl.c index 8bc9d04034d6..c696837ab23c 100644 --- a/drivers/dax/cxl.c +++ b/drivers/dax/cxl.c @@ -26,6 +26,7 @@ static int cxl_dax_region_probe(struct device *dev) .dax_region = dax_region, .id = -1, .size = range_len(&cxlr_dax->hpa_range), + .memmap_on_memory = true, }; return PTR_ERR_OR_ZERO(devm_create_dev_dax(&data)); diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h index 27cf2daaaa79..446617b73aea 100644 --- a/drivers/dax/dax-private.h +++ b/drivers/dax/dax-private.h @@ -70,6 +70,7 @@ struct dev_dax { struct ida ida; struct device dev; struct dev_pagemap *pgmap; + bool memmap_on_memory; int nr_range; struct dev_dax_range { unsigned long pgoff; diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c index 5d2ddef0f8f5..b9da69f92697 100644 --- a/drivers/dax/hmem/hmem.c +++ b/drivers/dax/hmem/hmem.c @@ -36,6 +36,7 @@ static int dax_hmem_probe(struct platform_device *pdev) .dax_region = dax_region, .id = -1, .size = region_idle ? 
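The hisi_acc SGL hunk above preserves the overflow clamp across the rename: the pool block is 2^(PAGE_SHIFT + MAX_PAGE_ORDER) bytes, capped at 2^31 so the 32-bit math stays safe on large-page configurations. Equivalent sketch:

#include <linux/mm.h>
#include <linux/minmax.h>

static u32 sgl_pool_block_size(void)
{
	int shift = min(PAGE_SHIFT + MAX_PAGE_ORDER, 31);

	return 1U << shift;
}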
0 : range_len(&mri->range), + .memmap_on_memory = false, }; return PTR_ERR_OR_ZERO(devm_create_dev_dax(&data)); diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c index 369c698b7706..42ee360cf4e3 100644 --- a/drivers/dax/kmem.c +++ b/drivers/dax/kmem.c @@ -12,6 +12,7 @@ #include <linux/mm.h> #include <linux/mman.h> #include <linux/memory-tiers.h> +#include <linux/memory_hotplug.h> #include "dax-private.h" #include "bus.h" @@ -93,6 +94,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax) struct dax_kmem_data *data; struct memory_dev_type *mtype; int i, rc, mapped = 0; + mhp_t mhp_flags; int numa_node; int adist = MEMTIER_DEFAULT_DAX_ADISTANCE; @@ -179,12 +181,16 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax) */ res->flags = IORESOURCE_SYSTEM_RAM; + mhp_flags = MHP_NID_IS_MGID; + if (dev_dax->memmap_on_memory) + mhp_flags |= MHP_MEMMAP_ON_MEMORY; + /* * Ensure that future kexec'd kernels will not treat * this as RAM automatically. */ rc = add_memory_driver_managed(data->mgid, range.start, - range_len(&range), kmem_name, MHP_NID_IS_MGID); + range_len(&range), kmem_name, mhp_flags); if (rc) { dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n", diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c index ae0cb113a5d3..f3c6c67b8412 100644 --- a/drivers/dax/pmem.c +++ b/drivers/dax/pmem.c @@ -63,6 +63,7 @@ static struct dev_dax *__dax_pmem_probe(struct device *dev) .id = id, .pgmap = &pgmap, .size = range_len(&range), + .memmap_on_memory = false, }; return devm_create_dev_dax(&data); diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index b3a68d5833bd..98657d3b9435 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -461,10 +461,14 @@ static void devfreq_monitor(struct work_struct *work) if (err) dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err); + if (devfreq->stop_polling) + goto out; + queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); - mutex_unlock(&devfreq->lock); +out: + mutex_unlock(&devfreq->lock); trace_devfreq_monitor(devfreq); } @@ -483,6 +487,10 @@ void devfreq_monitor_start(struct devfreq *devfreq) if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN)) return; + mutex_lock(&devfreq->lock); + if (delayed_work_pending(&devfreq->work)) + goto out; + switch (devfreq->profile->timer) { case DEVFREQ_TIMER_DEFERRABLE: INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor); @@ -491,12 +499,16 @@ void devfreq_monitor_start(struct devfreq *devfreq) INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor); break; default: - return; + goto out; } if (devfreq->profile->polling_ms) queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); + +out: + devfreq->stop_polling = false; + mutex_unlock(&devfreq->lock); } EXPORT_SYMBOL(devfreq_monitor_start); @@ -513,6 +525,14 @@ void devfreq_monitor_stop(struct devfreq *devfreq) if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN)) return; + mutex_lock(&devfreq->lock); + if (devfreq->stop_polling) { + mutex_unlock(&devfreq->lock); + return; + } + + devfreq->stop_polling = true; + mutex_unlock(&devfreq->lock); cancel_delayed_work_sync(&devfreq->work); } EXPORT_SYMBOL(devfreq_monitor_stop); @@ -1688,7 +1708,7 @@ static ssize_t trans_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(dev); - ssize_t len; + ssize_t len = 0; int i, j; unsigned int max_state; @@ -1697,7 +1717,7 @@ static ssize_t trans_stat_show(struct device *dev, max_state = 
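The devfreq_monitor()/devfreq_monitor_start()/devfreq_monitor_stop() hunks above close a rearm-after-cancel race: the worker now rechecks stop_polling under devfreq->lock before requeueing itself. Condensed sketch of the pattern, reusing devfreq.c's internal names (devfreq_wq, stop_polling) and eliding the dvfs update:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void monitor_work(struct work_struct *work)
{
	struct devfreq *df = container_of(to_delayed_work(work),
					  struct devfreq, work);

	mutex_lock(&df->lock);
	if (!df->stop_polling)	/* the stopper sets this under df->lock */
		queue_delayed_work(devfreq_wq, &df->work,
				   msecs_to_jiffies(df->profile->polling_ms));
	mutex_unlock(&df->lock);
}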
df->max_state; if (max_state == 0) - return sprintf(buf, "Not Supported.\n"); + return sysfs_emit(buf, "Not Supported.\n"); mutex_lock(&df->lock); if (!df->stop_polling && @@ -1707,31 +1727,49 @@ static ssize_t trans_stat_show(struct device *dev, } mutex_unlock(&df->lock); - len = sprintf(buf, " From : To\n"); - len += sprintf(buf + len, " :"); - for (i = 0; i < max_state; i++) - len += sprintf(buf + len, "%10lu", - df->freq_table[i]); + len += sysfs_emit_at(buf, len, " From : To\n"); + len += sysfs_emit_at(buf, len, " :"); + for (i = 0; i < max_state; i++) { + if (len >= PAGE_SIZE - 1) + break; + len += sysfs_emit_at(buf, len, "%10lu", + df->freq_table[i]); + } - len += sprintf(buf + len, " time(ms)\n"); + if (len >= PAGE_SIZE - 1) + return PAGE_SIZE - 1; + len += sysfs_emit_at(buf, len, " time(ms)\n"); for (i = 0; i < max_state; i++) { - if (df->freq_table[i] == df->previous_freq) - len += sprintf(buf + len, "*"); + if (len >= PAGE_SIZE - 1) + break; + if (df->freq_table[i] == df->previous_freq) + len += sysfs_emit_at(buf, len, "*"); else - len += sprintf(buf + len, " "); - - len += sprintf(buf + len, "%10lu:", df->freq_table[i]); - for (j = 0; j < max_state; j++) - len += sprintf(buf + len, "%10u", + len += sysfs_emit_at(buf, len, " "); + if (len >= PAGE_SIZE - 1) + break; + len += sysfs_emit_at(buf, len, "%10lu:", df->freq_table[i]); + for (j = 0; j < max_state; j++) { + if (len >= PAGE_SIZE - 1) + break; + len += sysfs_emit_at(buf, len, "%10u", df->stats.trans_table[(i * max_state) + j]); + } + if (len >= PAGE_SIZE - 1) + break; + len += sysfs_emit_at(buf, len, "%10llu\n", (u64) + jiffies64_to_msecs(df->stats.time_in_state[i])); + } - len += sprintf(buf + len, "%10llu\n", (u64) - jiffies64_to_msecs(df->stats.time_in_state[i])); + if (len < PAGE_SIZE - 1) + len += sysfs_emit_at(buf, len, "Total transition : %u\n", + df->stats.total_trans); + if (len >= PAGE_SIZE - 1) { + pr_warn_once("devfreq transition table exceeds PAGE_SIZE. 
Disabling\n"); + return -EFBIG; } - len += sprintf(buf + len, "Total transition : %u\n", - df->stats.total_trans); return len; } diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index 8b31cd54bdb6..ae17ce4d9722 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -22,6 +22,7 @@ #include <linux/of_platform.h> #include <linux/panic_notifier.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regmap.h> #include <linux/types.h> #include <linux/uaccess.h> @@ -279,7 +280,6 @@ release: static int altr_sdram_probe(struct platform_device *pdev) { - const struct of_device_id *id; struct edac_mc_layer layers[2]; struct mem_ctl_info *mci; struct altr_sdram_mc_data *drvdata; @@ -290,10 +290,6 @@ static int altr_sdram_probe(struct platform_device *pdev) int irq, irq2, res = 0; unsigned long mem_size, irqflags = 0; - id = of_match_device(altr_sdram_ctrl_of_match, &pdev->dev); - if (!id) - return -ENODEV; - /* Grab the register range from the sdr controller in device tree */ mc_vbase = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "altr,sdr-syscon"); @@ -304,8 +300,7 @@ static int altr_sdram_probe(struct platform_device *pdev) } /* Check specific dependencies for the module */ - priv = of_match_node(altr_sdram_ctrl_of_match, - pdev->dev.of_node)->data; + priv = device_get_match_data(&pdev->dev); /* Validate the SDRAM controller has ECC enabled */ if (regmap_read(mc_vbase, priv->ecc_ctrl_offset, &read_reg) || @@ -459,15 +454,13 @@ free: return res; } -static int altr_sdram_remove(struct platform_device *pdev) +static void altr_sdram_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); platform_set_drvdata(pdev, NULL); - - return 0; } /* @@ -489,7 +482,7 @@ static const struct dev_pm_ops altr_sdram_pm_ops = { static struct platform_driver altr_sdram_edac_driver = { .probe = altr_sdram_probe, - .remove = altr_sdram_remove, + .remove_new = altr_sdram_remove, .driver = { .name = "altr_sdram_edac", #ifdef CONFIG_PM @@ -812,7 +805,7 @@ fail: return res; } -static int altr_edac_device_remove(struct platform_device *pdev) +static void altr_edac_device_remove(struct platform_device *pdev) { struct edac_device_ctl_info *dci = platform_get_drvdata(pdev); struct altr_edac_device_dev *drvdata = dci->pvt_info; @@ -820,13 +813,11 @@ static int altr_edac_device_remove(struct platform_device *pdev) debugfs_remove_recursive(drvdata->debugfs_dir); edac_device_del_device(&pdev->dev); edac_device_free_ctl_info(dci); - - return 0; } static struct platform_driver altr_edac_device_driver = { .probe = altr_edac_device_probe, - .remove = altr_edac_device_remove, + .remove_new = altr_edac_device_remove, .driver = { .name = "altr_edac_device", .of_match_table = altr_edac_device_of_match, diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 9b6642d00871..537b9987a431 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -996,15 +996,23 @@ static struct local_node_map { #define LNTM_NODE_COUNT GENMASK(27, 16) #define LNTM_BASE_NODE_ID GENMASK(11, 0) -static int gpu_get_node_map(void) +static int gpu_get_node_map(struct amd64_pvt *pvt) { struct pci_dev *pdev; int ret; u32 tmp; /* - * Node ID 0 is reserved for CPUs. - * Therefore, a non-zero Node ID means we've already cached the values. + * Mapping of nodes from hardware-provided AMD Node ID to a + * Linux logical one is applicable for MI200 models. 
Therefore, + * return early for other heterogeneous systems. + */ + if (pvt->F3->device != PCI_DEVICE_ID_AMD_MI200_DF_F3) + return 0; + + /* + * Node ID 0 is reserved for CPUs. Therefore, a non-zero Node ID + * means the values have been already cached. */ if (gpu_node_map.base_node_id) return 0; @@ -3851,7 +3859,7 @@ static void gpu_init_csrows(struct mem_ctl_info *mci) dimm->nr_pages = gpu_get_csrow_nr_pages(pvt, umc, cs); dimm->edac_mode = EDAC_SECDED; - dimm->mtype = MEM_HBM2; + dimm->mtype = pvt->dram_type; dimm->dtype = DEV_X16; dimm->grain = 64; } @@ -3880,7 +3888,7 @@ static bool gpu_ecc_enabled(struct amd64_pvt *pvt) return true; } -static inline u32 gpu_get_umc_base(u8 umc, u8 channel) +static inline u32 gpu_get_umc_base(struct amd64_pvt *pvt, u8 umc, u8 channel) { /* * On CPUs, there is one channel per UMC, so UMC numbering equals @@ -3893,13 +3901,16 @@ static inline u32 gpu_get_umc_base(u8 umc, u8 channel) * On GPU nodes channels are selected in 3rd nibble * HBM chX[3:0]= [Y ]5X[3:0]000; * HBM chX[7:4]= [Y+1]5X[3:0]000 + * + * On MI300 APU nodes, same as GPU nodes but channels are selected + * in the base address of 0x90000 */ umc *= 2; if (channel >= 4) umc++; - return 0x50000 + (umc << 20) + ((channel % 4) << 12); + return pvt->gpu_umc_base + (umc << 20) + ((channel % 4) << 12); } static void gpu_read_mc_regs(struct amd64_pvt *pvt) @@ -3910,7 +3921,7 @@ static void gpu_read_mc_regs(struct amd64_pvt *pvt) /* Read registers from each UMC */ for_each_umc(i) { - umc_base = gpu_get_umc_base(i, 0); + umc_base = gpu_get_umc_base(pvt, i, 0); umc = &pvt->umc[i]; amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg); @@ -3927,7 +3938,7 @@ static void gpu_read_base_mask(struct amd64_pvt *pvt) for_each_umc(umc) { for_each_chip_select(cs, umc, pvt) { - base_reg = gpu_get_umc_base(umc, cs) + UMCCH_BASE_ADDR; + base_reg = gpu_get_umc_base(pvt, umc, cs) + UMCCH_BASE_ADDR; base = &pvt->csels[umc].csbases[cs]; if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) { @@ -3935,7 +3946,7 @@ static void gpu_read_base_mask(struct amd64_pvt *pvt) umc, cs, *base, base_reg); } - mask_reg = gpu_get_umc_base(umc, cs) + UMCCH_ADDR_MASK; + mask_reg = gpu_get_umc_base(pvt, umc, cs) + UMCCH_ADDR_MASK; mask = &pvt->csels[umc].csmasks[cs]; if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) { @@ -3960,7 +3971,7 @@ static int gpu_hw_info_get(struct amd64_pvt *pvt) { int ret; - ret = gpu_get_node_map(); + ret = gpu_get_node_map(pvt); if (ret) return ret; @@ -4125,6 +4136,8 @@ static int per_family_init(struct amd64_pvt *pvt) if (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) { pvt->ctl_name = "MI200"; pvt->max_mcs = 4; + pvt->dram_type = MEM_HBM2; + pvt->gpu_umc_base = 0x50000; pvt->ops = &gpu_ops; } else { pvt->ctl_name = "F19h_M30h"; @@ -4142,6 +4155,13 @@ static int per_family_init(struct amd64_pvt *pvt) pvt->ctl_name = "F19h_M70h"; pvt->flags.zn_regs_v2 = 1; break; + case 0x90 ... 0x9f: + pvt->ctl_name = "F19h_M90h"; + pvt->max_mcs = 4; + pvt->dram_type = MEM_HBM3; + pvt->gpu_umc_base = 0x90000; + pvt->ops = &gpu_ops; + break; case 0xa0 ... 0xaf: pvt->ctl_name = "F19h_MA0h"; pvt->max_mcs = 12; @@ -4180,23 +4200,33 @@ static const struct attribute_group *amd64_edac_attr_groups[] = { NULL }; +/* + * For heterogeneous and APU models EDAC CHIP_SELECT and CHANNEL layers + * should be swapped to fit into the layers. + */ +static unsigned int get_layer_size(struct amd64_pvt *pvt, u8 layer) +{ + bool is_gpu = (pvt->ops == &gpu_ops); + + if (!layer) + return is_gpu ? 
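gpu_get_umc_base() above packs the doubled UMC index into bits [23:20] and the channel-within-group into bits [13:12] on top of the per-family base. A standalone sketch with a worked example:

static u32 umc_base(u32 gpu_umc_base, u32 umc, u32 channel)
{
	umc *= 2;
	if (channel >= 4)
		umc++;

	return gpu_umc_base + (umc << 20) + ((channel % 4) << 12);
}

/* umc_base(0x50000, 1, 5) == 0x50000 + (3 << 20) + (1 << 12) == 0x351000 */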
pvt->max_mcs + : pvt->csels[0].b_cnt; + else + return is_gpu ? pvt->csels[0].b_cnt + : pvt->max_mcs; +} + static int init_one_instance(struct amd64_pvt *pvt) { struct mem_ctl_info *mci = NULL; struct edac_mc_layer layers[2]; int ret = -ENOMEM; - /* - * For Heterogeneous family EDAC CHIP_SELECT and CHANNEL layers should - * be swapped to fit into the layers. - */ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; - layers[0].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ? - pvt->max_mcs : pvt->csels[0].b_cnt; + layers[0].size = get_layer_size(pvt, 0); layers[0].is_virt_csrow = true; layers[1].type = EDAC_MC_LAYER_CHANNEL; - layers[1].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ? - pvt->csels[0].b_cnt : pvt->max_mcs; + layers[1].size = get_layer_size(pvt, 1); layers[1].is_virt_csrow = false; mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0); diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 5a4e4a59682b..1665f7932bac 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -362,6 +362,7 @@ struct amd64_pvt { u32 dct_sel_lo; /* DRAM Controller Select Low */ u32 dct_sel_hi; /* DRAM Controller Select High */ u32 online_spare; /* On-Line spare Reg */ + u32 gpu_umc_base; /* Base address used for channel selection on GPUs */ /* x4, x8, or x16 syndromes in use */ u8 ecc_sym_sz; diff --git a/drivers/edac/armada_xp_edac.c b/drivers/edac/armada_xp_edac.c index c4bd2fb9c46b..25517c99b3ea 100644 --- a/drivers/edac/armada_xp_edac.c +++ b/drivers/edac/armada_xp_edac.c @@ -5,7 +5,9 @@ #include <linux/kernel.h> #include <linux/edac.h> -#include <linux/of_platform.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> #include <asm/hardware/cache-l2x0.h> #include <asm/hardware/cache-aurora-l2.h> @@ -351,20 +353,18 @@ static int axp_mc_probe(struct platform_device *pdev) return 0; } -static int axp_mc_remove(struct platform_device *pdev) +static void axp_mc_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); platform_set_drvdata(pdev, NULL); - - return 0; } static struct platform_driver axp_mc_driver = { .probe = axp_mc_probe, - .remove = axp_mc_remove, + .remove_new = axp_mc_remove, .driver = { .name = "armada_xp_mc_edac", .of_match_table = of_match_ptr(axp_mc_of_match), @@ -564,7 +564,7 @@ static int aurora_l2_probe(struct platform_device *pdev) return 0; } -static int aurora_l2_remove(struct platform_device *pdev) +static void aurora_l2_remove(struct platform_device *pdev) { struct edac_device_ctl_info *dci = platform_get_drvdata(pdev); #ifdef CONFIG_EDAC_DEBUG @@ -575,13 +575,11 @@ static int aurora_l2_remove(struct platform_device *pdev) edac_device_del_device(&pdev->dev); edac_device_free_ctl_info(dci); platform_set_drvdata(pdev, NULL); - - return 0; } static struct platform_driver aurora_l2_driver = { .probe = aurora_l2_probe, - .remove = aurora_l2_remove, + .remove_new = aurora_l2_remove, .driver = { .name = "aurora_l2_edac", .of_match_table = of_match_ptr(aurora_l2_of_match), diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c index 6bd5f8815919..157a480eb761 100644 --- a/drivers/edac/aspeed_edac.c +++ b/drivers/edac/aspeed_edac.c @@ -357,7 +357,7 @@ probe_exit02: } -static int aspeed_remove(struct platform_device *pdev) +static void aspeed_remove(struct platform_device *pdev) { struct mem_ctl_info *mci; @@ -369,8 +369,6 @@ static int aspeed_remove(struct platform_device *pdev) mci = 
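The long run of EDAC changes through this series follows one template: platform .remove callbacks move to .remove_new and return void, since the platform core ignored the old int return anyway. Generic sketch with hypothetical driver names:

#include <linux/platform_device.h>
#include "edac_module.h"	/* mem_ctl_info, edac_mc_* helpers */

static int example_edac_probe(struct platform_device *pdev)
{
	return 0;	/* controller setup elided */
}

static void example_edac_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
}

static struct platform_driver example_edac_driver = {
	.probe      = example_edac_probe,
	.remove_new = example_edac_remove,
	.driver     = { .name = "example_edac" },
};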
edac_mc_del_mc(&pdev->dev); if (mci) edac_mc_free(mci); - - return 0; } @@ -389,7 +387,7 @@ static struct platform_driver aspeed_driver = { .of_match_table = aspeed_of_match }, .probe = aspeed_probe, - .remove = aspeed_remove + .remove_new = aspeed_remove }; module_platform_driver(aspeed_driver); diff --git a/drivers/edac/bluefield_edac.c b/drivers/edac/bluefield_edac.c index e4736eb37bfb..5b3164560648 100644 --- a/drivers/edac/bluefield_edac.c +++ b/drivers/edac/bluefield_edac.c @@ -323,14 +323,12 @@ err: } -static int bluefield_edac_mc_remove(struct platform_device *pdev) +static void bluefield_edac_mc_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); - - return 0; } static const struct acpi_device_id bluefield_mc_acpi_ids[] = { @@ -346,7 +344,7 @@ static struct platform_driver bluefield_edac_mc_driver = { .acpi_match_table = bluefield_mc_acpi_ids, }, .probe = bluefield_edac_mc_probe, - .remove = bluefield_edac_mc_remove, + .remove_new = bluefield_edac_mc_remove, }; module_platform_driver(bluefield_edac_mc_driver); diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c index bc1f3416400e..2000f66fbf5c 100644 --- a/drivers/edac/cell_edac.c +++ b/drivers/edac/cell_edac.c @@ -234,12 +234,11 @@ static int cell_edac_probe(struct platform_device *pdev) return 0; } -static int cell_edac_remove(struct platform_device *pdev) +static void cell_edac_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev); if (mci) edac_mc_free(mci); - return 0; } static struct platform_driver cell_edac_driver = { @@ -247,7 +246,7 @@ static struct platform_driver cell_edac_driver = { .name = "cbe-mic", }, .probe = cell_edac_probe, - .remove = cell_edac_remove, + .remove_new = cell_edac_remove, }; static int __init cell_edac_init(void) diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c index 9797e6d60dde..5075dc7526e3 100644 --- a/drivers/edac/cpc925_edac.c +++ b/drivers/edac/cpc925_edac.c @@ -1010,7 +1010,7 @@ out: return res; } -static int cpc925_remove(struct platform_device *pdev) +static void cpc925_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); @@ -1023,13 +1023,11 @@ static int cpc925_remove(struct platform_device *pdev) edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); - - return 0; } static struct platform_driver cpc925_edac_driver = { .probe = cpc925_probe, - .remove = cpc925_remove, + .remove_new = cpc925_remove, .driver = { .name = "cpc925_edac", } diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c index 1fa5ca57e9ec..4e30b989a1a4 100644 --- a/drivers/edac/dmc520_edac.c +++ b/drivers/edac/dmc520_edac.c @@ -602,7 +602,7 @@ err: return ret; } -static int dmc520_edac_remove(struct platform_device *pdev) +static void dmc520_edac_remove(struct platform_device *pdev) { u32 reg_val, idx, irq_mask_all = 0; struct mem_ctl_info *mci; @@ -626,8 +626,6 @@ static int dmc520_edac_remove(struct platform_device *pdev) edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); - - return 0; } static const struct of_device_id dmc520_edac_driver_id[] = { @@ -644,7 +642,7 @@ static struct platform_driver dmc520_edac_driver = { }, .probe = dmc520_edac_probe, - .remove = dmc520_edac_remove + .remove_new = dmc520_edac_remove }; module_platform_driver(dmc520_edac_driver); diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 6faeb2ab3960..d6eed727b0cd 100644 --- a/drivers/edac/edac_mc.c +++ 
b/drivers/edac/edac_mc.c @@ -166,6 +166,7 @@ const char * const edac_mem_types[] = { [MEM_NVDIMM] = "Non-volatile-RAM", [MEM_WIO2] = "Wide-IO-2", [MEM_HBM2] = "High-bandwidth-memory-Gen2", + [MEM_HBM3] = "High-bandwidth-memory-Gen3", }; EXPORT_SYMBOL_GPL(edac_mem_types); diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index 287cc51dbc86..901d4cd3ca38 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c @@ -521,7 +521,7 @@ static void edac_pci_dev_parity_clear(struct pci_dev *dev) /* read the device TYPE, looking for bridges */ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); - if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) + if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE) get_pci_parity_status(dev, 1); } @@ -583,7 +583,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) edac_dbg(4, "PCI HEADER TYPE= 0x%02x %s\n", header_type, dev_name(&dev->dev)); - if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { + if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE) { /* On bridges, need to examine secondary status register */ status = get_pci_parity_status(dev, 1); diff --git a/drivers/edac/fsl_ddr_edac.c b/drivers/edac/fsl_ddr_edac.c index b81757555a8a..d148d262d0d4 100644 --- a/drivers/edac/fsl_ddr_edac.c +++ b/drivers/edac/fsl_ddr_edac.c @@ -612,7 +612,7 @@ err: return res; } -int fsl_mc_err_remove(struct platform_device *op) +void fsl_mc_err_remove(struct platform_device *op) { struct mem_ctl_info *mci = dev_get_drvdata(&op->dev); struct fsl_mc_pdata *pdata = mci->pvt_info; @@ -629,5 +629,4 @@ int fsl_mc_err_remove(struct platform_device *op) edac_mc_del_mc(&op->dev); edac_mc_free(mci); - return 0; } diff --git a/drivers/edac/fsl_ddr_edac.h b/drivers/edac/fsl_ddr_edac.h index 332439d7b2d9..c0994a2a003c 100644 --- a/drivers/edac/fsl_ddr_edac.h +++ b/drivers/edac/fsl_ddr_edac.h @@ -72,5 +72,5 @@ struct fsl_mc_pdata { int irq; }; int fsl_mc_err_probe(struct platform_device *op); -int fsl_mc_err_remove(struct platform_device *op); +void fsl_mc_err_remove(struct platform_device *op); #endif diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c index 140d4431bd0d..5646c049a934 100644 --- a/drivers/edac/highbank_l2_edac.c +++ b/drivers/edac/highbank_l2_edac.c @@ -118,18 +118,17 @@ err: return res; } -static int highbank_l2_err_remove(struct platform_device *pdev) +static void highbank_l2_err_remove(struct platform_device *pdev) { struct edac_device_ctl_info *dci = platform_get_drvdata(pdev); edac_device_del_device(&pdev->dev); edac_device_free_ctl_info(dci); - return 0; } static struct platform_driver highbank_l2_edac_driver = { .probe = highbank_l2_err_probe, - .remove = highbank_l2_err_remove, + .remove_new = highbank_l2_err_remove, .driver = { .name = "hb_l2_edac", .of_match_table = hb_l2_err_of_match, diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c index a0c04a7f95e9..1c5b888ab11d 100644 --- a/drivers/edac/highbank_mc_edac.c +++ b/drivers/edac/highbank_mc_edac.c @@ -251,18 +251,17 @@ free: return res; } -static int highbank_mc_remove(struct platform_device *pdev) +static void highbank_mc_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); - return 0; } static struct platform_driver highbank_mc_edac_driver = { .probe = highbank_mc_probe, - .remove = highbank_mc_remove, + .remove_new = highbank_mc_remove, .driver = { .name = "hb_mc_edac", 
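The edac_pci_sysfs hunks above replace the bare 0x7F with PCI_HEADER_TYPE_MASK: bit 7 of the header-type byte is the multi-function flag and must be stripped before comparing the type. Sketch:

#include <linux/pci.h>

static bool header_is_bridge(u8 header_type)
{
	return (header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE;
}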
.of_match_table = hb_ddr_ctrl_of_match, diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 23d25724bae4..91e0a88ef904 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -376,7 +376,7 @@ static const struct pci_id_table pci_dev_table[] = { PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem), PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield), PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere), - {0,} /* 0 terminated list. */ + { NULL, } }; /* @@ -385,7 +385,7 @@ static const struct pci_id_table pci_dev_table[] = { static const struct pci_device_id i7core_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)}, - {0,} /* 0 terminated list. */ + { 0, } }; /**************************************************************************** diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c index 1a18693294db..2b0ecdeba5cd 100644 --- a/drivers/edac/igen6_edac.c +++ b/drivers/edac/igen6_edac.c @@ -58,6 +58,7 @@ /* Capability register E */ #define CAPID_E_OFFSET 0xf0 #define CAPID_E_IBECC BIT(12) +#define CAPID_E_IBECC_BIT18 BIT(18) /* Error Status */ #define ERRSTS_OFFSET 0xc8 @@ -80,6 +81,7 @@ #define ECC_ERROR_LOG_UE BIT_ULL(63) #define ECC_ERROR_LOG_ADDR_SHIFT 5 #define ECC_ERROR_LOG_ADDR(v) GET_BITFIELD(v, 5, 38) +#define ECC_ERROR_LOG_ADDR45(v) GET_BITFIELD(v, 5, 45) #define ECC_ERROR_LOG_SYND(v) GET_BITFIELD(v, 46, 61) /* Host MMIO base address */ @@ -133,6 +135,8 @@ static struct res_config { u32 ibecc_base; u32 ibecc_error_log_offset; bool (*ibecc_available)(struct pci_dev *pdev); + /* Extract error address logged in IBECC */ + u64 (*err_addr)(u64 ecclog); /* Convert error address logged in IBECC to system physical address */ u64 (*err_addr_to_sys_addr)(u64 eaddr, int mc); /* Convert error address logged in IBECC to integrated memory controller address */ @@ -222,6 +226,67 @@ static struct work_struct ecclog_work; #define DID_ADL_SKU3 0x4621 #define DID_ADL_SKU4 0x4641 +/* Compute die IDs for Alder Lake-N with IBECC */ +#define DID_ADL_N_SKU1 0x4614 +#define DID_ADL_N_SKU2 0x4617 +#define DID_ADL_N_SKU3 0x461b +#define DID_ADL_N_SKU4 0x461c +#define DID_ADL_N_SKU5 0x4673 +#define DID_ADL_N_SKU6 0x4674 +#define DID_ADL_N_SKU7 0x4675 +#define DID_ADL_N_SKU8 0x4677 +#define DID_ADL_N_SKU9 0x4678 +#define DID_ADL_N_SKU10 0x4679 +#define DID_ADL_N_SKU11 0x467c + +/* Compute die IDs for Raptor Lake-P with IBECC */ +#define DID_RPL_P_SKU1 0xa706 +#define DID_RPL_P_SKU2 0xa707 +#define DID_RPL_P_SKU3 0xa708 +#define DID_RPL_P_SKU4 0xa716 +#define DID_RPL_P_SKU5 0xa718 + +/* Compute die IDs for Meteor Lake-PS with IBECC */ +#define DID_MTL_PS_SKU1 0x7d21 +#define DID_MTL_PS_SKU2 0x7d22 +#define DID_MTL_PS_SKU3 0x7d23 +#define DID_MTL_PS_SKU4 0x7d24 + +/* Compute die IDs for Meteor Lake-P with IBECC */ +#define DID_MTL_P_SKU1 0x7d01 +#define DID_MTL_P_SKU2 0x7d02 +#define DID_MTL_P_SKU3 0x7d14 + +static int get_mchbar(struct pci_dev *pdev, u64 *mchbar) +{ + union { + u64 v; + struct { + u32 v_lo; + u32 v_hi; + }; + } u; + + if (pci_read_config_dword(pdev, MCHBAR_OFFSET, &u.v_lo)) { + igen6_printk(KERN_ERR, "Failed to read lower MCHBAR\n"); + return -ENODEV; + } + + if (pci_read_config_dword(pdev, MCHBAR_OFFSET + 4, &u.v_hi)) { + igen6_printk(KERN_ERR, "Failed to read upper MCHBAR\n"); + return -ENODEV; + } + + if (!(u.v & MCHBAR_EN)) { + igen6_printk(KERN_ERR, "MCHBAR is disabled\n"); + return -ENODEV; + } + + *mchbar = MCHBAR_BASE(u.v); + + return 0; 
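get_mchbar() above assembles the 64-bit MCHBAR from two 32-bit config reads through an anonymous union, which relies on x86's little-endian layout mapping v_lo/v_hi onto the halves of v. The explicit equivalent, for reference:

static u64 combine_bar(u32 lo, u32 hi)
{
	return (u64)hi << 32 | lo;
}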
+} + static bool ehl_ibecc_available(struct pci_dev *pdev) { u32 v; @@ -272,6 +337,39 @@ static bool tgl_ibecc_available(struct pci_dev *pdev) return !(CAPID_E_IBECC & v); } +static bool mtl_p_ibecc_available(struct pci_dev *pdev) +{ + u32 v; + + if (pci_read_config_dword(pdev, CAPID_E_OFFSET, &v)) + return false; + + return !(CAPID_E_IBECC_BIT18 & v); +} + +static bool mtl_ps_ibecc_available(struct pci_dev *pdev) +{ +#define MCHBAR_MEMSS_IBECCDIS 0x13c00 + void __iomem *window; + u64 mchbar; + u32 val; + + if (get_mchbar(pdev, &mchbar)) + return false; + + window = ioremap(mchbar, MCHBAR_SIZE * 2); + if (!window) { + igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", mchbar); + return false; + } + + val = readl(window + MCHBAR_MEMSS_IBECCDIS); + iounmap(window); + + /* Bit6: 1 - IBECC is disabled, 0 - IBECC isn't disabled */ + return !GET_BITFIELD(val, 6, 6); +} + static u64 mem_addr_to_sys_addr(u64 maddr) { if (maddr < igen6_tolud) @@ -358,6 +456,11 @@ static u64 adl_err_addr_to_imc_addr(u64 eaddr, int mc) return imc_addr; } +static u64 rpl_p_err_addr(u64 ecclog) +{ + return ECC_ERROR_LOG_ADDR45(ecclog); +} + static struct res_config ehl_cfg = { .num_imc = 1, .imc_base = 0x5000, @@ -403,6 +506,51 @@ static struct res_config adl_cfg = { .err_addr_to_imc_addr = adl_err_addr_to_imc_addr, }; +static struct res_config adl_n_cfg = { + .machine_check = true, + .num_imc = 1, + .imc_base = 0xd800, + .ibecc_base = 0xd400, + .ibecc_error_log_offset = 0x68, + .ibecc_available = tgl_ibecc_available, + .err_addr_to_sys_addr = adl_err_addr_to_sys_addr, + .err_addr_to_imc_addr = adl_err_addr_to_imc_addr, +}; + +static struct res_config rpl_p_cfg = { + .machine_check = true, + .num_imc = 2, + .imc_base = 0xd800, + .ibecc_base = 0xd400, + .ibecc_error_log_offset = 0x68, + .ibecc_available = tgl_ibecc_available, + .err_addr = rpl_p_err_addr, + .err_addr_to_sys_addr = adl_err_addr_to_sys_addr, + .err_addr_to_imc_addr = adl_err_addr_to_imc_addr, +}; + +static struct res_config mtl_ps_cfg = { + .machine_check = true, + .num_imc = 2, + .imc_base = 0xd800, + .ibecc_base = 0xd400, + .ibecc_error_log_offset = 0x170, + .ibecc_available = mtl_ps_ibecc_available, + .err_addr_to_sys_addr = adl_err_addr_to_sys_addr, + .err_addr_to_imc_addr = adl_err_addr_to_imc_addr, +}; + +static struct res_config mtl_p_cfg = { + .machine_check = true, + .num_imc = 2, + .imc_base = 0xd800, + .ibecc_base = 0xd400, + .ibecc_error_log_offset = 0x170, + .ibecc_available = mtl_p_ibecc_available, + .err_addr_to_sys_addr = adl_err_addr_to_sys_addr, + .err_addr_to_imc_addr = adl_err_addr_to_imc_addr, +}; + static const struct pci_device_id igen6_pci_tbl[] = { { PCI_VDEVICE(INTEL, DID_EHL_SKU5), (kernel_ulong_t)&ehl_cfg }, { PCI_VDEVICE(INTEL, DID_EHL_SKU6), (kernel_ulong_t)&ehl_cfg }, @@ -424,6 +572,29 @@ static const struct pci_device_id igen6_pci_tbl[] = { { PCI_VDEVICE(INTEL, DID_ADL_SKU2), (kernel_ulong_t)&adl_cfg }, { PCI_VDEVICE(INTEL, DID_ADL_SKU3), (kernel_ulong_t)&adl_cfg }, { PCI_VDEVICE(INTEL, DID_ADL_SKU4), (kernel_ulong_t)&adl_cfg }, + { PCI_VDEVICE(INTEL, DID_ADL_N_SKU1), (kernel_ulong_t)&adl_n_cfg }, + { PCI_VDEVICE(INTEL, DID_ADL_N_SKU2), (kernel_ulong_t)&adl_n_cfg }, + { PCI_VDEVICE(INTEL, DID_ADL_N_SKU3), (kernel_ulong_t)&adl_n_cfg }, + { PCI_VDEVICE(INTEL, DID_ADL_N_SKU4), (kernel_ulong_t)&adl_n_cfg }, + { PCI_VDEVICE(INTEL, DID_ADL_N_SKU5), (kernel_ulong_t)&adl_n_cfg }, + { PCI_VDEVICE(INTEL, DID_ADL_N_SKU6), (kernel_ulong_t)&adl_n_cfg }, + { PCI_VDEVICE(INTEL, DID_ADL_N_SKU7), (kernel_ulong_t)&adl_n_cfg }, + { 
PCI_VDEVICE(INTEL, DID_ADL_N_SKU8), (kernel_ulong_t)&adl_n_cfg }, + { PCI_VDEVICE(INTEL, DID_ADL_N_SKU9), (kernel_ulong_t)&adl_n_cfg }, + { PCI_VDEVICE(INTEL, DID_ADL_N_SKU10), (kernel_ulong_t)&adl_n_cfg }, + { PCI_VDEVICE(INTEL, DID_ADL_N_SKU11), (kernel_ulong_t)&adl_n_cfg }, + { PCI_VDEVICE(INTEL, DID_RPL_P_SKU1), (kernel_ulong_t)&rpl_p_cfg }, + { PCI_VDEVICE(INTEL, DID_RPL_P_SKU2), (kernel_ulong_t)&rpl_p_cfg }, + { PCI_VDEVICE(INTEL, DID_RPL_P_SKU3), (kernel_ulong_t)&rpl_p_cfg }, + { PCI_VDEVICE(INTEL, DID_RPL_P_SKU4), (kernel_ulong_t)&rpl_p_cfg }, + { PCI_VDEVICE(INTEL, DID_RPL_P_SKU5), (kernel_ulong_t)&rpl_p_cfg }, + { PCI_VDEVICE(INTEL, DID_MTL_PS_SKU1), (kernel_ulong_t)&mtl_ps_cfg }, + { PCI_VDEVICE(INTEL, DID_MTL_PS_SKU2), (kernel_ulong_t)&mtl_ps_cfg }, + { PCI_VDEVICE(INTEL, DID_MTL_PS_SKU3), (kernel_ulong_t)&mtl_ps_cfg }, + { PCI_VDEVICE(INTEL, DID_MTL_PS_SKU4), (kernel_ulong_t)&mtl_ps_cfg }, + { PCI_VDEVICE(INTEL, DID_MTL_P_SKU1), (kernel_ulong_t)&mtl_p_cfg }, + { PCI_VDEVICE(INTEL, DID_MTL_P_SKU2), (kernel_ulong_t)&mtl_p_cfg }, + { PCI_VDEVICE(INTEL, DID_MTL_P_SKU3), (kernel_ulong_t)&mtl_p_cfg }, { }, }; MODULE_DEVICE_TABLE(pci, igen6_pci_tbl); @@ -679,8 +850,11 @@ static void ecclog_work_cb(struct work_struct *work) llist_for_each_entry_safe(node, tmp, head, llnode) { memset(&res, 0, sizeof(res)); - eaddr = ECC_ERROR_LOG_ADDR(node->ecclog) << - ECC_ERROR_LOG_ADDR_SHIFT; + if (res_cfg->err_addr) + eaddr = res_cfg->err_addr(node->ecclog); + else + eaddr = ECC_ERROR_LOG_ADDR(node->ecclog) << + ECC_ERROR_LOG_ADDR_SHIFT; res.mc = node->mc; res.sys_addr = res_cfg->err_addr_to_sys_addr(eaddr, res.mc); res.imc_addr = res_cfg->err_addr_to_imc_addr(eaddr, res.mc); @@ -969,22 +1143,8 @@ static int igen6_pci_setup(struct pci_dev *pdev, u64 *mchbar) igen6_tom = u.v & GENMASK_ULL(38, 20); - if (pci_read_config_dword(pdev, MCHBAR_OFFSET, &u.v_lo)) { - igen6_printk(KERN_ERR, "Failed to read lower MCHBAR\n"); + if (get_mchbar(pdev, mchbar)) goto fail; - } - - if (pci_read_config_dword(pdev, MCHBAR_OFFSET + 4, &u.v_hi)) { - igen6_printk(KERN_ERR, "Failed to read upper MCHBAR\n"); - goto fail; - } - - if (!(u.v & MCHBAR_EN)) { - igen6_printk(KERN_ERR, "MCHBAR is disabled\n"); - goto fail; - } - - *mchbar = MCHBAR_BASE(u.v); #ifdef CONFIG_EDAC_DEBUG if (pci_read_config_dword(pdev, TOUUD_OFFSET, &u.v_lo)) diff --git a/drivers/edac/layerscape_edac.c b/drivers/edac/layerscape_edac.c index 7c5e2b3c0daa..d2f895033280 100644 --- a/drivers/edac/layerscape_edac.c +++ b/drivers/edac/layerscape_edac.c @@ -27,7 +27,7 @@ MODULE_DEVICE_TABLE(of, fsl_ddr_mc_err_of_match); static struct platform_driver fsl_ddr_mc_err_driver = { .probe = fsl_mc_err_probe, - .remove = fsl_mc_err_remove, + .remove_new = fsl_mc_err_remove, .driver = { .name = "fsl_ddr_mc_err", .of_match_table = fsl_ddr_mc_err_of_match, diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 9215c06783df..ec8b6c9fedfd 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -143,482 +143,6 @@ static const char * const mc6_mce_desc[] = { "Status Register File", }; -/* Scalable MCA error strings */ -static const char * const smca_ls_mce_desc[] = { - "Load queue parity error", - "Store queue parity error", - "Miss address buffer payload parity error", - "Level 1 TLB parity error", - "DC Tag error type 5", - "DC Tag error type 6", - "DC Tag error type 1", - "Internal error type 1", - "Internal error type 2", - "System Read Data Error Thread 0", - "System Read Data Error Thread 1", - "DC Tag error type 2", - "DC Data error type 1 
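Earlier in this chunk, ecclog_work_cb() gains an optional-callback dispatch: when the per-SoC res_config provides an err_addr() extractor (as rpl_p_cfg now does, since Raptor Lake-P logs address bits 5..45 rather than the common 5..38), it is preferred, and the old fixed field plus shift remains the fallback. A standalone sketch of that pattern, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Generic bitfield extract, mirroring the driver's GET_BITFIELD(v, lo, hi). */
#define GET_BITFIELD(v, lo, hi) (((v) >> (lo)) & ((1ULL << ((hi) - (lo) + 1)) - 1))

struct res_config_example {
    /* Optional per-SoC extractor; NULL means "use the common layout". */
    uint64_t (*err_addr)(uint64_t ecclog);
};

/* RPL-P logs a wider address field (bits 5..45) than the common 5..38. */
static uint64_t rpl_p_err_addr_example(uint64_t ecclog)
{
    return GET_BITFIELD(ecclog, 5, 45);
}

static uint64_t decode_err_addr(const struct res_config_example *cfg, uint64_t ecclog)
{
    if (cfg->err_addr)                        /* SoC-specific layout wins */
        return cfg->err_addr(ecclog);
    return GET_BITFIELD(ecclog, 5, 38) << 5;  /* common field plus shift */
}

int main(void)
{
    struct res_config_example rpl = { .err_addr = rpl_p_err_addr_example };
    struct res_config_example def = { .err_addr = NULL };
    uint64_t log = 0x1234567890ULL;

    printf("rpl-p:  0x%llx\n", (unsigned long long)decode_err_addr(&rpl, log));
    printf("common: 0x%llx\n", (unsigned long long)decode_err_addr(&def, log));
    return 0;
}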
and poison consumption", - "DC Data error type 2", - "DC Data error type 3", - "DC Tag error type 4", - "Level 2 TLB parity error", - "PDC parity error", - "DC Tag error type 3", - "DC Tag error type 5", - "L2 Fill Data error", -}; - -static const char * const smca_ls2_mce_desc[] = { - "An ECC error was detected on a data cache read by a probe or victimization", - "An ECC error or L2 poison was detected on a data cache read by a load", - "An ECC error was detected on a data cache read-modify-write by a store", - "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization", - "An ECC error or poison bit mismatch was detected on a tag read by a load", - "An ECC error or poison bit mismatch was detected on a tag read by a store", - "An ECC error was detected on an EMEM read by a load", - "An ECC error was detected on an EMEM read-modify-write by a store", - "A parity error was detected in an L1 TLB entry by any access", - "A parity error was detected in an L2 TLB entry by any access", - "A parity error was detected in a PWC entry by any access", - "A parity error was detected in an STQ entry by any access", - "A parity error was detected in an LDQ entry by any access", - "A parity error was detected in a MAB entry by any access", - "A parity error was detected in an SCB entry state field by any access", - "A parity error was detected in an SCB entry address field by any access", - "A parity error was detected in an SCB entry data field by any access", - "A parity error was detected in a WCB entry by any access", - "A poisoned line was detected in an SCB entry by any access", - "A SystemReadDataError error was reported on read data returned from L2 for a load", - "A SystemReadDataError error was reported on read data returned from L2 for an SCB store", - "A SystemReadDataError error was reported on read data returned from L2 for a WCB store", - "A hardware assertion error was reported", - "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access", -}; - -static const char * const smca_if_mce_desc[] = { - "Op Cache Microtag Probe Port Parity Error", - "IC Microtag or Full Tag Multi-hit Error", - "IC Full Tag Parity Error", - "IC Data Array Parity Error", - "Decoupling Queue PhysAddr Parity Error", - "L0 ITLB Parity Error", - "L1 ITLB Parity Error", - "L2 ITLB Parity Error", - "BPQ Thread 0 Snoop Parity Error", - "BPQ Thread 1 Snoop Parity Error", - "L1 BTB Multi-Match Error", - "L2 BTB Multi-Match Error", - "L2 Cache Response Poison Error", - "System Read Data Error", - "Hardware Assertion Error", - "L1-TLB Multi-Hit", - "L2-TLB Multi-Hit", - "BSR Parity Error", - "CT MCE", -}; - -static const char * const smca_l2_mce_desc[] = { - "L2M Tag Multiple-Way-Hit error", - "L2M Tag or State Array ECC Error", - "L2M Data Array ECC Error", - "Hardware Assert Error", -}; - -static const char * const smca_de_mce_desc[] = { - "Micro-op cache tag parity error", - "Micro-op cache data parity error", - "Instruction buffer parity error", - "Micro-op queue parity error", - "Instruction dispatch queue parity error", - "Fetch address FIFO parity error", - "Patch RAM data parity error", - "Patch RAM sequencer parity error", - "Micro-op buffer parity error", - "Hardware Assertion MCA Error", -}; - -static const char * const smca_ex_mce_desc[] = { - "Watchdog Timeout error", - "Physical register file parity error", - "Flag register file parity error", - "Immediate displacement register file parity error", - "Address generator payload parity error", - "EX 
payload parity error", - "Checkpoint queue parity error", - "Retire dispatch queue parity error", - "Retire status queue parity error", - "Scheduling queue parity error", - "Branch buffer queue parity error", - "Hardware Assertion error", - "Spec Map parity error", - "Retire Map parity error", -}; - -static const char * const smca_fp_mce_desc[] = { - "Physical register file (PRF) parity error", - "Freelist (FL) parity error", - "Schedule queue parity error", - "NSQ parity error", - "Retire queue (RQ) parity error", - "Status register file (SRF) parity error", - "Hardware assertion", -}; - -static const char * const smca_l3_mce_desc[] = { - "Shadow Tag Macro ECC Error", - "Shadow Tag Macro Multi-way-hit Error", - "L3M Tag ECC Error", - "L3M Tag Multi-way-hit Error", - "L3M Data ECC Error", - "SDP Parity Error or SystemReadDataError from XI", - "L3 Victim Queue Parity Error", - "L3 Hardware Assertion", -}; - -static const char * const smca_cs_mce_desc[] = { - "Illegal Request", - "Address Violation", - "Security Violation", - "Illegal Response", - "Unexpected Response", - "Request or Probe Parity Error", - "Read Response Parity Error", - "Atomic Request Parity Error", - "Probe Filter ECC Error", -}; - -static const char * const smca_cs2_mce_desc[] = { - "Illegal Request", - "Address Violation", - "Security Violation", - "Illegal Response", - "Unexpected Response", - "Request or Probe Parity Error", - "Read Response Parity Error", - "Atomic Request Parity Error", - "SDP read response had no match in the CS queue", - "Probe Filter Protocol Error", - "Probe Filter ECC Error", - "SDP read response had an unexpected RETRY error", - "Counter overflow error", - "Counter underflow error", -}; - -static const char * const smca_pie_mce_desc[] = { - "Hardware Assert", - "Register security violation", - "Link Error", - "Poison data consumption", - "A deferred error was detected in the DF" -}; - -static const char * const smca_umc_mce_desc[] = { - "DRAM ECC error", - "Data poison error", - "SDP parity error", - "Advanced peripheral bus error", - "Address/Command parity error", - "Write data CRC error", - "DCQ SRAM ECC error", - "AES SRAM ECC error", -}; - -static const char * const smca_umc2_mce_desc[] = { - "DRAM ECC error", - "Data poison error", - "SDP parity error", - "Reserved", - "Address/Command parity error", - "Write data parity error", - "DCQ SRAM ECC error", - "Reserved", - "Read data parity error", - "Rdb SRAM ECC error", - "RdRsp SRAM ECC error", - "LM32 MP errors", -}; - -static const char * const smca_pb_mce_desc[] = { - "An ECC error in the Parameter Block RAM array", -}; - -static const char * const smca_psp_mce_desc[] = { - "An ECC or parity error in a PSP RAM instance", -}; - -static const char * const smca_psp2_mce_desc[] = { - "High SRAM ECC or parity error", - "Low SRAM ECC or parity error", - "Instruction Cache Bank 0 ECC or parity error", - "Instruction Cache Bank 1 ECC or parity error", - "Instruction Tag Ram 0 parity error", - "Instruction Tag Ram 1 parity error", - "Data Cache Bank 0 ECC or parity error", - "Data Cache Bank 1 ECC or parity error", - "Data Cache Bank 2 ECC or parity error", - "Data Cache Bank 3 ECC or parity error", - "Data Tag Bank 0 parity error", - "Data Tag Bank 1 parity error", - "Data Tag Bank 2 parity error", - "Data Tag Bank 3 parity error", - "Dirty Data Ram parity error", - "TLB Bank 0 parity error", - "TLB Bank 1 parity error", - "System Hub Read Buffer ECC or parity error", -}; - -static const char * const smca_smu_mce_desc[] = { - "An ECC or parity 
error in an SMU RAM instance", -}; - -static const char * const smca_smu2_mce_desc[] = { - "High SRAM ECC or parity error", - "Low SRAM ECC or parity error", - "Data Cache Bank A ECC or parity error", - "Data Cache Bank B ECC or parity error", - "Data Tag Cache Bank A ECC or parity error", - "Data Tag Cache Bank B ECC or parity error", - "Instruction Cache Bank A ECC or parity error", - "Instruction Cache Bank B ECC or parity error", - "Instruction Tag Cache Bank A ECC or parity error", - "Instruction Tag Cache Bank B ECC or parity error", - "System Hub Read Buffer ECC or parity error", - "PHY RAM ECC error", -}; - -static const char * const smca_mp5_mce_desc[] = { - "High SRAM ECC or parity error", - "Low SRAM ECC or parity error", - "Data Cache Bank A ECC or parity error", - "Data Cache Bank B ECC or parity error", - "Data Tag Cache Bank A ECC or parity error", - "Data Tag Cache Bank B ECC or parity error", - "Instruction Cache Bank A ECC or parity error", - "Instruction Cache Bank B ECC or parity error", - "Instruction Tag Cache Bank A ECC or parity error", - "Instruction Tag Cache Bank B ECC or parity error", -}; - -static const char * const smca_mpdma_mce_desc[] = { - "Main SRAM [31:0] bank ECC or parity error", - "Main SRAM [63:32] bank ECC or parity error", - "Main SRAM [95:64] bank ECC or parity error", - "Main SRAM [127:96] bank ECC or parity error", - "Data Cache Bank A ECC or parity error", - "Data Cache Bank B ECC or parity error", - "Data Tag Cache Bank A ECC or parity error", - "Data Tag Cache Bank B ECC or parity error", - "Instruction Cache Bank A ECC or parity error", - "Instruction Cache Bank B ECC or parity error", - "Instruction Tag Cache Bank A ECC or parity error", - "Instruction Tag Cache Bank B ECC or parity error", - "Data Cache Bank A ECC or parity error", - "Data Cache Bank B ECC or parity error", - "Data Tag Cache Bank A ECC or parity error", - "Data Tag Cache Bank B ECC or parity error", - "Instruction Cache Bank A ECC or parity error", - "Instruction Cache Bank B ECC or parity error", - "Instruction Tag Cache Bank A ECC or parity error", - "Instruction Tag Cache Bank B ECC or parity error", - "Data Cache Bank A ECC or parity error", - "Data Cache Bank B ECC or parity error", - "Data Tag Cache Bank A ECC or parity error", - "Data Tag Cache Bank B ECC or parity error", - "Instruction Cache Bank A ECC or parity error", - "Instruction Cache Bank B ECC or parity error", - "Instruction Tag Cache Bank A ECC or parity error", - "Instruction Tag Cache Bank B ECC or parity error", - "System Hub Read Buffer ECC or parity error", - "MPDMA TVF DVSEC Memory ECC or parity error", - "MPDMA TVF MMIO Mailbox0 ECC or parity error", - "MPDMA TVF MMIO Mailbox1 ECC or parity error", - "MPDMA TVF Doorbell Memory ECC or parity error", - "MPDMA TVF SDP Slave Memory 0 ECC or parity error", - "MPDMA TVF SDP Slave Memory 1 ECC or parity error", - "MPDMA TVF SDP Slave Memory 2 ECC or parity error", - "MPDMA TVF SDP Master Memory 0 ECC or parity error", - "MPDMA TVF SDP Master Memory 1 ECC or parity error", - "MPDMA TVF SDP Master Memory 2 ECC or parity error", - "MPDMA TVF SDP Master Memory 3 ECC or parity error", - "MPDMA TVF SDP Master Memory 4 ECC or parity error", - "MPDMA TVF SDP Master Memory 5 ECC or parity error", - "MPDMA TVF SDP Master Memory 6 ECC or parity error", - "MPDMA PTE Command FIFO ECC or parity error", - "MPDMA PTE Hub Data FIFO ECC or parity error", - "MPDMA PTE Internal Data FIFO ECC or parity error", - "MPDMA PTE Command Memory DMA ECC or parity error", - "MPDMA 
PTE Command Memory Internal ECC or parity error", - "MPDMA PTE DMA Completion FIFO ECC or parity error", - "MPDMA PTE Tablewalk Completion FIFO ECC or parity error", - "MPDMA PTE Descriptor Completion FIFO ECC or parity error", - "MPDMA PTE ReadOnly Completion FIFO ECC or parity error", - "MPDMA PTE DirectWrite Completion FIFO ECC or parity error", - "SDP Watchdog Timer expired", -}; - -static const char * const smca_nbio_mce_desc[] = { - "ECC or Parity error", - "PCIE error", - "SDP ErrEvent error", - "SDP Egress Poison Error", - "IOHC Internal Poison Error", -}; - -static const char * const smca_pcie_mce_desc[] = { - "CCIX PER Message logging", - "CCIX Read Response with Status: Non-Data Error", - "CCIX Write Response with Status: Non-Data Error", - "CCIX Read Response with Status: Data Error", - "CCIX Non-okay write response with data error", -}; - -static const char * const smca_pcie2_mce_desc[] = { - "SDP Parity Error logging", -}; - -static const char * const smca_xgmipcs_mce_desc[] = { - "Data Loss Error", - "Training Error", - "Flow Control Acknowledge Error", - "Rx Fifo Underflow Error", - "Rx Fifo Overflow Error", - "CRC Error", - "BER Exceeded Error", - "Tx Vcid Data Error", - "Replay Buffer Parity Error", - "Data Parity Error", - "Replay Fifo Overflow Error", - "Replay Fifo Underflow Error", - "Elastic Fifo Overflow Error", - "Deskew Error", - "Flow Control CRC Error", - "Data Startup Limit Error", - "FC Init Timeout Error", - "Recovery Timeout Error", - "Ready Serial Timeout Error", - "Ready Serial Attempt Error", - "Recovery Attempt Error", - "Recovery Relock Attempt Error", - "Replay Attempt Error", - "Sync Header Error", - "Tx Replay Timeout Error", - "Rx Replay Timeout Error", - "LinkSub Tx Timeout Error", - "LinkSub Rx Timeout Error", - "Rx CMD Packet Error", -}; - -static const char * const smca_xgmiphy_mce_desc[] = { - "RAM ECC Error", - "ARC instruction buffer parity error", - "ARC data buffer parity error", - "PHY APB error", -}; - -static const char * const smca_nbif_mce_desc[] = { - "Timeout error from GMI", - "SRAM ECC error", - "NTB Error Event", - "SDP Parity error", -}; - -static const char * const smca_sata_mce_desc[] = { - "Parity error for port 0", - "Parity error for port 1", - "Parity error for port 2", - "Parity error for port 3", - "Parity error for port 4", - "Parity error for port 5", - "Parity error for port 6", - "Parity error for port 7", -}; - -static const char * const smca_usb_mce_desc[] = { - "Parity error or ECC error for S0 RAM0", - "Parity error or ECC error for S0 RAM1", - "Parity error or ECC error for S0 RAM2", - "Parity error for PHY RAM0", - "Parity error for PHY RAM1", - "AXI Slave Response error", -}; - -static const char * const smca_gmipcs_mce_desc[] = { - "Data Loss Error", - "Training Error", - "Replay Parity Error", - "Rx Fifo Underflow Error", - "Rx Fifo Overflow Error", - "CRC Error", - "BER Exceeded Error", - "Tx Fifo Underflow Error", - "Replay Buffer Parity Error", - "Tx Overflow Error", - "Replay Fifo Overflow Error", - "Replay Fifo Underflow Error", - "Elastic Fifo Overflow Error", - "Deskew Error", - "Offline Error", - "Data Startup Limit Error", - "FC Init Timeout Error", - "Recovery Timeout Error", - "Ready Serial Timeout Error", - "Ready Serial Attempt Error", - "Recovery Attempt Error", - "Recovery Relock Attempt Error", - "Deskew Abort Error", - "Rx Buffer Error", - "Rx LFDS Fifo Overflow Error", - "Rx LFDS Fifo Underflow Error", - "LinkSub Tx Timeout Error", - "LinkSub Rx Timeout Error", - "Rx CMD Packet Error", - 
"LFDS Training Timeout Error", - "LFDS FC Init Timeout Error", - "Data Loss Error", -}; - -struct smca_mce_desc { - const char * const *descs; - unsigned int num_descs; -}; - -static struct smca_mce_desc smca_mce_descs[] = { - [SMCA_LS] = { smca_ls_mce_desc, ARRAY_SIZE(smca_ls_mce_desc) }, - [SMCA_LS_V2] = { smca_ls2_mce_desc, ARRAY_SIZE(smca_ls2_mce_desc) }, - [SMCA_IF] = { smca_if_mce_desc, ARRAY_SIZE(smca_if_mce_desc) }, - [SMCA_L2_CACHE] = { smca_l2_mce_desc, ARRAY_SIZE(smca_l2_mce_desc) }, - [SMCA_DE] = { smca_de_mce_desc, ARRAY_SIZE(smca_de_mce_desc) }, - [SMCA_EX] = { smca_ex_mce_desc, ARRAY_SIZE(smca_ex_mce_desc) }, - [SMCA_FP] = { smca_fp_mce_desc, ARRAY_SIZE(smca_fp_mce_desc) }, - [SMCA_L3_CACHE] = { smca_l3_mce_desc, ARRAY_SIZE(smca_l3_mce_desc) }, - [SMCA_CS] = { smca_cs_mce_desc, ARRAY_SIZE(smca_cs_mce_desc) }, - [SMCA_CS_V2] = { smca_cs2_mce_desc, ARRAY_SIZE(smca_cs2_mce_desc) }, - [SMCA_PIE] = { smca_pie_mce_desc, ARRAY_SIZE(smca_pie_mce_desc) }, - [SMCA_UMC] = { smca_umc_mce_desc, ARRAY_SIZE(smca_umc_mce_desc) }, - [SMCA_UMC_V2] = { smca_umc2_mce_desc, ARRAY_SIZE(smca_umc2_mce_desc) }, - [SMCA_PB] = { smca_pb_mce_desc, ARRAY_SIZE(smca_pb_mce_desc) }, - [SMCA_PSP] = { smca_psp_mce_desc, ARRAY_SIZE(smca_psp_mce_desc) }, - [SMCA_PSP_V2] = { smca_psp2_mce_desc, ARRAY_SIZE(smca_psp2_mce_desc) }, - [SMCA_SMU] = { smca_smu_mce_desc, ARRAY_SIZE(smca_smu_mce_desc) }, - [SMCA_SMU_V2] = { smca_smu2_mce_desc, ARRAY_SIZE(smca_smu2_mce_desc) }, - [SMCA_MP5] = { smca_mp5_mce_desc, ARRAY_SIZE(smca_mp5_mce_desc) }, - [SMCA_MPDMA] = { smca_mpdma_mce_desc, ARRAY_SIZE(smca_mpdma_mce_desc) }, - [SMCA_NBIO] = { smca_nbio_mce_desc, ARRAY_SIZE(smca_nbio_mce_desc) }, - [SMCA_PCIE] = { smca_pcie_mce_desc, ARRAY_SIZE(smca_pcie_mce_desc) }, - [SMCA_PCIE_V2] = { smca_pcie2_mce_desc, ARRAY_SIZE(smca_pcie2_mce_desc) }, - [SMCA_XGMI_PCS] = { smca_xgmipcs_mce_desc, ARRAY_SIZE(smca_xgmipcs_mce_desc) }, - /* NBIF and SHUB have the same error descriptions, for now. */ - [SMCA_NBIF] = { smca_nbif_mce_desc, ARRAY_SIZE(smca_nbif_mce_desc) }, - [SMCA_SHUB] = { smca_nbif_mce_desc, ARRAY_SIZE(smca_nbif_mce_desc) }, - [SMCA_SATA] = { smca_sata_mce_desc, ARRAY_SIZE(smca_sata_mce_desc) }, - [SMCA_USB] = { smca_usb_mce_desc, ARRAY_SIZE(smca_usb_mce_desc) }, - [SMCA_GMI_PCS] = { smca_gmipcs_mce_desc, ARRAY_SIZE(smca_gmipcs_mce_desc) }, - /* All the PHY bank types have the same error descriptions, for now. */ - [SMCA_XGMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) }, - [SMCA_WAFL_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) }, - [SMCA_GMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) }, -}; - static bool f12h_mc0_mce(u16 ec, u8 xec) { bool ret = false; @@ -1163,11 +687,51 @@ static void decode_mc6_mce(struct mce *m) pr_emerg(HW_ERR "Corrupted MC6 MCE info?\n"); } +static const char * const smca_long_names[] = { + [SMCA_LS ... SMCA_LS_V2] = "Load Store Unit", + [SMCA_IF] = "Instruction Fetch Unit", + [SMCA_L2_CACHE] = "L2 Cache", + [SMCA_DE] = "Decode Unit", + [SMCA_RESERVED] = "Reserved", + [SMCA_EX] = "Execution Unit", + [SMCA_FP] = "Floating Point Unit", + [SMCA_L3_CACHE] = "L3 Cache", + [SMCA_CS ... SMCA_CS_V2] = "Coherent Slave", + [SMCA_PIE] = "Power, Interrupts, etc.", + + /* UMC v2 is separate because both of them can exist in a single system. */ + [SMCA_UMC] = "Unified Memory Controller", + [SMCA_UMC_V2] = "Unified Memory Controller v2", + [SMCA_PB] = "Parameter Block", + [SMCA_PSP ... SMCA_PSP_V2] = "Platform Security Processor", + [SMCA_SMU ... 
SMCA_SMU_V2] = "System Management Unit", + [SMCA_MP5] = "Microprocessor 5 Unit", + [SMCA_MPDMA] = "MPDMA Unit", + [SMCA_NBIO] = "Northbridge IO Unit", + [SMCA_PCIE ... SMCA_PCIE_V2] = "PCI Express Unit", + [SMCA_XGMI_PCS] = "Ext Global Memory Interconnect PCS Unit", + [SMCA_NBIF] = "NBIF Unit", + [SMCA_SHUB] = "System Hub Unit", + [SMCA_SATA] = "SATA Unit", + [SMCA_USB] = "USB Unit", + [SMCA_GMI_PCS] = "Global Memory Interconnect PCS Unit", + [SMCA_XGMI_PHY] = "Ext Global Memory Interconnect PHY Unit", + [SMCA_WAFL_PHY] = "WAFL PHY Unit", + [SMCA_GMI_PHY] = "Global Memory Interconnect PHY Unit", +}; + +static const char *smca_get_long_name(enum smca_bank_types t) +{ + if (t >= N_SMCA_BANK_TYPES) + return NULL; + + return smca_long_names[t]; +} + /* Decode errors according to Scalable MCA specification */ static void decode_smca_error(struct mce *m) { enum smca_bank_types bank_type = smca_get_bank_type(m->extcpu, m->bank); - const char *ip_name; u8 xec = XEC(m->status, xec_mask); if (bank_type >= N_SMCA_BANK_TYPES) @@ -1178,13 +742,7 @@ static void decode_smca_error(struct mce *m) return; } - ip_name = smca_get_long_name(bank_type); - - pr_emerg(HW_ERR "%s Ext. Error Code: %d", ip_name, xec); - - /* Only print the decode of valid error codes */ - if (xec < smca_mce_descs[bank_type].num_descs) - pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]); + pr_emerg(HW_ERR "%s Ext. Error Code: %d", smca_get_long_name(bank_type), xec); if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) && xec == 0 && decode_dram_ecc) diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index 2b5703e5066e..c1bc53f4e184 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c @@ -300,7 +300,7 @@ err: return res; } -static int mpc85xx_pci_err_remove(struct platform_device *op) +static void mpc85xx_pci_err_remove(struct platform_device *op) { struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev); struct mpc85xx_pci_pdata *pdata = pci->pvt_info; @@ -312,8 +312,6 @@ static int mpc85xx_pci_err_remove(struct platform_device *op) edac_pci_del_device(&op->dev); edac_pci_free_ctl_info(pci); - - return 0; } static const struct platform_device_id mpc85xx_pci_err_match[] = { @@ -325,7 +323,7 @@ static const struct platform_device_id mpc85xx_pci_err_match[] = { static struct platform_driver mpc85xx_pci_err_driver = { .probe = mpc85xx_pci_err_probe, - .remove = mpc85xx_pci_err_remove, + .remove_new = mpc85xx_pci_err_remove, .id_table = mpc85xx_pci_err_match, .driver = { .name = "mpc85xx_pci_err", @@ -591,7 +589,7 @@ err: return res; } -static int mpc85xx_l2_err_remove(struct platform_device *op) +static void mpc85xx_l2_err_remove(struct platform_device *op) { struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev); struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; @@ -606,7 +604,6 @@ static int mpc85xx_l2_err_remove(struct platform_device *op) out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable); edac_device_del_device(&op->dev); edac_device_free_ctl_info(edac_dev); - return 0; } static const struct of_device_id mpc85xx_l2_err_of_match[] = { @@ -630,7 +627,7 @@ MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match); static struct platform_driver mpc85xx_l2_err_driver = { .probe = mpc85xx_l2_err_probe, - .remove = mpc85xx_l2_err_remove, + .remove_new = mpc85xx_l2_err_remove, .driver = { .name = "mpc85xx_l2_err", .of_match_table = mpc85xx_l2_err_of_match, @@ -659,7 +656,7 @@ MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match); static struct platform_driver 
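The smca_long_names table just shown replaces the per-bank description arrays with a single name table. It uses GNU C range designators so adjacent bank versions (SMCA_LS ... SMCA_LS_V2, for example) share one string, and smca_get_long_name() bounds-checks the index, returning NULL for unknown bank types. The same shape as a standalone sketch, with a hypothetical enum:

#include <stddef.h>
#include <stdio.h>

enum bank_type { BANK_LS, BANK_LS_V2, BANK_IF, BANK_L2, N_BANK_TYPES };

/* GNU C range designators let adjacent versions share one string. */
static const char * const bank_long_names[] = {
    [BANK_LS ... BANK_LS_V2] = "Load Store Unit",
    [BANK_IF]                = "Instruction Fetch Unit",
    [BANK_L2]                = "L2 Cache",
};

static const char *bank_get_long_name(enum bank_type t)
{
    if (t >= N_BANK_TYPES)  /* out-of-range index: unknown bank */
        return NULL;
    return bank_long_names[t];
}

int main(void)
{
    const char *n = bank_get_long_name(BANK_LS_V2);

    printf("%s\n", n ? n : "(unknown)");
    return 0;
}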
mpc85xx_mc_err_driver = { .probe = fsl_mc_err_probe, - .remove = fsl_mc_err_remove, + .remove_new = fsl_mc_err_remove, .driver = { .name = "mpc85xx_mc_err", .of_match_table = mpc85xx_mc_err_of_match, diff --git a/drivers/edac/npcm_edac.c b/drivers/edac/npcm_edac.c index 6d15c1550263..2e2133b784e9 100644 --- a/drivers/edac/npcm_edac.c +++ b/drivers/edac/npcm_edac.c @@ -410,7 +410,7 @@ free_edac_mc: return rc; } -static int edac_remove(struct platform_device *pdev) +static void edac_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); struct priv_data *priv = mci->pvt_info; @@ -426,8 +426,6 @@ static int edac_remove(struct platform_device *pdev) regmap_write(npcm_regmap, pdata->ctl_int_mask_master, pdata->int_mask_master_global_mask); regmap_update_bits(npcm_regmap, pdata->ctl_ecc_en, pdata->ecc_en_mask, 0); - - return 0; } static const struct npcm_platform_data npcm750_edac = { @@ -533,7 +531,7 @@ static struct platform_driver npcm_edac_driver = { .of_match_table = npcm_edac_of_match, }, .probe = edac_probe, - .remove = edac_remove, + .remove_new = edac_remove, }; module_platform_driver(npcm_edac_driver); diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c index c33059e9b0be..4015eb9af6fe 100644 --- a/drivers/edac/octeon_edac-l2c.c +++ b/drivers/edac/octeon_edac-l2c.c @@ -184,19 +184,17 @@ err: return -ENXIO; } -static int octeon_l2c_remove(struct platform_device *pdev) +static void octeon_l2c_remove(struct platform_device *pdev) { struct edac_device_ctl_info *l2c = platform_get_drvdata(pdev); edac_device_del_device(&pdev->dev); edac_device_free_ctl_info(l2c); - - return 0; } static struct platform_driver octeon_l2c_driver = { .probe = octeon_l2c_probe, - .remove = octeon_l2c_remove, + .remove_new = octeon_l2c_remove, .driver = { .name = "octeon_l2c_edac", } diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c index aeb222ca3ed1..18615cbcd9ea 100644 --- a/drivers/edac/octeon_edac-lmc.c +++ b/drivers/edac/octeon_edac-lmc.c @@ -302,18 +302,17 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev) return 0; } -static int octeon_lmc_edac_remove(struct platform_device *pdev) +static void octeon_lmc_edac_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); - return 0; } static struct platform_driver octeon_lmc_edac_driver = { .probe = octeon_lmc_edac_probe, - .remove = octeon_lmc_edac_remove, + .remove_new = octeon_lmc_edac_remove, .driver = { .name = "octeon_lmc_edac", } diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c index 754eced59c32..ea8a8e337b1e 100644 --- a/drivers/edac/octeon_edac-pc.c +++ b/drivers/edac/octeon_edac-pc.c @@ -119,19 +119,18 @@ err: return -ENXIO; } -static int co_cache_error_remove(struct platform_device *pdev) +static void co_cache_error_remove(struct platform_device *pdev) { struct co_cache_error *p = platform_get_drvdata(pdev); unregister_co_cache_error_notifier(&p->notifier); edac_device_del_device(&pdev->dev); edac_device_free_ctl_info(p->ed); - return 0; } static struct platform_driver co_cache_error_driver = { .probe = co_cache_error_probe, - .remove = co_cache_error_remove, + .remove_new = co_cache_error_remove, .driver = { .name = "octeon_pc_edac", } diff --git a/drivers/edac/octeon_edac-pci.c b/drivers/edac/octeon_edac-pci.c index 28b238eecefc..108ad9493cfb 100644 --- a/drivers/edac/octeon_edac-pci.c +++ b/drivers/edac/octeon_edac-pci.c @@ -87,19 
+87,17 @@ err: return res; } -static int octeon_pci_remove(struct platform_device *pdev) +static void octeon_pci_remove(struct platform_device *pdev) { struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev); edac_pci_del_device(&pdev->dev); edac_pci_free_ctl_info(pci); - - return 0; } static struct platform_driver octeon_pci_driver = { .probe = octeon_pci_probe, - .remove = octeon_pci_remove, + .remove_new = octeon_pci_remove, .driver = { .name = "octeon_pci_edac", } diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index 2b306f2cc605..2afcd148fcf8 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -16,18 +16,20 @@ * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters. */ -#include <linux/module.h> +#include <linux/bitmap.h> +#include <linux/delay.h> +#include <linux/edac.h> #include <linux/init.h> +#include <linux/math64.h> +#include <linux/mmzone.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> #include <linux/pci.h> #include <linux/pci_ids.h> +#include <linux/sizes.h> #include <linux/slab.h> -#include <linux/delay.h> -#include <linux/edac.h> -#include <linux/mmzone.h> #include <linux/smp.h> -#include <linux/bitmap.h> -#include <linux/math64.h> -#include <linux/mod_devicetable.h> + #include <linux/platform_data/x86/p2sb.h> #include <asm/cpu_device_id.h> @@ -109,7 +111,6 @@ static struct mem_ctl_info *pnd2_mci; #define MOT_CHAN_INTLV_BIT_1SLC_2CH 12 #define MOT_CHAN_INTLV_BIT_2SLC_2CH 13 #define SELECTOR_DISABLED (-1) -#define _4GB (1ul << 32) #define PMI_ADDRESS_WIDTH 31 #define PND_MAX_PHYS_BIT 39 @@ -183,7 +184,7 @@ static int _apl_rd_reg(int port, int off, int op, u32 *data) } P2SB_READ(dword, P2SB_DATA_OFF, data); - ret = (status >> 1) & 0x3; + ret = (status >> 1) & GENMASK(1, 0); out: /* Hide the P2SB device, if it was hidden before */ if (hidden) @@ -307,7 +308,7 @@ static bool two_channels; /* Both PMI channels in one slice enabled */ static u8 sym_chan_mask; static u8 asym_chan_mask; -static u8 chan_mask; +static unsigned long chan_mask; static int slice_selector = -1; static int chan_selector = -1; @@ -329,7 +330,7 @@ static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask) return; } if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) { - pr_info(FW_BUG "MOT mask not power of two\n"); + pr_info(FW_BUG "MOT mask is invalid\n"); return; } if (base & ~mask) { @@ -587,7 +588,7 @@ static int get_registers(void) /* Get a contiguous memory address (remove the MMIO gap) */ static u64 remove_mmio_gap(u64 sys) { - return (sys < _4GB) ? sys : sys - (_4GB - top_lm); + return (sys < SZ_4G) ? 
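The pnd2_edac cleanups around here replace open-coded constants with SZ_4G and BIT_ULL(). The remove_addr_bit() helper touched just below squeezes one bit out of an address: bits below bitidx stay put and everything above shifts down by one, as in this standalone demo:

#include <stdint.h>
#include <stdio.h>

/* Drop bit 'bitidx' from *addr: low bits stay, upper bits shift down by one.
 * Mirrors the driver's remove_addr_bit() after the BIT_ULL() cleanup. */
static void remove_addr_bit(uint64_t *addr, int bitidx)
{
    uint64_t mask;

    if (bitidx == -1)
        return;

    mask = (1ULL << bitidx) - 1;
    *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}

int main(void)
{
    uint64_t a = 0xF5;  /* 1111_0101 */

    remove_addr_bit(&a, 3);
    printf("0x%llx\n", (unsigned long long)a);  /* bit 3 squeezed out: 0x7D */
    return 0;
}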
sys : sys - (SZ_4G - top_lm); } /* Squeeze out one address bit, shift upper part down to fill gap */ @@ -598,7 +599,7 @@ static void remove_addr_bit(u64 *addr, int bitidx) if (bitidx == -1) return; - mask = (1ull << bitidx) - 1; + mask = BIT_ULL(bitidx) - 1; *addr = ((*addr >> 1) & ~mask) | (*addr & mask); } @@ -642,8 +643,8 @@ static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg) int sym_chan_shift = sym_channels >> 1; /* Give up if address is out of range, or in MMIO gap */ - if (addr >= (1ul << PND_MAX_PHYS_BIT) || - (addr >= top_lm && addr < _4GB) || addr >= top_hm) { + if (addr >= BIT(PND_MAX_PHYS_BIT) || + (addr >= top_lm && addr < SZ_4G) || addr >= top_hm) { snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr); return -EINVAL; } @@ -727,10 +728,10 @@ static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg) } /* Translate PMI address to memory (rank, row, bank, column) */ -#define C(n) (0x10 | (n)) /* column */ -#define B(n) (0x20 | (n)) /* bank */ -#define R(n) (0x40 | (n)) /* row */ -#define RS (0x80) /* rank */ +#define C(n) (BIT(4) | (n)) /* column */ +#define B(n) (BIT(5) | (n)) /* bank */ +#define R(n) (BIT(6) | (n)) /* row */ +#define RS (BIT(7)) /* rank */ /* addrdec values */ #define AMAP_1KB 0 @@ -1064,9 +1065,9 @@ static int apl_check_ecc_active(void) int i, ret = 0; /* Check dramtype and ECC mode for each present DIMM */ - for (i = 0; i < APL_NUM_CHANNELS; i++) - if (chan_mask & BIT(i)) - ret += check_channel(i); + for_each_set_bit(i, &chan_mask, APL_NUM_CHANNELS) + ret += check_channel(i); + return ret ? -EINVAL : 0; } @@ -1205,10 +1206,7 @@ static void apl_get_dimm_config(struct mem_ctl_info *mci) u64 capacity; int i, g; - for (i = 0; i < APL_NUM_CHANNELS; i++) { - if (!(chan_mask & BIT(i))) - continue; - + for_each_set_bit(i, &chan_mask, APL_NUM_CHANNELS) { dimm = edac_get_dimm(mci, i, 0, 0); if (!dimm) { edac_dbg(0, "No allocated DIMM for channel %d\n", i); @@ -1228,8 +1226,7 @@ static void apl_get_dimm_config(struct mem_ctl_info *mci) } pvt->dimm_geom[i] = g; - capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) * - (1ul << dimms[g].colbits); + capacity = (d->rken0 + d->rken1) * 8 * BIT(dimms[g].rowbits + dimms[g].colbits); edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3)); dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3)); dimm->grain = 32; @@ -1295,7 +1292,7 @@ static void dnv_get_dimm_config(struct mem_ctl_info *mci) continue; } - capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits); + capacity = ranks_of_dimm[j] * banks * BIT(rowbits + colbits); edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3)); dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3)); dimm->grain = 32; diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index 046969b4e82e..1eea3341a916 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c @@ -1329,8 +1329,7 @@ static int ppc4xx_edac_probe(struct platform_device *op) * * Unconditionally returns 0. 
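The "Unconditionally returns 0." remark above is exactly the kind of note this series makes moot: the ppc4xx conversion that follows, like the other EDAC drivers in this commit, moves from .remove to .remove_new, whose callback returns void because the platform core never acted on the returned int anyway. A sketch of the converted shape; the driver name and callback bodies are illustrative:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
    /* set up MCI / ctl info, request IRQs, etc. */
    return 0;
}

/* The converted shape: no "return 0;" tail, the cleanup path simply ends. */
static void demo_remove(struct platform_device *pdev)
{
    /* undo probe: del device, free ctl info, etc. */
}

static struct platform_driver demo_edac_driver = {
    .probe      = demo_probe,
    .remove_new = demo_remove,  /* void-returning remove callback */
    .driver     = {
        .name = "demo-edac",
    },
};
module_platform_driver(demo_edac_driver);

MODULE_LICENSE("GPL");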
*/ -static int -ppc4xx_edac_remove(struct platform_device *op) +static void ppc4xx_edac_remove(struct platform_device *op) { struct mem_ctl_info *mci = dev_get_drvdata(&op->dev); struct ppc4xx_edac_pdata *pdata = mci->pvt_info; @@ -1344,8 +1343,6 @@ ppc4xx_edac_remove(struct platform_device *op) edac_mc_del_mc(mci->pdev); edac_mc_free(mci); - - return 0; } /** @@ -1379,7 +1376,7 @@ ppc4xx_edac_opstate_init(void) static struct platform_driver ppc4xx_edac_driver = { .probe = ppc4xx_edac_probe, - .remove = ppc4xx_edac_remove, + .remove_new = ppc4xx_edac_remove, .driver = { .name = PPC4XX_EDAC_MODULE_NAME, .of_match_table = ppc4xx_edac_match, diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c index b2db545c6810..5539917c01dd 100644 --- a/drivers/edac/qcom_edac.c +++ b/drivers/edac/qcom_edac.c @@ -390,14 +390,12 @@ irq_done: return rc; } -static int qcom_llcc_edac_remove(struct platform_device *pdev) +static void qcom_llcc_edac_remove(struct platform_device *pdev) { struct edac_device_ctl_info *edev_ctl = dev_get_drvdata(&pdev->dev); edac_device_del_device(edev_ctl->dev); edac_device_free_ctl_info(edev_ctl); - - return 0; } static const struct platform_device_id qcom_llcc_edac_id_table[] = { @@ -408,7 +406,7 @@ MODULE_DEVICE_TABLE(platform, qcom_llcc_edac_id_table); static struct platform_driver qcom_llcc_edac_driver = { .probe = qcom_llcc_edac_probe, - .remove = qcom_llcc_edac_remove, + .remove_new = qcom_llcc_edac_remove, .driver = { .name = "qcom_llcc_edac", }, diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 0c779a0326b6..26cca5a9322d 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -439,7 +439,7 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = { static const struct pci_id_table pci_dev_descr_sbridge_table[] = { PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE), - {0,} /* 0 terminated list. */ + { NULL, } }; /* This changes depending if 1HA or 2HA: @@ -505,7 +505,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = { static const struct pci_id_table pci_dev_descr_ibridge_table[] = { PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE), - {0,} /* 0 terminated list. */ + { NULL, } }; /* Haswell support */ @@ -576,7 +576,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = { static const struct pci_id_table pci_dev_descr_haswell_table[] = { PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL), - {0,} /* 0 terminated list. */ + { NULL, } }; /* Knight's Landing Support */ @@ -620,7 +620,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = { static const struct pci_id_table pci_dev_descr_knl_table[] = { PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING), - {0,} + { NULL, } }; /* @@ -686,7 +686,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = { static const struct pci_id_table pci_dev_descr_broadwell_table[] = { PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL), - {0,} /* 0 terminated list. 
*/ + { NULL, } }; diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index ce3e0069e028..9c5b6f8bd8bd 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -648,6 +648,10 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val, memset(&res, 0, sizeof(res)); res.mce = mce; res.addr = mce->addr & MCI_ADDR_PHYSADDR; + if (!pfn_to_online_page(res.addr >> PAGE_SHIFT)) { + pr_err("Invalid address 0x%llx in IA32_MC%d_ADDR\n", mce->addr, mce->bank); + return NOTIFY_DONE; + } /* Try driver decoder first */ if (!(driver_decode && driver_decode(&res))) { diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index c4fc64cbecd0..709babce43ba 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -1410,7 +1410,7 @@ free_edac_mc: * * Return: Unconditionally 0 */ -static int mc_remove(struct platform_device *pdev) +static void mc_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); struct synps_edac_priv *priv = mci->pvt_info; @@ -1425,8 +1425,6 @@ static int mc_remove(struct platform_device *pdev) edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); - - return 0; } static struct platform_driver synps_edac_mc_driver = { @@ -1435,7 +1433,7 @@ static struct platform_driver synps_edac_mc_driver = { .of_match_table = synps_edac_match, }, .probe = mc_probe, - .remove = mc_remove, + .remove_new = mc_remove, }; module_platform_driver(synps_edac_mc_driver); diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c index b9c5772da959..90d46e5c4ff0 100644 --- a/drivers/edac/thunderx_edac.c +++ b/drivers/edac/thunderx_edac.c @@ -1133,7 +1133,7 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id) decode_register(other, OCX_OTHER_SIZE, ocx_com_errors, ctx->reg_com_int); - strncat(msg, other, OCX_MESSAGE_SIZE); + strlcat(msg, other, OCX_MESSAGE_SIZE); for (lane = 0; lane < OCX_RX_LANES; lane++) if (ctx->reg_com_int & BIT(lane)) { @@ -1142,12 +1142,12 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id) lane, ctx->reg_lane_int[lane], lane, ctx->reg_lane_stat11[lane]); - strncat(msg, other, OCX_MESSAGE_SIZE); + strlcat(msg, other, OCX_MESSAGE_SIZE); decode_register(other, OCX_OTHER_SIZE, ocx_lane_errors, ctx->reg_lane_int[lane]); - strncat(msg, other, OCX_MESSAGE_SIZE); + strlcat(msg, other, OCX_MESSAGE_SIZE); } if (ctx->reg_com_int & OCX_COM_INT_CE) @@ -1217,7 +1217,7 @@ static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id) decode_register(other, OCX_OTHER_SIZE, ocx_com_link_errors, ctx->reg_com_link_int); - strncat(msg, other, OCX_MESSAGE_SIZE); + strlcat(msg, other, OCX_MESSAGE_SIZE); if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE) edac_device_handle_ue(ocx->edac_dev, 0, 0, msg); @@ -1896,7 +1896,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id) decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int); - strncat(msg, other, L2C_MESSAGE_SIZE); + strlcat(msg, other, L2C_MESSAGE_SIZE); if (ctx->reg_int & mask_ue) edac_device_handle_ue(l2c->edac_dev, 0, 0, msg); diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c index 6971ded598de..29723c9592f7 100644 --- a/drivers/edac/ti_edac.c +++ b/drivers/edac/ti_edac.c @@ -312,19 +312,17 @@ err: return ret; } -static int ti_edac_remove(struct platform_device *pdev) +static void ti_edac_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); - - return 0; 
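The thunderx_edac hunks above swap strncat() for strlcat() when assembling error messages. strncat()'s size argument bounds the bytes appended, not the destination size, so passing the buffer size there can still overflow; strlcat() takes the total destination size and truncates. A small demo with a local strlcat(), since glibc does not provide one:

#include <stdio.h>
#include <string.h>

/* Minimal local strlcat() with the BSD contract: 'size' is the TOTAL
 * destination size and the result is always NUL-terminated. */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
    size_t dlen = strnlen(dst, size);
    size_t slen = strlen(src);
    size_t copy;

    if (dlen == size)
        return size + slen;  /* dst was not even terminated */

    copy = slen < size - dlen ? slen : size - dlen - 1;
    memcpy(dst + dlen, src, copy);
    dst[dlen + copy] = '\0';
    return dlen + slen;      /* length it tried to create */
}

int main(void)
{
    char msg[16] = "OCX: ";

    /* strncat(msg, other, sizeof(msg)) would allow up to sizeof(msg)
     * EXTRA bytes on top of the existing contents: an overflow. */
    my_strlcat(msg, "lane 3 CRC error", sizeof(msg));
    printf("%s\n", msg);  /* truncated safely */
    return 0;
}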
} static struct platform_driver ti_edac_driver = { .probe = ti_edac_probe, - .remove = ti_edac_remove, + .remove_new = ti_edac_remove, .driver = { .name = EDAC_MOD_NAME, .of_match_table = ti_edac_of_match, diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c index c52b9dd9154c..1b50f8160013 100644 --- a/drivers/edac/xgene_edac.c +++ b/drivers/edac/xgene_edac.c @@ -1960,7 +1960,7 @@ out_err: return rc; } -static int xgene_edac_remove(struct platform_device *pdev) +static void xgene_edac_remove(struct platform_device *pdev) { struct xgene_edac *edac = dev_get_drvdata(&pdev->dev); struct xgene_edac_mc_ctx *mcu; @@ -1981,8 +1981,6 @@ static int xgene_edac_remove(struct platform_device *pdev) list_for_each_entry_safe(node, temp_node, &edac->socs, next) xgene_edac_soc_remove(node); - - return 0; } static const struct of_device_id xgene_edac_of_match[] = { @@ -1993,7 +1991,7 @@ MODULE_DEVICE_TABLE(of, xgene_edac_of_match); static struct platform_driver xgene_edac_driver = { .probe = xgene_edac_probe, - .remove = xgene_edac_remove, + .remove_new = xgene_edac_remove, .driver = { .name = "xgene-edac", .of_match_table = xgene_edac_of_match, diff --git a/drivers/edac/zynqmp_edac.c b/drivers/edac/zynqmp_edac.c index ac7d1e0b324c..2d9a5cfd8931 100644 --- a/drivers/edac/zynqmp_edac.c +++ b/drivers/edac/zynqmp_edac.c @@ -426,7 +426,7 @@ free_dev_ctl: return ret; } -static int edac_remove(struct platform_device *pdev) +static void edac_remove(struct platform_device *pdev) { struct edac_device_ctl_info *dci = platform_get_drvdata(pdev); struct edac_priv *priv = dci->pvt_info; @@ -440,8 +440,6 @@ static int edac_remove(struct platform_device *pdev) edac_device_del_device(&pdev->dev); edac_device_free_ctl_info(dci); - - return 0; } static const struct of_device_id zynqmp_ocm_edac_match[] = { @@ -457,7 +455,7 @@ static struct platform_driver zynqmp_ocm_edac_driver = { .of_match_table = zynqmp_ocm_edac_match, }, .probe = edac_probe, - .remove = edac_remove, + .remove_new = edac_remove, }; module_platform_driver(zynqmp_ocm_edac_driver); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 7e88fd489741..9db9290c3269 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -279,6 +279,51 @@ static char ohci_driver_name[] = KBUILD_MODNAME; #define QUIRK_TI_SLLZ059 0x20 #define QUIRK_IR_WAKE 0x40 +// On the PCI Express Root Complex of any type of AMD Ryzen machine, a VIA VT6306/6307/6308 behind +// an Asmedia ASM1083/1085 has an inconvenient quirk: read accesses to the 'Isochronous Cycle Timer' +// register (at offset 0xf0 in PCI I/O space) often cause an unexpected system reboot. The mechanism +// is not clear, since reads of the other registers (e.g. the 'Node ID' register) are safe enough; +// it is probably due to detection of some type of PCIe error. +#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ 0x80000000 + +#if IS_ENABLED(CONFIG_X86) + +static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci) +{ + return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ); +} + +#define PCI_DEVICE_ID_ASMEDIA_ASM108X 0x1080 + +static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev) +{ + const struct pci_dev *pcie_to_pci_bridge; + + // Detect any type of AMD Ryzen machine. + if (!static_cpu_has(X86_FEATURE_ZEN)) + return false; + + // Detect VIA VT6306/6307/6308. + if (pdev->vendor != PCI_VENDOR_ID_VIA) + return false; + if (pdev->device != PCI_DEVICE_ID_VIA_VT630X) + return false; + + // Detect Asmedia ASM1083/1085.
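// The walk below relies on PCI topology: pdev->bus->self is the upstream bridge of the
// bus the VT630x sits on, so the vendor/device test that follows only matches when the
// controller really is behind an ASMedia ASM1083/1085 PCIe-to-PCI bridge. The check
// implicitly assumes such a bridge exists (i.e. the VT630x is not on a root bus).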
+ pcie_to_pci_bridge = pdev->bus->self; + if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA) + return false; + if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X) + return false; + + return true; +} + +#else +#define has_reboot_by_cycle_timer_read_quirk(ohci) false +#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev) false +#endif + /* In case of multiple matches in ohci_quirks[], only the first one is used. */ static const struct { unsigned short vendor, device, revision, flags; @@ -1724,6 +1769,9 @@ static u32 get_cycle_time(struct fw_ohci *ohci) s32 diff01, diff12; int i; + if (has_reboot_by_cycle_timer_read_quirk(ohci)) + return 0; + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); if (ohci->quirks & QUIRK_CYCLE_TIMER) { @@ -3630,6 +3678,9 @@ static int pci_probe(struct pci_dev *dev, if (param_quirks) ohci->quirks = param_quirks; + if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev)) + ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ; + /* * Because dma_alloc_coherent() allocates at least one page, * we save space by using a common buffer for the AR request/ diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c index f80d87c199c3..937be269fee8 100644 --- a/drivers/firmware/efi/dev-path-parser.c +++ b/drivers/firmware/efi/dev-path-parser.c @@ -18,8 +18,6 @@ static long __init parse_acpi_path(const struct efi_dev_path *node, struct acpi_device *adev; struct device *phys_dev; char hid[ACPI_ID_LEN]; - u64 uid; - int ret; if (node->header.length != 12) return -EINVAL; @@ -31,10 +29,9 @@ static long __init parse_acpi_path(const struct efi_dev_path *node, node->acpi.hid >> 16); for_each_acpi_dev_match(adev, hid, NULL, -1) { - ret = acpi_dev_uid_to_integer(adev, &uid); - if (ret == 0 && node->acpi.uid == uid) + if (acpi_dev_uid_match(adev, node->acpi.uid)) break; - if (ret == -ENODATA && node->acpi.uid == 0) + if (!acpi_device_uid(adev) && node->acpi.uid == 0) break; } if (!adev) diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot index 2c489627a807..65ffd0b760b2 100644 --- a/drivers/firmware/efi/libstub/Makefile.zboot +++ b/drivers/firmware/efi/libstub/Makefile.zboot @@ -5,8 +5,8 @@ # EFI_ZBOOT_FORWARD_CFI quiet_cmd_copy_and_pad = PAD $@ - cmd_copy_and_pad = cp $< $@ && \ - truncate -s $(shell hexdump -s16 -n4 -e '"%u"' $<) $@ + cmd_copy_and_pad = cp $< $@; \ + truncate -s $$(hexdump -s16 -n4 -e '"%u"' $<) $@ # Pad the file to the size of the uncompressed image in memory, including BSS $(obj)/vmlinux.bin: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c index 479dd445acdc..77359e802181 100644 --- a/drivers/firmware/efi/libstub/x86-5lvl.c +++ b/drivers/firmware/efi/libstub/x86-5lvl.c @@ -13,8 +13,8 @@ bool efi_no5lvl; static void (*la57_toggle)(void *cr3); static const struct desc_struct gdt[] = { - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), + [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff), + [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(DESC_CODE64, 0, 0xfffff), }; /* diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index da9b7b8d0716..0d510c9a06a4 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -787,6 +787,8 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry) efi_debug("AMI 
firmware v2.0 or older detected - disabling physical KASLR\n"); seed[0] = 0; } + + boot_params_ptr->hdr.loadflags |= KASLR_FLAG; } status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr, diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index dd7a783d53b5..e73f88050f08 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -1872,7 +1872,7 @@ static irqreturn_t dfl_irq_handler(int irq, void *arg) { struct eventfd_ctx *trigger = arg; - eventfd_signal(trigger, 1); + eventfd_signal(trigger); return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 8dee52ce26d0..93cf73d6fa11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2188,15 +2188,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) adev->firmware.gpu_info_fw = NULL; - if (adev->mman.discovery_bin) { - /* - * FIXME: The bounding box is still needed by Navi12, so - * temporarily read it from gpu_info firmware. Should be dropped - * when DAL no longer needs it. - */ - if (adev->asic_type != CHIP_NAVI12) - return 0; - } + if (adev->mman.discovery_bin) + return 0; switch (adev->asic_type) { default: diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c8c00c2a5224..4e82ee4d74ac 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6170,8 +6170,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); - - if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) { + else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || + stream->signal == SIGNAL_TYPE_EDP) { // // should decide stream support vsc sdp colorimetry capability // before building vsc info packet @@ -6187,8 +6188,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); - aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; + if (stream->link->psr_settings.psr_feature_enabled) + aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; } finish: dc_sink_release(sink); @@ -6914,8 +6916,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, if (IS_ERR(mst_state)) return PTR_ERR(mst_state); - if (!mst_state->pbn_div) - mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link); + mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link); if (!state->duplicated) { int max_bpc = conn_state->max_requested_bpc; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index ec77b2b41ba3..d2271e308fa0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -440,7 +440,115 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = { .use_urgent_burst_bw = 0 }; -struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 }; +struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { + .clock_limits = { + { + .state = 0, + .dcfclk_mhz = 560.0, + .fabricclk_mhz = 560.0, + .dispclk_mhz = 513.0, + 
.dppclk_mhz = 513.0, + .phyclk_mhz = 540.0, + .socclk_mhz = 560.0, + .dscclk_mhz = 171.0, + .dram_speed_mts = 1069.0, + }, + { + .state = 1, + .dcfclk_mhz = 694.0, + .fabricclk_mhz = 694.0, + .dispclk_mhz = 642.0, + .dppclk_mhz = 642.0, + .phyclk_mhz = 600.0, + .socclk_mhz = 694.0, + .dscclk_mhz = 214.0, + .dram_speed_mts = 1324.0, + }, + { + .state = 2, + .dcfclk_mhz = 875.0, + .fabricclk_mhz = 875.0, + .dispclk_mhz = 734.0, + .dppclk_mhz = 734.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 875.0, + .dscclk_mhz = 245.0, + .dram_speed_mts = 1670.0, + }, + { + .state = 3, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 1000.0, + .dispclk_mhz = 1100.0, + .dppclk_mhz = 1100.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1000.0, + .dscclk_mhz = 367.0, + .dram_speed_mts = 2000.0, + }, + { + .state = 4, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 2000.0, + }, + { + .state = 5, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 2000.0, + }, + }, + + .num_states = 5, + .sr_exit_time_us = 1.9, + .sr_enter_plus_exit_time_us = 4.4, + .urgent_latency_us = 3.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 40.0, + .max_avg_dram_bw_use_normal_percent = 40.0, + .writeback_latency_us = 12.0, + .ideal_dram_bw_after_urgent_percent = 40.0, + .max_request_size_bytes = 256, + .dram_channel_width_bytes = 16, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.5, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 131, + .urgent_out_of_order_return_per_channel_bytes = 4096, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 16, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 45.0, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3850, + .xfc_bus_transport_time_us = 20, + .xfc_xbuf_latency_tolerance_us = 50, + .use_urgent_burst_bw = 0, +}; struct _vcs_dpi_ip_params_st dcn2_1_ip = { .odm_capable = 1, diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 84f9b412a4f1..738ee763f24a 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -147,12 +147,15 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, } /* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */ - if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) - vsc_packet_revision = vsc_packet_rev4; - else if (stream->link->replay_settings.config.replay_supported) + if (stream->link->psr_settings.psr_feature_enabled) { + if 
(stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) + vsc_packet_revision = vsc_packet_rev4; + else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) + vsc_packet_revision = vsc_packet_rev2; + } + + if (stream->link->replay_settings.config.replay_supported) vsc_packet_revision = vsc_packet_rev4; - else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) - vsc_packet_revision = vsc_packet_rev2; /* Update to revision 5 for extended colorimetry support */ if (stream->use_vsc_sdp_for_colorimetry) diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 1c5049e894e3..c2ccf3724e37 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -318,6 +318,7 @@ enum pp_xgmi_plpd_mode { #define MAX_GFX_CLKS 8 #define MAX_CLKS 4 #define NUM_VCN 4 +#define NUM_JPEG_ENG 32 struct seq_file; enum amd_pp_clock_type; @@ -774,6 +775,85 @@ struct gpu_metrics_v1_4 { uint16_t padding; }; +struct gpu_metrics_v1_5 { + struct metrics_table_header common_header; + + /* Temperature (Celsius) */ + uint16_t temperature_hotspot; + uint16_t temperature_mem; + uint16_t temperature_vrsoc; + + /* Power (Watts) */ + uint16_t curr_socket_power; + + /* Utilization (%) */ + uint16_t average_gfx_activity; + uint16_t average_umc_activity; // memory controller + uint16_t vcn_activity[NUM_VCN]; + uint16_t jpeg_activity[NUM_JPEG_ENG]; + + /* Energy (15.259uJ (2^-16) units) */ + uint64_t energy_accumulator; + + /* Driver attached timestamp (in ns) */ + uint64_t system_clock_counter; + + /* Throttle status */ + uint32_t throttle_status; + + /* Clock Lock Status. Each bit corresponds to clock instance */ + uint32_t gfxclk_lock_status; + + /* Link width (number of lanes) and speed (in 0.1 GT/s) */ + uint16_t pcie_link_width; + uint16_t pcie_link_speed; + + /* XGMI bus width and bitrate (in Gbps) */ + uint16_t xgmi_link_width; + uint16_t xgmi_link_speed; + + /* Utilization Accumulated (%) */ + uint32_t gfx_activity_acc; + uint32_t mem_activity_acc; + + /*PCIE accumulated bandwidth (GB/sec) */ + uint64_t pcie_bandwidth_acc; + + /*PCIE instantaneous bandwidth (GB/sec) */ + uint64_t pcie_bandwidth_inst; + + /* PCIE L0 to recovery state transition accumulated count */ + uint64_t pcie_l0_to_recov_count_acc; + + /* PCIE replay accumulated count */ + uint64_t pcie_replay_count_acc; + + /* PCIE replay rollover accumulated count */ + uint64_t pcie_replay_rover_count_acc; + + /* PCIE NAK sent accumulated count */ + uint32_t pcie_nak_sent_count_acc; + + /* PCIE NAK received accumulated count */ + uint32_t pcie_nak_rcvd_count_acc; + + /* XGMI accumulated data transfer size(KiloBytes) */ + uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS]; + uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS]; + + /* PMFW attached timestamp (10ns resolution) */ + uint64_t firmware_timestamp; + + /* Current clocks (Mhz) */ + uint16_t current_gfxclk[MAX_GFX_CLKS]; + uint16_t current_socclk[MAX_CLKS]; + uint16_t current_vclk0[MAX_CLKS]; + uint16_t current_dclk0[MAX_CLKS]; + uint16_t current_uclk; + + uint16_t padding; +}; + /* * gpu_metrics_v2_0 is not recommended as it's not naturally aligned. * Use gpu_metrics_v2_1 or later instead. 
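The kgd_pp_interface.h chunk above adds gpu_metrics_v1_5, extending v1_4 with per-engine VCN/JPEG activity (jpeg_activity[NUM_JPEG_ENG]) and PCIe NAK counters. Consumers of the shared buffer pick the layout from the common metrics_table_header revision fields; a sketch of that dispatch, with the header layout shown here being illustrative:

#include <stdint.h>
#include <stdio.h>

/* Illustrative header: the real metrics_table_header carries a structure
 * size plus format/content revision fields used to pick the layout. */
struct metrics_header_example {
    uint16_t structure_size;
    uint8_t  format_revision;   /* e.g. 1 for the gpu_metrics_v1_x family */
    uint8_t  content_revision;  /* e.g. 4 or 5 */
};

static const char *pick_layout(const struct metrics_header_example *h)
{
    if (h->format_revision == 1) {
        switch (h->content_revision) {
        case 4: return "gpu_metrics_v1_4";
        case 5: return "gpu_metrics_v1_5";  /* adds VCN/JPEG activity */
        }
    }
    return "unknown layout";
}

int main(void)
{
    struct metrics_header_example h = { sizeof(h), 1, 5 };

    printf("%s\n", pick_layout(&h));
    return 0;
}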
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 49028dde0f87..20c53eefd680 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2128,7 +2128,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ if (amdgpu_dpm_is_overdrive_supported(adev)) *states = ATTR_STATE_SUPPORTED; } else if (DEVICE_ATTR_IS(mem_busy_percent)) { - if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1)) + if ((adev->flags & AMD_IS_APU && + gc_ver != IP_VERSION(9, 4, 3)) || + gc_ver == IP_VERSION(9, 0, 1)) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pcie_bw)) { /* PCIe Perf counters won't work on APU nodes */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index fef2d290f3f2..7b812b9994d7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -123,7 +123,7 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0x9 +#define SMU_METRICS_TABLE_VERSION 0xB typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; @@ -219,7 +219,103 @@ typedef struct __attribute__((packed, aligned(4))) { uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated -} MetricsTable_t; + + // VCN/JPEG ACTIVITY + uint32_t VcnBusy[4]; + uint32_t JpegBusy[32]; +} MetricsTableX_t; + +typedef struct __attribute__((packed, aligned(4))) { + uint32_t AccumulationCounter; + + //TEMPERATURE + uint32_t MaxSocketTemperature; + uint32_t MaxVrTemperature; + uint32_t MaxHbmTemperature; + uint64_t MaxSocketTemperatureAcc; + uint64_t MaxVrTemperatureAcc; + uint64_t MaxHbmTemperatureAcc; + + //POWER + uint32_t SocketPowerLimit; + uint32_t MaxSocketPowerLimit; + uint32_t SocketPower; + + //ENERGY + uint64_t Timestamp; + uint64_t SocketEnergyAcc; + uint64_t CcdEnergyAcc; + uint64_t XcdEnergyAcc; + uint64_t AidEnergyAcc; + uint64_t HbmEnergyAcc; + + //FREQUENCY + uint32_t CclkFrequencyLimit; + uint32_t GfxclkFrequencyLimit; + uint32_t FclkFrequency; + uint32_t UclkFrequency; + uint32_t SocclkFrequency[4]; + uint32_t VclkFrequency[4]; + uint32_t DclkFrequency[4]; + uint32_t LclkFrequency[4]; + uint64_t GfxclkFrequencyAcc[8]; + uint64_t CclkFrequencyAcc[96]; + + //FREQUENCY RANGE + uint32_t MaxCclkFrequency; + uint32_t MinCclkFrequency; + uint32_t MaxGfxclkFrequency; + uint32_t MinGfxclkFrequency; + uint32_t FclkFrequencyTable[4]; + uint32_t UclkFrequencyTable[4]; + uint32_t SocclkFrequencyTable[4]; + uint32_t VclkFrequencyTable[4]; + uint32_t DclkFrequencyTable[4]; + uint32_t LclkFrequencyTable[4]; + uint32_t MaxLclkDpmRange; + uint32_t MinLclkDpmRange; + + //XGMI + uint32_t XgmiWidth; + uint32_t XgmiBitrate; + uint64_t XgmiReadBandwidthAcc[8]; + uint64_t XgmiWriteBandwidthAcc[8]; + + //ACTIVITY + uint32_t SocketC0Residency; + uint32_t SocketGfxBusy; + uint32_t DramBandwidthUtilization; + uint64_t SocketC0ResidencyAcc; + uint64_t SocketGfxBusyAcc; + uint64_t DramBandwidthAcc; + uint32_t MaxDramBandwidth; + uint64_t DramBandwidthUtilizationAcc; + uint64_t PcieBandwidthAcc[4]; + + //THROTTLERS + uint32_t ProchotResidencyAcc; + uint32_t PptResidencyAcc; + uint32_t SocketThmResidencyAcc; + uint32_t VrThmResidencyAcc; + uint32_t 
HbmThmResidencyAcc; + uint32_t GfxLockXCDMak; + + // New Items at end to maintain driver compatibility + uint32_t GfxclkFrequency[8]; + + //PSNs + uint64_t PublicSerialNumber_AID[4]; + uint64_t PublicSerialNumber_XCD[8]; + uint64_t PublicSerialNumber_CCD[12]; + + //XGMI Data transfer size + uint64_t XgmiReadDataSizeAcc[8];//in KByte + uint64_t XgmiWriteDataSizeAcc[8];//in KByte + + // VCN/JPEG ACTIVITY + uint32_t VcnBusy[4]; + uint32_t JpegBusy[32]; +} MetricsTableA_t; #define SMU_VF_METRICS_TABLE_VERSION 0x3 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 900a2d9e6d85..b64e07b75937 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -245,6 +245,8 @@ struct PPTable_t { #define SMUQ10_TO_UINT(x) ((x) >> 10) #define SMUQ10_FRAC(x) ((x) & 0x3ff) #define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200)) +#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\ + (metrics_a->field) : (metrics_x->field)) struct smu_v13_0_6_dpm_map { enum smu_clk_type clk_type; @@ -327,7 +329,8 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t), + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, + max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT); @@ -335,12 +338,13 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT); - smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL); + smu_table->metrics_table = kzalloc(max(sizeof(MetricsTableX_t), + sizeof(MetricsTableA_t)), GFP_KERNEL); if (!smu_table->metrics_table) return -ENOMEM; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_4); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) { @@ -431,9 +435,11 @@ static int smu_v13_0_6_get_metrics_table(struct smu_context *smu, static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; - MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; + MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table; + MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table; struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; + struct amdgpu_device *adev = smu->adev; int ret, i, retry = 100; /* Store one-time values in driver PPTable */ @@ -444,7 +450,7 @@ return ret; /* Ensure that metrics have been updated */ - if (metrics->AccumulationCounter) + if (GET_METRIC_FIELD(AccumulationCounter)) break; usleep_range(1000, 1100); @@ -454,29 +460,29 @@ return -ETIME; pptable->MaxSocketPowerLimit = - SMUQ10_ROUND(metrics->MaxSocketPowerLimit); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit)); pptable->MaxGfxclkFrequency = - SMUQ10_ROUND(metrics->MaxGfxclkFrequency); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency)); pptable->MinGfxclkFrequency = - SMUQ10_ROUND(metrics->MinGfxclkFrequency); 
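/*
 * Illustrative sketch, not part of the patch: GET_METRIC_FIELD() expands
 * against three identifiers that must already be in scope at the call
 * site -- adev, metrics_x and metrics_a -- and selects the APU
 * (MetricsTableA_t) or discrete (MetricsTableX_t) view of the one shared
 * metrics buffer. A hypothetical helper built on it would look like this:
 */
static u32 smu_read_socket_power(struct amdgpu_device *adev,
				 MetricsTableX_t *metrics_x,
				 MetricsTableA_t *metrics_a)
{
	/* expands to: (adev->flags & AMD_IS_APU) ?
	 *		metrics_a->SocketPower : metrics_x->SocketPower */
	return SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower));
}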
+ SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency)); for (i = 0; i < 4; ++i) { pptable->FclkFrequencyTable[i] = - SMUQ10_ROUND(metrics->FclkFrequencyTable[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]); pptable->UclkFrequencyTable[i] = - SMUQ10_ROUND(metrics->UclkFrequencyTable[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]); pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND( - metrics->SocclkFrequencyTable[i]); + GET_METRIC_FIELD(SocclkFrequencyTable)[i]); pptable->VclkFrequencyTable[i] = - SMUQ10_ROUND(metrics->VclkFrequencyTable[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]); pptable->DclkFrequencyTable[i] = - SMUQ10_ROUND(metrics->DclkFrequencyTable[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]); pptable->LclkFrequencyTable[i] = - SMUQ10_ROUND(metrics->LclkFrequencyTable[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]); } /* use AID0 serial number by default */ - pptable->PublicSerialNumber_AID = metrics->PublicSerialNumber_AID[0]; + pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0]; pptable->Init = true; } @@ -778,7 +784,8 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, uint32_t *value) { struct smu_table_context *smu_table = &smu->smu_table; - MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; + MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table; + MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table; struct amdgpu_device *adev = smu->adev; int ret = 0; int xcc_id; @@ -793,50 +800,50 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, case METRICS_AVERAGE_GFXCLK: if (smu->smc_fw_version >= 0x552F00) { xcc_id = GET_INST(GC, 0); - *value = SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]); } else { *value = 0; } break; case METRICS_CURR_SOCCLK: case METRICS_AVERAGE_SOCCLK: - *value = SMUQ10_ROUND(metrics->SocclkFrequency[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]); break; case METRICS_CURR_UCLK: case METRICS_AVERAGE_UCLK: - *value = SMUQ10_ROUND(metrics->UclkFrequency); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency)); break; case METRICS_CURR_VCLK: - *value = SMUQ10_ROUND(metrics->VclkFrequency[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]); break; case METRICS_CURR_DCLK: - *value = SMUQ10_ROUND(metrics->DclkFrequency[0]); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]); break; case METRICS_CURR_FCLK: - *value = SMUQ10_ROUND(metrics->FclkFrequency); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency)); break; case METRICS_AVERAGE_GFXACTIVITY: - *value = SMUQ10_ROUND(metrics->SocketGfxBusy); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy)); break; case METRICS_AVERAGE_MEMACTIVITY: - *value = SMUQ10_ROUND(metrics->DramBandwidthUtilization); + *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization)); break; case METRICS_CURR_SOCKETPOWER: - *value = SMUQ10_ROUND(metrics->SocketPower) << 8; + *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8; break; case METRICS_TEMPERATURE_HOTSPOT: - *value = SMUQ10_ROUND(metrics->MaxSocketTemperature) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; case METRICS_TEMPERATURE_MEM: - *value = SMUQ10_ROUND(metrics->MaxHbmTemperature) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; /* 
This is the max of all VRs and not just SOC VR. * No need to define another data type for the same. */ case METRICS_TEMPERATURE_VRSOC: - *value = SMUQ10_ROUND(metrics->MaxVrTemperature) * + *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; default: @@ -2022,67 +2029,70 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu) static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_4 *gpu_metrics = - (struct gpu_metrics_v1_4 *)smu_table->gpu_metrics_table; + struct gpu_metrics_v1_5 *gpu_metrics = + (struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table; struct amdgpu_device *adev = smu->adev; - int ret = 0, xcc_id, inst, i; - MetricsTable_t *metrics; + int ret = 0, xcc_id, inst, i, j; + MetricsTableX_t *metrics_x; + MetricsTableA_t *metrics_a; u16 link_width_level; - metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL); - ret = smu_v13_0_6_get_metrics_table(smu, metrics, true); + metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL); + ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true); if (ret) { - kfree(metrics); + kfree(metrics_x); return ret; } - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 4); + metrics_a = (MetricsTableA_t *)metrics_x; + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5); gpu_metrics->temperature_hotspot = - SMUQ10_ROUND(metrics->MaxSocketTemperature); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)); /* Individual HBM stack temperature is not reported */ gpu_metrics->temperature_mem = - SMUQ10_ROUND(metrics->MaxHbmTemperature); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)); /* Reports max temperature of all voltage rails */ gpu_metrics->temperature_vrsoc = - SMUQ10_ROUND(metrics->MaxVrTemperature); + SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)); gpu_metrics->average_gfx_activity = - SMUQ10_ROUND(metrics->SocketGfxBusy); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy)); gpu_metrics->average_umc_activity = - SMUQ10_ROUND(metrics->DramBandwidthUtilization); + SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization)); gpu_metrics->curr_socket_power = - SMUQ10_ROUND(metrics->SocketPower); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)); /* Energy counter reported in 15.259uJ (2^-16) units */ - gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc; + gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc); for (i = 0; i < MAX_GFX_CLKS; i++) { xcc_id = GET_INST(GC, i); if (xcc_id >= 0) gpu_metrics->current_gfxclk[i] = - SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]); + SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]); if (i < MAX_CLKS) { gpu_metrics->current_socclk[i] = - SMUQ10_ROUND(metrics->SocclkFrequency[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]); inst = GET_INST(VCN, i); if (inst >= 0) { gpu_metrics->current_vclk0[i] = - SMUQ10_ROUND(metrics->VclkFrequency[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]); gpu_metrics->current_dclk0[i] = - SMUQ10_ROUND(metrics->DclkFrequency[inst]); + SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]); } } } - gpu_metrics->current_uclk = SMUQ10_ROUND(metrics->UclkFrequency); + gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency)); /* Throttle status is not reported through metrics now */ gpu_metrics->throttle_status = 0; /* Clock Lock Status. 
Each bit corresponds to each GFXCLK instance */ - gpu_metrics->gfxclk_lock_status = metrics->GfxLockXCDMak >> GET_INST(GC, 0); + gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0); if (!(adev->flags & AMD_IS_APU)) { link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu); @@ -2094,38 +2104,57 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->pcie_link_speed = smu_v13_0_6_get_current_pcie_link_speed(smu); gpu_metrics->pcie_bandwidth_acc = - SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]); + SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]); gpu_metrics->pcie_bandwidth_inst = - SMUQ10_ROUND(metrics->PcieBandwidth[0]); + SMUQ10_ROUND(metrics_x->PcieBandwidth[0]); gpu_metrics->pcie_l0_to_recov_count_acc = - metrics->PCIeL0ToRecoveryCountAcc; + metrics_x->PCIeL0ToRecoveryCountAcc; gpu_metrics->pcie_replay_count_acc = - metrics->PCIenReplayAAcc; + metrics_x->PCIenReplayAAcc; gpu_metrics->pcie_replay_rover_count_acc = - metrics->PCIenReplayARolloverCountAcc; + metrics_x->PCIenReplayARolloverCountAcc; + gpu_metrics->pcie_nak_sent_count_acc = + metrics_x->PCIeNAKSentCountAcc; + gpu_metrics->pcie_nak_rcvd_count_acc = + metrics_x->PCIeNAKReceivedCountAcc; } gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); gpu_metrics->gfx_activity_acc = - SMUQ10_ROUND(metrics->SocketGfxBusyAcc); + SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc)); gpu_metrics->mem_activity_acc = - SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc); + SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc)); for (i = 0; i < NUM_XGMI_LINKS; i++) { gpu_metrics->xgmi_read_data_acc[i] = - SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]); gpu_metrics->xgmi_write_data_acc[i] = - SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]); + SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]); + } + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + inst = GET_INST(JPEG, i); + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] = + SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy) + [(inst * adev->jpeg.num_jpeg_rings) + j]); + } + } + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + inst = GET_INST(VCN, i); + gpu_metrics->vcn_activity[i] = + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]); } - gpu_metrics->xgmi_link_width = SMUQ10_ROUND(metrics->XgmiWidth); - gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(metrics->XgmiBitrate); + gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth)); + gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate)); - gpu_metrics->firmware_timestamp = metrics->Timestamp; + gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp); *table = (void *)gpu_metrics; - kfree(metrics); + kfree(metrics_x); return sizeof(*gpu_metrics); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 001a5cf09657..00cd615bbcdc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -989,6 +989,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) case METRICS_VERSION(1, 4): structure_size = sizeof(struct gpu_metrics_v1_4); break; + case METRICS_VERSION(1, 5): + structure_size = sizeof(struct gpu_metrics_v1_5); + break; case METRICS_VERSION(2, 0): structure_size = sizeof(struct gpu_metrics_v2_0); break; diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c 
b/drivers/gpu/drm/bridge/parade-ps8640.c index 8161b1a1a4b1..541e4f5afc4c 100644 --- a/drivers/gpu/drm/bridge/parade-ps8640.c +++ b/drivers/gpu/drm/bridge/parade-ps8640.c @@ -210,7 +210,7 @@ static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux, struct ps8640 *ps_bridge = aux_to_ps8640(aux); struct regmap *map = ps_bridge->regmap[PAGE0_DP_CNTL]; struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev; - unsigned int len = msg->size; + size_t len = msg->size; unsigned int data; unsigned int base; int ret; @@ -330,11 +330,12 @@ static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux, return ret; } - buf[i] = data; + if (i < msg->size) + buf[i] = data; } } - return len; + return min(len, msg->size); } static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux, diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index c45c07840f64..b5464199b633 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -527,6 +527,7 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, u32 request_val = AUX_CMD_REQ(msg->request); u8 *buf = msg->buffer; unsigned int len = msg->size; + unsigned int short_len; unsigned int val; int ret; u8 addr_len[SN_AUX_LENGTH_REG + 1 - SN_AUX_ADDR_19_16_REG]; @@ -600,7 +601,8 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, } if (val & AUX_IRQ_STATUS_AUX_SHORT) { - ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &len); + ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &short_len); + len = min(len, short_len); if (ret) goto exit; } else if (val & AUX_IRQ_STATUS_NAT_I2C_FAIL) { diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 01da6789d044..b9cc62982196 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -1365,7 +1365,7 @@ static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence, struct syncobj_eventfd_entry *entry = container_of(cb, struct syncobj_eventfd_entry, fence_cb); - eventfd_signal(entry->ev_fd_ctx, 1); + eventfd_signal(entry->ev_fd_ctx); syncobj_eventfd_entry_free(entry); } @@ -1388,13 +1388,13 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj, entry->fence = fence; if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) { - eventfd_signal(entry->ev_fd_ctx, 1); + eventfd_signal(entry->ev_fd_ctx); syncobj_eventfd_entry_free(entry); } else { ret = dma_fence_add_callback(fence, &entry->fence_cb, syncobj_eventfd_entry_fence_func); if (ret == -ENOENT) { - eventfd_signal(entry->ev_fd_ctx, 1); + eventfd_signal(entry->ev_fd_ctx); syncobj_eventfd_entry_free(entry); } } diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index b21bcd40f111..62ce92772367 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4496,7 +4496,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp, intel_dp->train_set, crtc_state->lane_count); drm_dp_set_phy_test_pattern(&intel_dp->aux, data, - link_status[DP_DPCD_REV]); + intel_dp->dpcd[DP_DPCD_REV]); } static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index 6bc26b4b06b8..ea7561ae6e13 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -36,7 +36,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) struct sg_table *st; struct scatterlist 
*sg; unsigned int npages; /* restricted by sg_alloc_table */ - int max_order = MAX_ORDER; + int max_order = MAX_PAGE_ORDER; unsigned int max_segment; gfp_t gfp; diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 6b9f6cf50bf6..84c50c4c4af7 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -115,7 +115,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj) do { struct page *page; - GEM_BUG_ON(order > MAX_ORDER); + GEM_BUG_ON(order > MAX_PAGE_ORDER); page = alloc_pages(GFP | __GFP_ZERO, order); if (!page) goto err; diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index de3f5903d1a7..c8e7dfc9f791 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -422,7 +422,7 @@ static void init_irq_map(struct intel_gvt_irq *irq) #define MSI_CAP_DATA(offset) (offset + 8) #define MSI_CAP_EN 0x1 -static int inject_virtual_interrupt(struct intel_vgpu *vgpu) +static void inject_virtual_interrupt(struct intel_vgpu *vgpu) { unsigned long offset = vgpu->gvt->device_info.msi_cap_offset; u16 control, data; @@ -434,10 +434,10 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu) /* Do not generate MSI if MSIEN is disabled */ if (!(control & MSI_CAP_EN)) - return 0; + return; if (WARN(control & GENMASK(15, 1), "only support one MSI format\n")) - return -EINVAL; + return; trace_inject_msi(vgpu->id, addr, data); @@ -451,10 +451,9 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu) * returned and don't inject interrupt into guest. */ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) - return -ESRCH; - if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1) - return -EFAULT; - return 0; + return; + if (vgpu->msi_trigger) + eventfd_signal(vgpu->msi_trigger); } static void propagate_event(struct intel_gvt_irq *irq, diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 7b1c8de2f9cb..2d695818f006 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -772,10 +772,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, * The reason field includes flags identifying what * triggered this specific report (mostly timer * triggered or e.g. due to a context switch). - * - * In MMIO triggered reports, some platforms do not set the - * reason bit in this field and it is valid to have a reason - * field of zero. */ reason = oa_report_reason(stream, report); ctx_id = oa_context_id(stream, report32); @@ -787,8 +783,41 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, * * Note: that we don't clear the valid_ctx_bit so userspace can * understand that the ID has been squashed by the kernel. + * + * Update: + * + * On XEHP platforms the behavior of context id valid bit has + * changed compared to prior platforms. To describe this, we + * define a few terms: + * + * context-switch-report: This is a report with the reason type + * being context-switch. It is generated when a context switches + * out. + * + * context-valid-bit: A bit that is set in the report ID field + * to indicate that a valid context has been loaded. + * + * gpu-idle: A condition characterized by a + * context-switch-report with context-valid-bit set to 0. + * + * On prior platforms, context-id-valid bit is set to 0 only + * when GPU goes idle. In all other reports, it is set to 1. 
+ * + * On XEHP platforms, context-valid-bit is set to 1 in a context + * switch report if a new context switched in. For all other + * reports it is set to 0. + * + * This change in behavior causes an issue with MMIO triggered + * reports. MMIO triggered reports have the markers in the + * context ID field and the context-valid-bit is 0. The logic + * below to squash the context ID would render the report + * useless since the user will not be able to find it in the OA + * buffer. Since MMIO triggered reports exist only on XEHP, + * we should avoid squashing these for XEHP platforms. */ - if (oa_report_ctx_invalid(stream, report)) { + + if (oa_report_ctx_invalid(stream, report) && + GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) { ctx_id = INVALID_CTX_ID; oa_context_id_squash(stream, report32); } diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 57c7edcab602..765e49fd8911 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -392,6 +392,11 @@ void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane, .destroy = drm_plane_cleanup, \ DRM_GEM_SHADOW_PLANE_FUNCS +void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, const struct drm_format_info *format); +void mgag200_crtc_set_gamma(struct mga_device *mdev, + const struct drm_format_info *format, + struct drm_color_lut *lut); + enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode); int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state); diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c index bce267e0f7de..8d4538b71047 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200er.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c @@ -202,6 +202,11 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_g200er_reset_tagfifo(mdev); + if (crtc_state->gamma_lut) + mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data); + else + mgag200_crtc_set_gamma_linear(mdev, format); + mgag200_enable_display(mdev); if (funcs->enable_vidrst) diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c index ac957f42abe1..56e6f986bff3 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c @@ -203,6 +203,11 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_g200ev_set_hiprilvl(mdev); + if (crtc_state->gamma_lut) + mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data); + else + mgag200_crtc_set_gamma_linear(mdev, format); + mgag200_enable_display(mdev); if (funcs->enable_vidrst) diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c index bd6e573c9a1a..ff2b3c6622e7 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200se.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c @@ -334,6 +334,11 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, format); + if (crtc_state->gamma_lut) + mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data); + else + mgag200_crtc_set_gamma_linear(mdev, format); + mgag200_enable_display(mdev); if (funcs->enable_vidrst) diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index af3ce5a6a636..0f0d59938c3a 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ 
b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -28,8 +28,8 @@ * This file contains setup code for the CRTC. */ -static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, - const struct drm_format_info *format) +void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, + const struct drm_format_info *format) { int i; @@ -65,9 +65,9 @@ static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, } } -static void mgag200_crtc_set_gamma(struct mga_device *mdev, - const struct drm_format_info *format, - struct drm_color_lut *lut) +void mgag200_crtc_set_gamma(struct mga_device *mdev, + const struct drm_format_info *format, + struct drm_color_lut *lut) { int i; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 2fa0445d8928..d1437c08645f 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -187,7 +187,7 @@ struct nvkm_gsp { void (*rpc_done)(struct nvkm_gsp *gsp, void *repv); void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc); - void *(*rm_ctrl_push)(struct nvkm_gsp_object *, void *argv, u32 repc); + int (*rm_ctrl_push)(struct nvkm_gsp_object *, void **argv, u32 repc); void (*rm_ctrl_done)(struct nvkm_gsp_object *, void *repv); void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc); @@ -265,7 +265,7 @@ nvkm_gsp_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc) return object->client->gsp->rm->rm_ctrl_get(object, cmd, argc); } -static inline void * +static inline int nvkm_gsp_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc) { return object->client->gsp->rm->rm_ctrl_push(object, argv, repc); @@ -275,21 +275,24 @@ static inline void * nvkm_gsp_rm_ctrl_rd(struct nvkm_gsp_object *object, u32 cmd, u32 repc) { void *argv = nvkm_gsp_rm_ctrl_get(object, cmd, repc); + int ret; if (IS_ERR(argv)) return argv; - return nvkm_gsp_rm_ctrl_push(object, argv, repc); + ret = nvkm_gsp_rm_ctrl_push(object, &argv, repc); + if (ret) + return ERR_PTR(ret); + return argv; } static inline int nvkm_gsp_rm_ctrl_wr(struct nvkm_gsp_object *object, void *argv) { - void *repv = nvkm_gsp_rm_ctrl_push(object, argv, 0); - - if (IS_ERR(repv)) - return PTR_ERR(repv); + int ret = nvkm_gsp_rm_ctrl_push(object, &argv, 0); + if (ret) + return ret; return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index ca762ea55413..5057d976fa57 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -62,7 +62,7 @@ nouveau_fence_signal(struct nouveau_fence *fence) if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) { struct nouveau_fence_chan *fctx = nouveau_fctx(fence); - if (!--fctx->notify_ref) + if (atomic_dec_and_test(&fctx->notify_ref)) drop = 1; } @@ -103,6 +103,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error) void nouveau_fence_context_del(struct nouveau_fence_chan *fctx) { + cancel_work_sync(&fctx->allow_block_work); nouveau_fence_context_kill(fctx, 0); nvif_event_dtor(&fctx->event); fctx->dead = 1; @@ -167,6 +168,18 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc return ret; } +static void +nouveau_fence_work_allow_block(struct work_struct *work) +{ + struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan, + allow_block_work); + + if (atomic_read(&fctx->notify_ref) == 0) + nvif_event_block(&fctx->event); + else + 
nvif_event_allow(&fctx->event); +} + void nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx) { @@ -178,6 +191,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha } args; int ret; + INIT_WORK(&fctx->allow_block_work, nouveau_fence_work_allow_block); INIT_LIST_HEAD(&fctx->flip); INIT_LIST_HEAD(&fctx->pending); spin_lock_init(&fctx->lock); @@ -521,15 +535,19 @@ static bool nouveau_fence_enable_signaling(struct dma_fence *f) struct nouveau_fence *fence = from_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); bool ret; + bool do_work; - if (!fctx->notify_ref++) - nvif_event_allow(&fctx->event); + if (atomic_inc_return(&fctx->notify_ref) == 0) + do_work = true; ret = nouveau_fence_no_signaling(f); if (ret) set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags); - else if (!--fctx->notify_ref) - nvif_event_block(&fctx->event); + else if (atomic_dec_and_test(&fctx->notify_ref)) + do_work = true; + + if (do_work) + schedule_work(&fctx->allow_block_work); return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 64d33ae7f356..28f5cf013b89 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -3,6 +3,7 @@ #define __NOUVEAU_FENCE_H__ #include <linux/dma-fence.h> +#include <linux/workqueue.h> #include <nvif/event.h> struct nouveau_drm; @@ -45,7 +46,9 @@ struct nouveau_fence_chan { char name[32]; struct nvif_event event; - int notify_ref, dead, killed; + struct work_struct allow_block_work; + atomic_t notify_ref; + int dead, killed; }; struct nouveau_fence_priv { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c index 457ec5db794d..b24eb1e560bc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c @@ -209,7 +209,7 @@ nvkm_disp_dtor(struct nvkm_engine *engine) nvkm_head_del(&head); } - if (disp->func->dtor) + if (disp->func && disp->func->dtor) disp->func->dtor(disp); return data; @@ -243,8 +243,10 @@ nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device, spin_lock_init(&disp->client.lock); ret = nvkm_engine_ctor(&nvkm_disp, device, type, inst, true, &disp->engine); - if (ret) + if (ret) { + disp->func = NULL; return ret; + } if (func->super) { disp->super.wq = create_singlethread_workqueue("nvkm-disp"); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c index 298035070b3a..6a0a4d3b8902 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c @@ -282,7 +282,7 @@ r535_sor_bl_get(struct nvkm_ior *sor) { struct nvkm_disp *disp = sor->disp; NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl; - int lvl; + int ret, lvl; ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS, @@ -292,9 +292,11 @@ r535_sor_bl_get(struct nvkm_ior *sor) ctrl->displayId = BIT(sor->asy.outp->index); - ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } lvl = ctrl->brightness; nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); @@ -649,9 +651,11 @@ r535_conn_new(struct nvkm_disp *disp, u32 id) ctrl->subDeviceInstance = 0; ctrl->displayId = 
BIT(id); - ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return (void *)ctrl; + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ERR_PTR(ret); + } list_for_each_entry(conn, &disp->conns, head) { if (conn->index == ctrl->data[0].index) { @@ -686,7 +690,7 @@ r535_outp_acquire(struct nvkm_outp *outp, bool hda) struct nvkm_disp *disp = outp->disp; struct nvkm_ior *ior; NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *ctrl; - int or; + int ret, or; ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DFP_ASSIGN_SOR, sizeof(*ctrl)); @@ -699,9 +703,11 @@ r535_outp_acquire(struct nvkm_outp *outp, bool hda) if (hda) ctrl->flags |= NVDEF(NV0073_CTRL, DFP_ASSIGN_SOR_FLAGS, AUDIO, OPTIMAL); - ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } for (or = 0; or < ARRAY_SIZE(ctrl->sorAssignListWithTag); or++) { if (ctrl->sorAssignListWithTag[or].displayMask & BIT(outp->index)) { @@ -727,6 +733,7 @@ static int r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid) { NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl; + int ret; ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl)); @@ -736,9 +743,11 @@ r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid) ctrl->subDeviceInstance = 0; ctrl->head = head; - ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } *displayid = ctrl->displayId; nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); @@ -772,9 +781,11 @@ r535_outp_inherit(struct nvkm_outp *outp) ctrl->subDeviceInstance = 0; ctrl->displayId = displayid; - ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl)); - if (IS_ERR(ctrl)) + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); return NULL; + } id = ctrl->index; proto = ctrl->protocol; @@ -825,6 +836,7 @@ r535_outp_dfp_get_info(struct nvkm_outp *outp) { NV0073_CTRL_DFP_GET_INFO_PARAMS *ctrl; struct nvkm_disp *disp = outp->disp; + int ret; ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DFP_GET_INFO, sizeof(*ctrl)); if (IS_ERR(ctrl)) @@ -832,9 +844,11 @@ r535_outp_dfp_get_info(struct nvkm_outp *outp) ctrl->displayId = BIT(outp->index); - ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } nvkm_debug(&disp->engine.subdev, "DFP %08x: flags:%08x flags2:%08x\n", ctrl->displayId, ctrl->flags, ctrl->flags2); @@ -858,9 +872,11 @@ r535_outp_detect(struct nvkm_outp *outp) ctrl->subDeviceInstance = 0; ctrl->displayMask = BIT(outp->index); - ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } if (ctrl->displayMask & 
BIT(outp->index)) { ret = r535_outp_dfp_get_info(outp); @@ -895,6 +911,7 @@ r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid) { NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *ctrl; struct nvkm_disp *disp = outp->disp; + int ret; ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID, @@ -904,9 +921,11 @@ r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid) ctrl->subDeviceInstance = 0; ctrl->displayId = BIT(outp->index); - ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } *pid = ctrl->displayIdAssigned; nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); @@ -938,38 +957,60 @@ r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8 { struct nvkm_disp *disp = outp->disp; NV0073_CTRL_DP_CTRL_PARAMS *ctrl; - int ret; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); + int ret, retries; + u32 cmd, data; - ctrl->subDeviceInstance = 0; - ctrl->displayId = BIT(outp->index); - ctrl->cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) | - NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) | - NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES); - ctrl->data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) | - NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) | - NVVAL(NV0073_CTRL, DP_DATA, TARGET, target); + cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) | + NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) | + NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES); + data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) | + NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) | + NVVAL(NV0073_CTRL, DP_DATA, TARGET, target); if (mst) - ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM); + cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM); if (outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP) - ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE); + cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE); if (target == 0 && (outp->dp.dpcd[DPCD_RC02] & 0x20) && !(outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED)) - ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES); + cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES); - ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); + /* We should retry up to 3 times, but only if GSP asks politely */ + for (retries = 0; retries < 3; ++retries) { + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(outp->index); + ctrl->retryTimeMs = 0; + ctrl->cmd = cmd; + ctrl->data = data; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret == -EAGAIN && ctrl->retryTimeMs) { + /* + * Device (likely an eDP panel) isn't ready yet, wait for the time specified + * by GSP before retrying again + */ + nvkm_debug(&disp->engine.subdev, + "Waiting %dms for GSP LT panel delay before retrying\n", + ctrl->retryTimeMs); + msleep(ctrl->retryTimeMs); + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + } else { + /* GSP didn't say to retry, or we were successful */ + if (ctrl->err) + ret = -EIO; + 
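/*
 * Illustrative sketch, not part of the patch: the retry contract the
 * link-training loop above relies on, distilled with hypothetical names.
 * submit() stands in for nvkm_gsp_rm_ctrl_push() and delay_ms for the
 * firmware-supplied ctrl->retryTimeMs; the -EAGAIN comes from mapping the
 * RPC statuses NV_ERR_NOT_READY / NV_ERR_TIMEOUT_RETRY to an errno.
 */
static int submit_with_fw_retry(int (*submit)(void *arg, u32 *delay_ms),
				void *arg)
{
	u32 delay_ms;
	int retries, ret = 0;

	for (retries = 0; retries < 3; ++retries) {
		delay_ms = 0;
		ret = submit(arg, &delay_ms);
		if (ret != -EAGAIN || !delay_ms)
			break;		/* success, or a hard failure */
		msleep(delay_ms);	/* wait as long as firmware asked */
	}

	return ret;
}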
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + break; + } + } - ret = ctrl->err ? -EIO : 0; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); return ret; } @@ -1036,9 +1077,11 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize) ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0; memcpy(ctrl->data, data, size); - ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl)); - if (IS_ERR(ctrl)) + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); return PTR_ERR(ctrl); + } memcpy(data, ctrl->data, size); *psize = ctrl->size; @@ -1111,10 +1154,13 @@ r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize) ctrl->subDeviceInstance = 0; ctrl->displayId = BIT(outp->index); - ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + ret = -E2BIG; if (ctrl->bufferSize <= *psize) { memcpy(data, ctrl->edidBuffer, ctrl->bufferSize); *psize = ctrl->bufferSize; @@ -1153,9 +1199,11 @@ r535_outp_new(struct nvkm_disp *disp, u32 id) ctrl->subDeviceInstance = 0; ctrl->displayId = BIT(id); - ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } switch (ctrl->type) { case NV0073_CTRL_SPECIFIC_OR_TYPE_NONE: @@ -1229,9 +1277,11 @@ r535_outp_new(struct nvkm_disp *disp, u32 id) ctrl->sorIndex = ~0; - ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) { case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62: @@ -1465,8 +1515,6 @@ r535_disp_oneinit(struct nvkm_disp *disp) bool nvhg = acpi_check_dsm(handle, &NVHG_DSM_GUID, NVHG_DSM_REV, 1ULL << 0x00000014); - printk(KERN_ERR "bl: nbci:%d nvhg:%d\n", nbci, nvhg); - if (nbci || nvhg) { union acpi_object argv4 = { .buffer.type = ACPI_TYPE_BUFFER, @@ -1479,9 +1527,6 @@ r535_disp_oneinit(struct nvkm_disp *disp) if (!obj) { acpi_handle_info(handle, "failed to evaluate _DSM\n"); } else { - printk(KERN_ERR "bl: obj type %d\n", obj->type); - printk(KERN_ERR "bl: obj len %d\n", obj->package.count); - for (int i = 0; i < obj->package.count; i++) { union acpi_object *elt = &obj->package.elements[i]; u32 size; @@ -1491,12 +1536,10 @@ r535_disp_oneinit(struct nvkm_disp *disp) else size = 4; - printk(KERN_ERR "elt %03d: type %d size %d\n", i, elt->type, size); memcpy(&ctrl->backLightData[ctrl->backLightDataSize], &elt->integer.value, size); ctrl->backLightDataSize += size; } - printk(KERN_ERR "bl: data size %d\n", ctrl->backLightDataSize); ctrl->status = 0; ACPI_FREE(obj); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c index d088e636edc3..b903785056b5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c @@ -242,6 +242,7 @@ r535_chan_id_put(struct nvkm_chan *chan) nvkm_memory_unref(&userd->mem); nvkm_chid_put(runl->chid, userd->chid, 
&chan->cgrp->lock); list_del(&userd->head); + kfree(userd); } break; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index 44fb86841c05..9ee58e2a0eb2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -70,6 +70,20 @@ struct r535_gsp_msg { #define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data) +static int +r535_rpc_status_to_errno(uint32_t rpc_status) +{ + switch (rpc_status) { + case 0x55: /* NV_ERR_NOT_READY */ + case 0x66: /* NV_ERR_TIMEOUT_RETRY */ + return -EAGAIN; + case 0x51: /* NV_ERR_NO_MEMORY */ + return -ENOMEM; + default: + return -EINVAL; + } +} + static void * r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime) { @@ -298,7 +312,8 @@ retry: struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i]; if (ntfy->fn == msg->function) { - ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg)); + if (ntfy->func) + ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg)); break; } } @@ -583,14 +598,14 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc) return rpc; if (rpc->status) { - nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status); - ret = ERR_PTR(-EINVAL); + ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status)); + if (PTR_ERR(ret) != -EAGAIN) + nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status); } else { ret = repc ? rpc->params : NULL; } - if (IS_ERR_OR_NULL(ret)) - nvkm_gsp_rpc_done(gsp, rpc); + nvkm_gsp_rpc_done(gsp, rpc); return ret; } @@ -623,29 +638,34 @@ r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv) { rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params); + if (!repv) + return; nvkm_gsp_rpc_done(object->client->gsp, rpc); } -static void * -r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc) +static int +r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc) { - rpc_gsp_rm_control_v03_00 *rpc = container_of(argv, typeof(*rpc), params); + rpc_gsp_rm_control_v03_00 *rpc = container_of((*argv), typeof(*rpc), params); struct nvkm_gsp *gsp = object->client->gsp; - void *ret; + int ret = 0; rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc); - if (IS_ERR_OR_NULL(rpc)) - return rpc; + if (IS_ERR_OR_NULL(rpc)) { + *argv = NULL; + return PTR_ERR(rpc); + } if (rpc->status) { - nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n", - object->client->object.handle, object->handle, rpc->cmd, rpc->status); - ret = ERR_PTR(-EINVAL); - } else { - ret = repc ? 
rpc->params : NULL; + ret = r535_rpc_status_to_errno(rpc->status); + if (ret != -EAGAIN) + nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n", + object->client->object.handle, object->handle, rpc->cmd, rpc->status); } - if (IS_ERR_OR_NULL(ret)) + if (repc) + *argv = rpc->params; + else nvkm_gsp_rpc_done(gsp, rpc); return ret; @@ -843,9 +863,11 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp) if (IS_ERR(ctrl)) return PTR_ERR(ctrl); - ctrl = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, ctrl, sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return PTR_ERR(ctrl); + ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl)); + if (WARN_ON(ret)) { + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return ret; + } for (unsigned i = 0; i < ctrl->tableLen; i++) { enum nvkm_subdev_type type; @@ -1099,16 +1121,12 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) if (!obj) return; - printk(KERN_ERR "nvop: obj type %d\n", obj->type); - printk(KERN_ERR "nvop: obj len %d\n", obj->buffer.length); - if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || WARN_ON(obj->buffer.length != 4)) return; caps->status = 0; caps->optimusCaps = *(u32 *)obj->buffer.pointer; - printk(KERN_ERR "nvop: caps %08x\n", caps->optimusCaps); ACPI_FREE(obj); @@ -1135,9 +1153,6 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) if (!obj) return; - printk(KERN_ERR "jt: obj type %d\n", obj->type); - printk(KERN_ERR "jt: obj len %d\n", obj->buffer.length); - if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || WARN_ON(obj->buffer.length != 4)) return; @@ -1146,7 +1161,6 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) jt->jtCaps = *(u32 *)obj->buffer.pointer; jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20; jt->bSBIOSCaps = 0; - printk(KERN_ERR "jt: caps %08x rev:%04x\n", jt->jtCaps, jt->jtRevId); ACPI_FREE(obj); @@ -1157,6 +1171,8 @@ static void r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode, MUX_METHOD_DATA_ELEMENT *part) { + union acpi_object mux_arg = { ACPI_TYPE_INTEGER }; + struct acpi_object_list input = { 1, &mux_arg }; acpi_handle iter = NULL, handle_mux = NULL; acpi_status status; unsigned long long value; @@ -1179,14 +1195,18 @@ r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode, if (!handle_mux) return; - status = acpi_evaluate_integer(handle_mux, "MXDM", NULL, &value); + /* I -think- 0 means "acquire" according to nvidia's driver source */ + input.pointer->integer.type = ACPI_TYPE_INTEGER; + input.pointer->integer.value = 0; + + status = acpi_evaluate_integer(handle_mux, "MXDM", &input, &value); if (ACPI_SUCCESS(status)) { mode->acpiId = id; mode->mode = value; mode->status = 0; } - status = acpi_evaluate_integer(handle_mux, "MXDS", NULL, &value); + status = acpi_evaluate_integer(handle_mux, "MXDS", &input, &value); if (ACPI_SUCCESS(status)) { part->acpiId = id; part->mode = value; @@ -1232,8 +1252,8 @@ r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod) dod->acpiIdListLen += sizeof(dod->acpiIdList[0]); } - printk(KERN_ERR "_DOD: ok! 
len:%d\n", dod->acpiIdListLen); dod->status = 0; + kfree(output.pointer); } #endif @@ -2186,7 +2206,9 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED, r535_gsp_msg_mmu_fault_queued, gsp); r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp); - + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL); ret = r535_gsp_rm_boot_ctor(gsp); if (ret) return ret; diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c index b1b423b68cdf..19eaff22e6ae 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_device_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_device_test.c @@ -175,7 +175,7 @@ static void ttm_device_init_pools(struct kunit *test) if (params->pools_init_expected) { for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) { - for (int j = 0; j <= MAX_ORDER; ++j) { + for (int j = 0; j < NR_PAGE_ORDERS; ++j) { pt = pool->caching[i].orders[j]; KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool); KUNIT_EXPECT_EQ(test, pt.caching, i); diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c index 2d9cae8cd984..cceaa18d4e46 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c @@ -109,7 +109,7 @@ static const struct ttm_pool_test_case ttm_pool_basic_cases[] = { }, { .description = "Above the allocation limit", - .order = MAX_ORDER + 1, + .order = MAX_PAGE_ORDER + 1, }, { .description = "One page, with coherent DMA mappings enabled", @@ -118,7 +118,7 @@ static const struct ttm_pool_test_case ttm_pool_basic_cases[] = { }, { .description = "Above the allocation limit, with coherent DMA mappings enabled", - .order = MAX_ORDER + 1, + .order = MAX_PAGE_ORDER + 1, .use_dma_alloc = true, }, }; @@ -165,7 +165,7 @@ static void ttm_pool_alloc_basic(struct kunit *test) fst_page = tt->pages[0]; last_page = tt->pages[tt->num_pages - 1]; - if (params->order <= MAX_ORDER) { + if (params->order <= MAX_PAGE_ORDER) { if (params->use_dma_alloc) { KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private); KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private); @@ -182,7 +182,7 @@ static void ttm_pool_alloc_basic(struct kunit *test) * order 0 blocks */ KUNIT_ASSERT_EQ(test, fst_page->private, - min_t(unsigned int, MAX_ORDER, + min_t(unsigned int, MAX_PAGE_ORDER, params->order)); KUNIT_ASSERT_EQ(test, last_page->private, 0); } diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index fe610a3cace0..b62f420a9f96 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -65,11 +65,11 @@ module_param(page_pool_size, ulong, 0644); static atomic_long_t allocated_pages; -static struct ttm_pool_type global_write_combined[MAX_ORDER + 1]; -static struct ttm_pool_type global_uncached[MAX_ORDER + 1]; +static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS]; +static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS]; -static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1]; -static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1]; +static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS]; +static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS]; static spinlock_t shrinker_lock; static struct list_head shrinker_list; @@ -447,7 +447,7 
@@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, else gfp_flags |= GFP_HIGHUSER; - for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages)); + for (order = min_t(unsigned int, MAX_PAGE_ORDER, __fls(num_pages)); num_pages; order = min_t(unsigned int, order, __fls(num_pages))) { struct ttm_pool_type *pt; @@ -568,7 +568,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev, if (use_dma_alloc || nid != NUMA_NO_NODE) { for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) - for (j = 0; j <= MAX_ORDER; ++j) + for (j = 0; j < NR_PAGE_ORDERS; ++j) ttm_pool_type_init(&pool->caching[i].orders[j], pool, i, j); } @@ -601,7 +601,7 @@ void ttm_pool_fini(struct ttm_pool *pool) if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) { for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) - for (j = 0; j <= MAX_ORDER; ++j) + for (j = 0; j < NR_PAGE_ORDERS; ++j) ttm_pool_type_fini(&pool->caching[i].orders[j]); } @@ -656,7 +656,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m) unsigned int i; seq_puts(m, "\t "); - for (i = 0; i <= MAX_ORDER; ++i) + for (i = 0; i < NR_PAGE_ORDERS; ++i) seq_printf(m, " ---%2u---", i); seq_puts(m, "\n"); } @@ -667,7 +667,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt, { unsigned int i; - for (i = 0; i <= MAX_ORDER; ++i) + for (i = 0; i < NR_PAGE_ORDERS; ++i) seq_printf(m, " %8u", ttm_pool_type_count(&pt[i])); seq_puts(m, "\n"); } @@ -776,7 +776,7 @@ int ttm_pool_mgr_init(unsigned long num_pages) spin_lock_init(&shrinker_lock); INIT_LIST_HEAD(&shrinker_list); - for (i = 0; i <= MAX_ORDER; ++i) { + for (i = 0; i < NR_PAGE_ORDERS; ++i) { ttm_pool_type_init(&global_write_combined[i], NULL, ttm_write_combined, i); ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i); @@ -816,7 +816,7 @@ void ttm_pool_mgr_fini(void) { unsigned int i; - for (i = 0; i <= MAX_ORDER; ++i) { + for (i = 0; i < NR_PAGE_ORDERS; ++i) { ttm_pool_type_fini(&global_write_combined[i]); ttm_pool_type_fini(&global_uncached[i]); diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h index 05b8b8dfa9bd..36587f38dff3 100644 --- a/drivers/i2c/i2c-core.h +++ b/drivers/i2c/i2c-core.h @@ -3,6 +3,7 @@ * i2c-core.h - interfaces internal to the I2C framework */ +#include <linux/kconfig.h> #include <linux/rwsem.h> struct i2c_devinfo { @@ -29,7 +30,8 @@ int i2c_dev_irq_from_resources(const struct resource *resources, */ static inline bool i2c_in_atomic_xfer_mode(void) { - return system_state > SYSTEM_RUNNING && !preemptible(); + return system_state > SYSTEM_RUNNING && + (IS_ENABLED(CONFIG_PREEMPT_COUNT) ? 
!preemptible() : irqs_disabled()); } static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap) diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index dcda0afecfc5..bcf1198e8991 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -131,11 +131,12 @@ static unsigned int mwait_substates __initdata; #define MWAIT2flg(eax) ((eax & 0xFF) << 24) static __always_inline int __intel_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) + struct cpuidle_driver *drv, + int index, bool irqoff) { struct cpuidle_state *state = &drv->states[index]; unsigned long eax = flg2MWAIT(state->flags); - unsigned long ecx = 1; /* break on interrupt flag */ + unsigned long ecx = 1*irqoff; /* break on interrupt flag */ mwait_idle_with_hints(eax, ecx); @@ -159,19 +160,13 @@ static __always_inline int __intel_idle(struct cpuidle_device *dev, static __cpuidle int intel_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - return __intel_idle(dev, drv, index); + return __intel_idle(dev, drv, index, true); } static __cpuidle int intel_idle_irq(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - int ret; - - raw_local_irq_enable(); - ret = __intel_idle(dev, drv, index); - raw_local_irq_disable(); - - return ret; + return __intel_idle(dev, drv, index, false); } static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev, @@ -184,7 +179,7 @@ static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev, if (smt_active) __update_spec_ctrl(0); - ret = __intel_idle(dev, drv, index); + ret = __intel_idle(dev, drv, index, true); if (smt_active) __update_spec_ctrl(spec_ctrl); @@ -196,7 +191,7 @@ static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { fpu_idle_fpregs(); - return __intel_idle(dev, drv, index); + return __intel_idle(dev, drv, index, true); } /** @@ -923,6 +918,35 @@ static struct cpuidle_state adl_l_cstates[] __initdata = { .enter = NULL } }; +static struct cpuidle_state mtl_l_cstates[] __initdata = { + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 140, + .target_residency = 420, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C10", + .desc = "MWAIT 0x60", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 310, + .target_residency = 930, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static struct cpuidle_state gmt_cstates[] __initdata = { { .name = "C1", @@ -1242,6 +1266,72 @@ static struct cpuidle_state snr_cstates[] __initdata = { .enter = NULL } }; +static struct cpuidle_state grr_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 2, + .target_residency = 10, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6S", + .desc = "MWAIT 0x22", + .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 140, + .target_residency = 500, + .enter = 
&intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + +static struct cpuidle_state srf_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 2, + .target_residency = 10, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6S", + .desc = "MWAIT 0x22", + .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 270, + .target_residency = 700, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6SP", + .desc = "MWAIT 0x23", + .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 310, + .target_residency = 900, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static const struct idle_cpu idle_cpu_nehalem __initconst = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, @@ -1349,6 +1439,10 @@ static const struct idle_cpu idle_cpu_adl_l __initconst = { .state_table = adl_l_cstates, }; +static const struct idle_cpu idle_cpu_mtl_l __initconst = { + .state_table = mtl_l_cstates, +}; + static const struct idle_cpu idle_cpu_gmt __initconst = { .state_table = gmt_cstates, }; @@ -1387,6 +1481,18 @@ static const struct idle_cpu idle_cpu_snr __initconst = { .use_acpi = true, }; +static const struct idle_cpu idle_cpu_grr __initconst = { + .state_table = grr_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + +static const struct idle_cpu idle_cpu_srf __initconst = { + .state_table = srf_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx), X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem), @@ -1423,6 +1529,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l), + X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &idle_cpu_mtl_l), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &idle_cpu_gmt), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr), X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr), @@ -1432,6 +1539,8 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &idle_cpu_grr), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &idle_cpu_srf), {} }; diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h index 9d316fdc6f9a..a155519a862f 100644 --- a/drivers/infiniband/hw/erdma/erdma_hw.h +++ b/drivers/infiniband/hw/erdma/erdma_hw.h @@ -11,8 +11,6 @@ #include <linux/types.h> /* PCIe device related definition. 
*/ -#define PCI_VENDOR_ID_ALIBABA 0x1ded - #define ERDMA_PCI_WIDTH 64 #define ERDMA_FUNC_BAR 0 #define ERDMA_MISX_BAR 2 diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 8ba53edf2311..869369cb5b5f 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -2498,7 +2498,7 @@ static void dispatch_event_fd(struct list_head *fd_list, list_for_each_entry_rcu(item, fd_list, xa_list) { if (item->eventfd) - eventfd_signal(item->eventfd, 1); + eventfd_signal(item->eventfd); else deliver_event(item, data); } diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c index 852aeb0b2c07..07c866f42296 100644 --- a/drivers/input/rmi4/rmi_spi.c +++ b/drivers/input/rmi4/rmi_spi.c @@ -375,7 +375,7 @@ static int rmi_spi_probe(struct spi_device *spi) struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data; int error; - if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) + if (spi->master->flags & SPI_CONTROLLER_HALF_DUPLEX) return -EINVAL; rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport), diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index fcc987f5d4ed..b9a0523cbb0a 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -3357,7 +3357,7 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data, data->irq_2_irte.devid = devid; data->irq_2_irte.index = index + sub_handle; - iommu->irte_ops->prepare(data->entry, apic->delivery_mode, + iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED, apic->dest_mode_logical, irq_cfg->vector, irq_cfg->dest_apicid, devid); @@ -3634,7 +3634,7 @@ int amd_iommu_deactivate_guest_mode(void *data) entry->lo.fields_remap.valid = valid; entry->lo.fields_remap.dm = apic->dest_mode_logical; - entry->lo.fields_remap.int_type = apic->delivery_mode; + entry->lo.fields_remap.int_type = APIC_DELIVERY_MODE_FIXED; entry->hi.fields.vector = cfg->vector; entry->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(cfg->dest_apicid); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index 961205ba86d2..925ac6a47bce 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -188,7 +188,7 @@ #ifdef CONFIG_CMA_ALIGNMENT #define Q_MAX_SZ_SHIFT (PAGE_SHIFT + CONFIG_CMA_ALIGNMENT) #else -#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER) +#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_PAGE_ORDER) #endif /* diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 85163a83df2f..e59f50e11ea8 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -884,7 +884,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev, struct page **pages; unsigned int i = 0, nid = dev_to_node(dev); - order_mask &= GENMASK(MAX_ORDER, 0); + order_mask &= GENMASK(MAX_PAGE_ORDER, 0); if (!order_mask) return NULL; diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index 29b9e55dcf26..566297bc87dd 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -1112,7 +1112,7 @@ static void prepare_irte(struct irte *irte, int vector, unsigned int dest) * irq migration in the presence of interrupt-remapping. 
*/ irte->trigger_mode = 0; - irte->dlvry_mode = apic->delivery_mode; + irte->dlvry_mode = APIC_DELIVERY_MODE_FIXED; irte->vector = vector; irte->dest_id = IRTE_DEST(dest); irte->redir_hint = 1; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 9a7a74239eab..d097001c1e3e 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -2465,8 +2465,8 @@ static bool its_parse_indirect_baser(struct its_node *its, * feature is not supported by hardware. */ new_order = max_t(u32, get_order(esz << ids), new_order); - if (new_order > MAX_ORDER) { - new_order = MAX_ORDER; + if (new_order > MAX_PAGE_ORDER) { + new_order = MAX_PAGE_ORDER; ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n", &its->phys_base, its_base_type_string[type], diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c index 7124565234a5..cda5838d2232 100644 --- a/drivers/irqchip/irq-qcom-mpm.c +++ b/drivers/irqchip/irq-qcom-mpm.c @@ -14,6 +14,7 @@ #include <linux/mailbox_client.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_domain.h> @@ -322,8 +323,10 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent) struct device *dev = &pdev->dev; struct irq_domain *parent_domain; struct generic_pm_domain *genpd; + struct device_node *msgram_np; struct qcom_mpm_priv *priv; unsigned int pin_cnt; + struct resource res; int i, irq; int ret; @@ -374,9 +377,26 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent) raw_spin_lock_init(&priv->lock); - priv->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); + /* If we have a handle to an RPM message ram partition, use it. */ + msgram_np = of_parse_phandle(np, "qcom,rpm-msg-ram", 0); + if (msgram_np) { + ret = of_address_to_resource(msgram_np, 0, &res); + if (ret) { + of_node_put(msgram_np); + return ret; + } + + /* Don't use devm_ioremap_resource, as we're accessing a shared region. */ + priv->base = devm_ioremap(dev, res.start, resource_size(&res)); + of_node_put(msgram_np); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + } else { + /* Otherwise, fall back to simple MMIO. 
*/ + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + } for (i = 0; i < priv->reg_stride; i++) { qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0); diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c index fe8d516f3614..9494fc26259c 100644 --- a/drivers/irqchip/irq-renesas-rzg2l.c +++ b/drivers/irqchip/irq-renesas-rzg2l.c @@ -18,6 +18,7 @@ #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/spinlock.h> +#include <linux/syscore_ops.h> #define IRQC_IRQ_START 1 #define IRQC_IRQ_COUNT 8 @@ -28,8 +29,7 @@ #define ISCR 0x10 #define IITSR 0x14 #define TSCR 0x20 -#define TITSR0 0x24 -#define TITSR1 0x28 +#define TITSR(n) (0x24 + (n) * 4) #define TITSR0_MAX_INT 16 #define TITSEL_WIDTH 0x2 #define TSSR(n) (0x30 + ((n) * 4)) @@ -53,15 +53,33 @@ #define IITSR_IITSEL_EDGE_BOTH 3 #define IITSR_IITSEL_MASK(n) IITSR_IITSEL((n), 3) -#define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x)) -#define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x)) +#define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x)) +#define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x)) -struct rzg2l_irqc_priv { - void __iomem *base; - struct irq_fwspec fwspec[IRQC_NUM_IRQ]; - raw_spinlock_t lock; +/** + * struct rzg2l_irqc_reg_cache - registers cache (necessary for suspend/resume) + * @iitsr: IITSR register + * @titsr: TITSR registers + */ +struct rzg2l_irqc_reg_cache { + u32 iitsr; + u32 titsr[2]; }; +/** + * struct rzg2l_irqc_priv - IRQ controller private data structure + * @base: Controller's base address + * @fwspec: IRQ firmware specific data + * @lock: Lock to serialize access to hardware registers + * @cache: Registers cache for suspend/resume + */ +static struct rzg2l_irqc_priv { + void __iomem *base; + struct irq_fwspec fwspec[IRQC_NUM_IRQ]; + raw_spinlock_t lock; + struct rzg2l_irqc_reg_cache cache; +} *rzg2l_irqc_data; + static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data) { return data->domain->host_data; @@ -72,11 +90,17 @@ static void rzg2l_irq_eoi(struct irq_data *d) unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START; struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); u32 bit = BIT(hw_irq); - u32 reg; + u32 iitsr, iscr; - reg = readl_relaxed(priv->base + ISCR); - if (reg & bit) - writel_relaxed(reg & ~bit, priv->base + ISCR); + iscr = readl_relaxed(priv->base + ISCR); + iitsr = readl_relaxed(priv->base + IITSR); + + /* + * ISCR can only be cleared if the type is falling-edge, rising-edge or + * falling/rising-edge. 
+ */ + if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) + writel_relaxed(iscr & ~bit, priv->base + ISCR); } static void rzg2l_tint_eoi(struct irq_data *d) @@ -188,8 +212,7 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type) struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); unsigned int hwirq = irqd_to_hwirq(d); u32 titseln = hwirq - IRQC_TINT_START; - u32 offset; - u8 sense; + u8 index, sense; u32 reg; switch (type & IRQ_TYPE_SENSE_MASK) { @@ -205,17 +228,17 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type) return -EINVAL; } - offset = TITSR0; + index = 0; if (titseln >= TITSR0_MAX_INT) { titseln -= TITSR0_MAX_INT; - offset = TITSR1; + index = 1; } raw_spin_lock(&priv->lock); - reg = readl_relaxed(priv->base + offset); + reg = readl_relaxed(priv->base + TITSR(index)); reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH)); reg |= sense << (titseln * TITSEL_WIDTH); - writel_relaxed(reg, priv->base + offset); + writel_relaxed(reg, priv->base + TITSR(index)); raw_spin_unlock(&priv->lock); return 0; @@ -236,6 +259,38 @@ static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type) return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH); } +static int rzg2l_irqc_irq_suspend(void) +{ + struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache; + void __iomem *base = rzg2l_irqc_data->base; + + cache->iitsr = readl_relaxed(base + IITSR); + for (u8 i = 0; i < 2; i++) + cache->titsr[i] = readl_relaxed(base + TITSR(i)); + + return 0; +} + +static void rzg2l_irqc_irq_resume(void) +{ + struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache; + void __iomem *base = rzg2l_irqc_data->base; + + /* + * Restore only interrupt type. TSSRx will be restored at the + * request of the pin controller to avoid spurious interrupts due + * to invalid PIN states.
+ */ + for (u8 i = 0; i < 2; i++) + writel_relaxed(cache->titsr[i], base + TITSR(i)); + writel_relaxed(cache->iitsr, base + IITSR); +} + +static struct syscore_ops rzg2l_irqc_syscore_ops = { + .suspend = rzg2l_irqc_irq_suspend, + .resume = rzg2l_irqc_irq_resume, +}; + static const struct irq_chip irqc_chip = { .name = "rzg2l-irqc", .irq_eoi = rzg2l_irqc_eoi, @@ -321,7 +376,6 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent) struct irq_domain *irq_domain, *parent_domain; struct platform_device *pdev; struct reset_control *resetn; - struct rzg2l_irqc_priv *priv; int ret; pdev = of_find_device_by_node(node); @@ -334,15 +388,15 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent) return -ENODEV; } - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) + rzg2l_irqc_data = devm_kzalloc(&pdev->dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL); + if (!rzg2l_irqc_data) return -ENOMEM; - priv->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); + rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL); + if (IS_ERR(rzg2l_irqc_data->base)) + return PTR_ERR(rzg2l_irqc_data->base); - ret = rzg2l_irqc_parse_interrupts(priv, node); + ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node); if (ret) { dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret); return ret; @@ -365,17 +419,19 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent) goto pm_disable; } - raw_spin_lock_init(&priv->lock); + raw_spin_lock_init(&rzg2l_irqc_data->lock); irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ, node, &rzg2l_irqc_domain_ops, - priv); + rzg2l_irqc_data); if (!irq_domain) { dev_err(&pdev->dev, "failed to add irq domain\n"); ret = -ENOMEM; goto pm_put; } + register_syscore_ops(&rzg2l_irqc_syscore_ops); + return 0; pm_put: diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c index 0c18d1f1e264..f9d6fce4da33 100644 --- a/drivers/irqchip/irq-xtensa-pic.c +++ b/drivers/irqchip/irq-xtensa-pic.c @@ -12,6 +12,7 @@ * Kevin Chea */ +#include <linux/bits.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/irq.h> @@ -19,8 +20,6 @@ #include <linux/irqchip/xtensa-pic.h> #include <linux/of.h> -unsigned int cached_irq_mask; - /* * Device Tree IRQ specifier translation function which works with one or * two cell bindings. First cell value maps directly to the hwirq number. 
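A note on the irq-xtensa-pic.c rework continuing below: besides switching to BIT(), it drops the cached_irq_mask mirror of the INTENABLE special register (reading the register back on every mask/unmask means no cached copy can go stale) and deletes the .irq_enable/.irq_disable callbacks, which merely aliased unmask/mask. The latter is safe because, as far as I can tell, the genirq core falls back to .irq_unmask/.irq_mask when a chip leaves the enable/disable callbacks NULL. A minimal sketch of that fallback pattern, using illustrative stand-in types rather than the kernel's struct irq_chip:

#include <stdint.h>

/* Stand-ins for struct irq_chip and the genirq core (illustrative only). */
struct chip_ops {
	void (*irq_enable)(uint32_t hwirq);	/* optional callback */
	void (*irq_unmask)(uint32_t hwirq);	/* required callback */
};

/* Core-side enable: prefer the chip's irq_enable, else fall back to unmask. */
static void core_irq_enable(const struct chip_ops *ops, uint32_t hwirq)
{
	if (ops->irq_enable)
		ops->irq_enable(hwirq);
	else
		ops->irq_unmask(hwirq);
}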
@@ -44,34 +43,30 @@ static const struct irq_domain_ops xtensa_irq_domain_ops = { static void xtensa_irq_mask(struct irq_data *d) { - cached_irq_mask &= ~(1 << d->hwirq); - xtensa_set_sr(cached_irq_mask, intenable); -} + u32 irq_mask; -static void xtensa_irq_unmask(struct irq_data *d) -{ - cached_irq_mask |= 1 << d->hwirq; - xtensa_set_sr(cached_irq_mask, intenable); + irq_mask = xtensa_get_sr(intenable); + irq_mask &= ~BIT(d->hwirq); + xtensa_set_sr(irq_mask, intenable); } -static void xtensa_irq_enable(struct irq_data *d) +static void xtensa_irq_unmask(struct irq_data *d) { - xtensa_irq_unmask(d); -} + u32 irq_mask; -static void xtensa_irq_disable(struct irq_data *d) -{ - xtensa_irq_mask(d); + irq_mask = xtensa_get_sr(intenable); + irq_mask |= BIT(d->hwirq); + xtensa_set_sr(irq_mask, intenable); } static void xtensa_irq_ack(struct irq_data *d) { - xtensa_set_sr(1 << d->hwirq, intclear); + xtensa_set_sr(BIT(d->hwirq), intclear); } static int xtensa_irq_retrigger(struct irq_data *d) { - unsigned int mask = 1u << d->hwirq; + unsigned int mask = BIT(d->hwirq); if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE)) return 0; @@ -81,8 +76,6 @@ static int xtensa_irq_retrigger(struct irq_data *d) static struct irq_chip xtensa_irq_chip = { .name = "xtensa", - .irq_enable = xtensa_irq_enable, - .irq_disable = xtensa_irq_disable, .irq_mask = xtensa_irq_mask, .irq_unmask = xtensa_irq_unmask, .irq_ack = xtensa_irq_ack, diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index f03d7dba270c..13c65b7e1ed6 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1170,7 +1170,7 @@ static void __cache_size_refresh(void) * If the allocation may fail we use __get_free_pages. Memory fragmentation * won't have a fatal effect here, but it just causes flushes of some other * buffers and more I/O will be performed. Don't use __get_free_pages if it - * always fails (i.e. order > MAX_ORDER). + * always fails (i.e. order > MAX_PAGE_ORDER). * * If the allocation shouldn't fail we use __vmalloc. 
This is only for the * initial reserve allocation, so there's no risk of wasting all vmalloc diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 2ae8560b6a14..855b482cbff1 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1673,7 +1673,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size) unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM; unsigned int remaining_size; - unsigned int order = MAX_ORDER; + unsigned int order = MAX_PAGE_ORDER; retry: if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index f57fb821528d..7916ed9f10e8 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -434,7 +434,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b remaining_size = size; - order = MAX_ORDER; + order = MAX_PAGE_ORDER; while (remaining_size) { struct page *pages; unsigned size_to_add, to_copy; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 23c32cd1f1d8..8dcabf84d866 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2675,7 +2675,7 @@ static int lock_fs(struct mapped_device *md) WARN_ON(test_bit(DMF_FROZEN, &md->flags)); - r = freeze_bdev(md->disk->part0); + r = bdev_freeze(md->disk->part0); if (!r) set_bit(DMF_FROZEN, &md->flags); return r; @@ -2685,7 +2685,7 @@ static void unlock_fs(struct mapped_device *md) { if (!test_bit(DMF_FROZEN, &md->flags)) return; - thaw_bdev(md->disk->part0); + bdev_thaw(md->disk->part0); clear_bit(DMF_FROZEN, &md->flags); } diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 90ce58fd629e..925c19ee513b 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -2255,7 +2255,7 @@ config MFD_VEXPRESS_SYSREG config RAVE_SP_CORE tristate "RAVE SP MCU core driver" depends on SERIAL_DEV_BUS - select CRC_CCITT + select CRC_ITU_T help Select this to get support for the Supervisory Processor device found on several devices in RAVE line of hardware. 
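On the select CRC_CCITT -> CRC_ITU_T switch above: crc_ccitt_false() and crc_itu_t() compute the same MSB-first CRC-16 over polynomial 0x1021; only the caller-supplied seed differs, and rave-sp keeps seeding with 0xffff, so the wire format is unchanged. A self-contained sketch of that CRC for reference (the helper name here is illustrative, not a kernel API):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Bit-by-bit MSB-first CRC-16, polynomial x^16 + x^12 + x^5 + 1 (0x1021). */
static uint16_t crc16_msb_0x1021(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= (uint16_t)*buf++ << 8;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;
	}
	return crc;
}

int main(void)
{
	/* Standard CRC-16/CCITT-FALSE check vector: "123456789" -> 0x29b1. */
	assert(crc16_msb_0x1021(0xffff, (const uint8_t *)"123456789", 9) == 0x29b1);
	return 0;
}

Seeded with 0xffff, this matches what csum_ccitt() in the rave-sp.c hunk below now computes via crc_itu_t(0xffff, buf, size).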
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c index da50eba10014..f62422740de2 100644 --- a/drivers/mfd/rave-sp.c +++ b/drivers/mfd/rave-sp.c @@ -9,7 +9,7 @@ */ #include <linux/atomic.h> -#include <linux/crc-ccitt.h> +#include <linux/crc-itu-t.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/init.h> @@ -251,7 +251,7 @@ static void csum_8b2c(const u8 *buf, size_t size, u8 *crc) static void csum_ccitt(const u8 *buf, size_t size, u8 *crc) { - const u16 calculated = crc_ccitt_false(0xffff, buf, size); + const u16 calculated = crc_itu_t(0xffff, buf, size); /* * While the rest of the wire protocol is little-endian, diff --git a/drivers/mfd/tps6594-spi.c b/drivers/mfd/tps6594-spi.c index f4b4f37f957f..24b72847e3f5 100644 --- a/drivers/mfd/tps6594-spi.c +++ b/drivers/mfd/tps6594-spi.c @@ -98,7 +98,7 @@ static int tps6594_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, tps); tps->dev = dev; - tps->reg = spi->chip_select; + tps->reg = spi_get_chipselect(spi, 0); tps->irq = spi->irq; tps->regmap = devm_regmap_init(dev, NULL, spi, &tps6594_spi_regmap_config); diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 0562071cdd4a..6ad0ab892675 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -836,7 +836,8 @@ static inline bool cxl_is_power8(void) { if ((pvr_version_is(PVR_POWER8E)) || (pvr_version_is(PVR_POWER8NVL)) || - (pvr_version_is(PVR_POWER8))) + (pvr_version_is(PVR_POWER8)) || + (pvr_version_is(PVR_HX_C2000))) return true; return false; } diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index 55fc5b80e649..4441aca2280a 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c @@ -443,7 +443,7 @@ static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma) if (vsize == 0) return -EINVAL; - if (get_order(vsize) > MAX_ORDER) + if (get_order(vsize) > MAX_PAGE_ORDER) return -ENOMEM; dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL); diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index 1c798d6b2dfb..a2c4a9b4f871 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c @@ -210,7 +210,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init) void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, dma_addr_t *dma_handle) { - if (get_order(size) > MAX_ORDER) + if (get_order(size) > MAX_PAGE_ORDER) return NULL; return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle, @@ -308,7 +308,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, sgl->write = write; sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages); - if (get_order(sgl->sgl_size) > MAX_ORDER) { + if (get_order(sgl->sgl_size) > MAX_PAGE_ORDER) { dev_err(&pci_dev->dev, "[%s] err: too much memory requested!\n", __func__); return ret; diff --git a/drivers/misc/ocxl/afu_irq.c b/drivers/misc/ocxl/afu_irq.c index a06920b7e049..36f7379b8e2d 100644 --- a/drivers/misc/ocxl/afu_irq.c +++ b/drivers/misc/ocxl/afu_irq.c @@ -57,7 +57,7 @@ EXPORT_SYMBOL_GPL(ocxl_irq_set_handler); static irqreturn_t afu_irq_handler(int virq, void *data) { - struct afu_irq *irq = (struct afu_irq *) data; + struct afu_irq *irq = data; trace_ocxl_afu_irq_receive(virq); diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c index 7f83116ae11a..cded7d1caf32 100644 --- a/drivers/misc/ocxl/context.c +++ b/drivers/misc/ocxl/context.c @@ -55,7 +55,7 @@ EXPORT_SYMBOL_GPL(ocxl_context_alloc); */ static void xsl_fault_error(void *data, u64 addr, u64 
dsisr) { - struct ocxl_context *ctx = (struct ocxl_context *) data; + struct ocxl_context *ctx = data; mutex_lock(&ctx->xsl_error_lock); ctx->xsl_error.addr = addr; diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index ac69b7f361f5..7eb74711ac96 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -184,7 +184,7 @@ static irqreturn_t irq_handler(void *private) { struct eventfd_ctx *ev_ctx = private; - eventfd_signal(ev_ctx, 1); + eventfd_signal(ev_ctx); return IRQ_HANDLED; } diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c index c06c699c0e7b..03402203cacd 100644 --- a/drivers/misc/ocxl/link.c +++ b/drivers/misc/ocxl/link.c @@ -188,7 +188,7 @@ ack: static irqreturn_t xsl_fault_handler(int irq, void *data) { - struct ocxl_link *link = (struct ocxl_link *) data; + struct ocxl_link *link = data; struct spa *spa = link->spa; u64 dsisr, dar, pe_handle; struct pe_data *pe_data; @@ -483,7 +483,7 @@ static void release_xsl(struct kref *ref) void ocxl_link_release(struct pci_dev *dev, void *link_handle) { - struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct ocxl_link *link = link_handle; mutex_lock(&links_list_lock); kref_put(&link->ref, release_xsl); @@ -540,7 +540,7 @@ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr, void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr), void *xsl_err_data) { - struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct ocxl_link *link = link_handle; struct spa *spa = link->spa; struct ocxl_process_element *pe; int pe_handle, rc = 0; @@ -630,7 +630,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_add_pe); int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid) { - struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct ocxl_link *link = link_handle; struct spa *spa = link->spa; struct ocxl_process_element *pe; int pe_handle, rc; @@ -666,7 +666,7 @@ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid) int ocxl_link_remove_pe(void *link_handle, int pasid) { - struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct ocxl_link *link = link_handle; struct spa *spa = link->spa; struct ocxl_process_element *pe; struct pe_data *pe_data; @@ -752,7 +752,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_remove_pe); int ocxl_link_irq_alloc(void *link_handle, int *hw_irq) { - struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct ocxl_link *link = link_handle; int irq; if (atomic_dec_if_positive(&link->irq_available) < 0) @@ -771,7 +771,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc); void ocxl_link_free_irq(void *link_handle, int hw_irq) { - struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct ocxl_link *link = link_handle; xive_native_free_irq(hw_irq); atomic_inc(&link->irq_available); diff --git a/drivers/misc/ocxl/main.c b/drivers/misc/ocxl/main.c index ef73cf35dda2..658974143c3c 100644 --- a/drivers/misc/ocxl/main.c +++ b/drivers/misc/ocxl/main.c @@ -7,7 +7,7 @@ static int __init init_ocxl(void) { - int rc = 0; + int rc; if (!tlbie_capable) return -EINVAL; diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index f9a5cffa64b1..134c36edb6cf 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -851,9 +851,10 @@ static const struct block_device_operations mmc_bdops = { static int mmc_blk_part_switch_pre(struct mmc_card *card, unsigned int part_type) { + const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB; int ret = 0; - if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) { + if ((part_type & mask) == mask) { if 
(card->ext_csd.cmdq_en) { ret = mmc_cmdq_disable(card); if (ret) @@ -868,9 +869,10 @@ static int mmc_blk_part_switch_post(struct mmc_card *card, unsigned int part_type) { + const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB; int ret = 0; - if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) { + if ((part_type & mask) == mask) { mmc_retune_unpause(card->host); if (card->reenable_cmdq && !card->ext_csd.cmdq_en) ret = mmc_cmdq_enable(card); @@ -3145,4 +3147,3 @@ module_exit(mmc_blk_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); - diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 096093f7be00..2f51db4df1a8 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -692,6 +692,7 @@ EXPORT_SYMBOL(mmc_remove_host); */ void mmc_free_host(struct mmc_host *host) { + cancel_delayed_work_sync(&host->detect); mmc_pwrseq_free(host); put_device(&host->class_dev); } diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c index 528ec8166e7c..1ed9731e77ef 100644 --- a/drivers/mmc/host/meson-mx-sdhc-mmc.c +++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c @@ -269,7 +269,7 @@ static int meson_mx_sdhc_enable_clks(struct mmc_host *mmc) static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios) { struct meson_mx_sdhc_host *host = mmc_priv(mmc); - u32 rx_clk_phase; + u32 val, rx_clk_phase; int ret; meson_mx_sdhc_disable_clks(mmc); @@ -290,27 +290,11 @@ static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios) mmc->actual_clock = clk_get_rate(host->sd_clk); /* - * according to Amlogic the following latching points are - * selected with empirical values, there is no (known) formula - * to calculate these. + * Phase 90 should work in most cases. For data transmission, + * meson_mx_sdhc_execute_tuning() will find an accurate value */ - if (mmc->actual_clock > 100000000) { - rx_clk_phase = 1; - } else if (mmc->actual_clock > 45000000) { - if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) - rx_clk_phase = 15; - else - rx_clk_phase = 11; - } else if (mmc->actual_clock >= 25000000) { - rx_clk_phase = 15; - } else if (mmc->actual_clock > 5000000) { - rx_clk_phase = 23; - } else if (mmc->actual_clock > 1000000) { - rx_clk_phase = 55; - } else { - rx_clk_phase = 1061; - } - + regmap_read(host->regmap, MESON_SDHC_CLKC, &val); + rx_clk_phase = FIELD_GET(MESON_SDHC_CLKC_CLK_DIV, val) / 4; regmap_update_bits(host->regmap, MESON_SDHC_CLK2, MESON_SDHC_CLK2_RX_CLK_PHASE, FIELD_PREP(MESON_SDHC_CLK2_RX_CLK_PHASE, diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index cc333ad67cac..b0cccef4cfbf 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1322,7 +1322,7 @@ static int mmc_spi_probe(struct spi_device *spi) /* We rely on full duplex transfers, mostly to reduce * per-transfer overheads (by making fewer transfers).
*/ - if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) + if (spi->master->flags & SPI_CONTROLLER_HALF_DUPLEX) return -EINVAL; /* MMC and SD specs only seem to care that sampling is on the diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index 6b8a57e2d20f..bed57a1c64b5 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -239,15 +239,19 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host, div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8); sdhci_enable_clk(host, div); + val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); + mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN; /* Enable CLK_AUTO when the clock is greater than 400K. */ if (clk > 400000) { - val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); - mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | - SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN; if (mask != (val & mask)) { val |= mask; sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI); } + } else { + if (val & mask) { + val &= ~mask; + sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI); + } } } diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c index a7ec947a3ebb..53019d313db7 100644 --- a/drivers/mtd/maps/vmu-flash.c +++ b/drivers/mtd/maps/vmu-flash.c @@ -719,7 +719,7 @@ static int vmu_can_unload(struct maple_device *mdev) card = maple_get_drvdata(mdev); for (x = 0; x < card->partitions; x++) { mtd = &((card->mtd)[x]); - if (mtd->usecount > 0) + if (kref_read(&mtd->refcnt)) return 0; } return 1; diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index ff18636e0889..5bc32108ca03 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -463,7 +463,7 @@ static void blktrans_notify_add(struct mtd_info *mtd) { struct mtd_blktrans_ops *tr; - if (mtd->type == MTD_ABSENT) + if (mtd->type == MTD_ABSENT || mtd->type == MTD_UBIVOLUME) return; list_for_each_entry(tr, &blktrans_majors, list) @@ -503,7 +503,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr) mutex_lock(&mtd_table_mutex); list_add(&tr->list, &blktrans_majors); mtd_for_each_device(mtd) - if (mtd->type != MTD_ABSENT) + if (mtd->type != MTD_ABSENT && mtd->type != MTD_UBIVOLUME) tr->add_mtd(tr, mtd); mutex_unlock(&mtd_table_mutex); return 0; diff --git a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c index 9596629000f4..968c5b674b08 100644 --- a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c +++ b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c @@ -85,7 +85,7 @@ MODULE_DEVICE_TABLE(of, bcm63138_nand_of_match); static struct platform_driver bcm63138_nand_driver = { .probe = bcm63138_nand_probe, - .remove = brcmnand_remove, + .remove_new = brcmnand_remove, .driver = { .name = "bcm63138_nand", .pm = &brcmnand_pm_ops, diff --git a/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c index a06cd87f839a..05b7b653bdf3 100644 --- a/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c +++ b/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c @@ -117,7 +117,7 @@ MODULE_DEVICE_TABLE(of, bcm6368_nand_of_match); static struct platform_driver bcm6368_nand_driver = { .probe = bcm6368_nand_probe, - .remove = brcmnand_remove, + .remove_new = brcmnand_remove, .driver = { .name = "bcm6368_nand", .pm = &brcmnand_pm_ops, diff --git a/drivers/mtd/nand/raw/brcmnand/bcma_nand.c b/drivers/mtd/nand/raw/brcmnand/bcma_nand.c index dd27977919fb..4e7e435ba339 100644 --- a/drivers/mtd/nand/raw/brcmnand/bcma_nand.c +++ 
b/drivers/mtd/nand/raw/brcmnand/bcma_nand.c @@ -119,7 +119,7 @@ static int brcmnand_bcma_nand_probe(struct platform_device *pdev) static struct platform_driver brcmnand_bcma_nand_driver = { .probe = brcmnand_bcma_nand_probe, - .remove = brcmnand_remove, + .remove_new = brcmnand_remove, .driver = { .name = "bcma_brcmnand", .pm = &brcmnand_pm_ops, diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 440bef477930..8faca43ae1ff 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -625,6 +625,8 @@ enum { /* Only for v7.2 */ #define ACC_CONTROL_ECC_EXT_SHIFT 13 +static u8 brcmnand_status(struct brcmnand_host *host); + static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl) { #if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA) @@ -1022,19 +1024,6 @@ static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl) return -1; } -static int brcmnand_get_sector_size_1k(struct brcmnand_host *host) -{ - struct brcmnand_controller *ctrl = host->ctrl; - int shift = brcmnand_sector_1k_shift(ctrl); - u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, - BRCMNAND_CS_ACC_CONTROL); - - if (shift < 0) - return 0; - - return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1; -} - static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val) { struct brcmnand_controller *ctrl = host->ctrl; @@ -1061,10 +1050,11 @@ enum { CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30), }; -static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl, +static int bcmnand_ctrl_poll_status(struct brcmnand_host *host, u32 mask, u32 expected_val, unsigned long timeout_ms) { + struct brcmnand_controller *ctrl = host->ctrl; unsigned long limit; u32 val; @@ -1073,6 +1063,9 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl, limit = jiffies + msecs_to_jiffies(timeout_ms); do { + if (mask & INTFC_FLASH_STATUS) + brcmnand_status(host); + val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); if ((val & mask) == expected_val) return 0; @@ -1084,6 +1077,9 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl, * do a final check after time out in case the CPU was busy and the driver * did not get enough time to perform the polling to avoid false alarms */ + if (mask & INTFC_FLASH_STATUS) + brcmnand_status(host); + val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); if ((val & mask) == expected_val) return 0; @@ -1379,7 +1375,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp) * make sure ctrl/flash ready before and after * changing state of #WP pin */ - ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY | + ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY | NAND_STATUS_READY, NAND_CTRL_RDY | NAND_STATUS_READY, 0); @@ -1387,9 +1383,10 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp) return; brcmnand_set_wp(ctrl, wp); - nand_status_op(chip, NULL); + /* force controller operation to update internal copy of NAND chip status */ + brcmnand_status(host); /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */ - ret = bcmnand_ctrl_poll_status(ctrl, + ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY | NAND_STATUS_READY | NAND_STATUS_WP, @@ -1629,13 +1626,13 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd) */ if (oops_in_progress) { if (ctrl->cmd_pending && - bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0)) + bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0)) return; } else 
BUG_ON(ctrl->cmd_pending != 0); ctrl->cmd_pending = cmd; - ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); + ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); WARN_ON(ret); mb(); /* flush previous writes */ @@ -1643,16 +1640,6 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd) cmd << brcmnand_cmd_shift(ctrl)); } -/*********************************************************************** - * NAND MTD API: read/program/erase - ***********************************************************************/ - -static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat, - unsigned int ctrl) -{ - /* intentionally left blank */ -} - static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip) { struct brcmnand_host *host = nand_get_controller_data(chip); @@ -1664,7 +1651,7 @@ static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip) if (mtd->oops_panic_write || ctrl->irq < 0) { /* switch to interrupt polling and PIO mode */ disable_ctrl_irqs(ctrl); - sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, + sts = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); err = sts < 0; } else { @@ -1703,6 +1690,26 @@ static int brcmnand_waitfunc(struct nand_chip *chip) INTFC_FLASH_STATUS; } +static u8 brcmnand_status(struct brcmnand_host *host) +{ + struct nand_chip *chip = &host->chip; + struct mtd_info *mtd = nand_to_mtd(chip); + + brcmnand_set_cmd_addr(mtd, 0); + brcmnand_send_cmd(host, CMD_STATUS_READ); + + return brcmnand_waitfunc(chip); +} + +static u8 brcmnand_reset(struct brcmnand_host *host) +{ + struct nand_chip *chip = &host->chip; + + brcmnand_send_cmd(host, CMD_FLASH_RESET); + + return brcmnand_waitfunc(chip); +} + enum { LLOP_RE = BIT(16), LLOP_WE = BIT(17), @@ -1752,190 +1759,6 @@ static int brcmnand_low_level_op(struct brcmnand_host *host, return brcmnand_waitfunc(chip); } -static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command, - int column, int page_addr) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - struct brcmnand_host *host = nand_get_controller_data(chip); - struct brcmnand_controller *ctrl = host->ctrl; - u64 addr = (u64)page_addr << chip->page_shift; - int native_cmd = 0; - - if (command == NAND_CMD_READID || command == NAND_CMD_PARAM || - command == NAND_CMD_RNDOUT) - addr = (u64)column; - /* Avoid propagating a negative, don't-care address */ - else if (page_addr < 0) - addr = 0; - - dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command, - (unsigned long long)addr); - - host->last_cmd = command; - host->last_byte = 0; - host->last_addr = addr; - - switch (command) { - case NAND_CMD_RESET: - native_cmd = CMD_FLASH_RESET; - break; - case NAND_CMD_STATUS: - native_cmd = CMD_STATUS_READ; - break; - case NAND_CMD_READID: - native_cmd = CMD_DEVICE_ID_READ; - break; - case NAND_CMD_READOOB: - native_cmd = CMD_SPARE_AREA_READ; - break; - case NAND_CMD_ERASE1: - native_cmd = CMD_BLOCK_ERASE; - brcmnand_wp(mtd, 0); - break; - case NAND_CMD_PARAM: - native_cmd = CMD_PARAMETER_READ; - break; - case NAND_CMD_SET_FEATURES: - case NAND_CMD_GET_FEATURES: - brcmnand_low_level_op(host, LL_OP_CMD, command, false); - brcmnand_low_level_op(host, LL_OP_ADDR, column, false); - break; - case NAND_CMD_RNDOUT: - native_cmd = CMD_PARAMETER_CHANGE_COL; - addr &= ~((u64)(FC_BYTES - 1)); - /* - * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0 - * NB: hwcfg.sector_size_1k may not be initialized yet - */ - if (brcmnand_get_sector_size_1k(host)) { - host->hwcfg.sector_size_1k = - 
brcmnand_get_sector_size_1k(host); - brcmnand_set_sector_size_1k(host, 0); - } - break; - } - - if (!native_cmd) - return; - - brcmnand_set_cmd_addr(mtd, addr); - brcmnand_send_cmd(host, native_cmd); - brcmnand_waitfunc(chip); - - if (native_cmd == CMD_PARAMETER_READ || - native_cmd == CMD_PARAMETER_CHANGE_COL) { - /* Copy flash cache word-wise */ - u32 *flash_cache = (u32 *)ctrl->flash_cache; - int i; - - brcmnand_soc_data_bus_prepare(ctrl->soc, true); - - /* - * Must cache the FLASH_CACHE now, since changes in - * SECTOR_SIZE_1K may invalidate it - */ - for (i = 0; i < FC_WORDS; i++) - /* - * Flash cache is big endian for parameter pages, at - * least on STB SoCs - */ - flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i)); - - brcmnand_soc_data_bus_unprepare(ctrl->soc, true); - - /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */ - if (host->hwcfg.sector_size_1k) - brcmnand_set_sector_size_1k(host, - host->hwcfg.sector_size_1k); - } - - /* Re-enable protection is necessary only after erase */ - if (command == NAND_CMD_ERASE1) - brcmnand_wp(mtd, 1); -} - -static uint8_t brcmnand_read_byte(struct nand_chip *chip) -{ - struct brcmnand_host *host = nand_get_controller_data(chip); - struct brcmnand_controller *ctrl = host->ctrl; - uint8_t ret = 0; - int addr, offs; - - switch (host->last_cmd) { - case NAND_CMD_READID: - if (host->last_byte < 4) - ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >> - (24 - (host->last_byte << 3)); - else if (host->last_byte < 8) - ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >> - (56 - (host->last_byte << 3)); - break; - - case NAND_CMD_READOOB: - ret = oob_reg_read(ctrl, host->last_byte); - break; - - case NAND_CMD_STATUS: - ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) & - INTFC_FLASH_STATUS; - if (wp_on) /* hide WP status */ - ret |= NAND_STATUS_WP; - break; - - case NAND_CMD_PARAM: - case NAND_CMD_RNDOUT: - addr = host->last_addr + host->last_byte; - offs = addr & (FC_BYTES - 1); - - /* At FC_BYTES boundary, switch to next column */ - if (host->last_byte > 0 && offs == 0) - nand_change_read_column_op(chip, addr, NULL, 0, false); - - ret = ctrl->flash_cache[offs]; - break; - case NAND_CMD_GET_FEATURES: - if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) { - ret = 0; - } else { - bool last = host->last_byte == - ONFI_SUBFEATURE_PARAM_LEN - 1; - brcmnand_low_level_op(host, LL_OP_RD, 0, last); - ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff; - } - } - - dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret); - host->last_byte++; - - return ret; -} - -static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len) -{ - int i; - - for (i = 0; i < len; i++, buf++) - *buf = brcmnand_read_byte(chip); -} - -static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf, - int len) -{ - int i; - struct brcmnand_host *host = nand_get_controller_data(chip); - - switch (host->last_cmd) { - case NAND_CMD_SET_FEATURES: - for (i = 0; i < len; i++) - brcmnand_low_level_op(host, LL_OP_WR, buf[i], - (i + 1) == len); - break; - default: - BUG(); - break; - } -} - /* * Kick EDU engine */ @@ -2345,8 +2168,9 @@ static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf, struct mtd_info *mtd = nand_to_mtd(chip); struct brcmnand_host *host = nand_get_controller_data(chip); u8 *oob = oob_required ? 
(u8 *)chip->oob_poi : NULL; + u64 addr = (u64)page << chip->page_shift; - nand_read_page_op(chip, page, 0, NULL, 0); + host->last_addr = addr; return brcmnand_read(mtd, chip, host->last_addr, mtd->writesize >> FC_SHIFT, (u32 *)buf, oob); @@ -2359,8 +2183,9 @@ static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf, struct mtd_info *mtd = nand_to_mtd(chip); u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL; int ret; + u64 addr = (u64)page << chip->page_shift; - nand_read_page_op(chip, page, 0, NULL, 0); + host->last_addr = addr; brcmnand_set_ecc_enabled(host, 0); ret = brcmnand_read(mtd, chip, host->last_addr, @@ -2468,11 +2293,11 @@ static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf, struct mtd_info *mtd = nand_to_mtd(chip); struct brcmnand_host *host = nand_get_controller_data(chip); void *oob = oob_required ? chip->oob_poi : NULL; + u64 addr = (u64)page << chip->page_shift; - nand_prog_page_begin_op(chip, page, 0, NULL, 0); - brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); + host->last_addr = addr; - return nand_prog_page_end_op(chip); + return brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); } static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, @@ -2481,13 +2306,15 @@ static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, struct mtd_info *mtd = nand_to_mtd(chip); struct brcmnand_host *host = nand_get_controller_data(chip); void *oob = oob_required ? chip->oob_poi : NULL; + u64 addr = (u64)page << chip->page_shift; + int ret = 0; - nand_prog_page_begin_op(chip, page, 0, NULL, 0); + host->last_addr = addr; brcmnand_set_ecc_enabled(host, 0); - brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); + ret = brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); brcmnand_set_ecc_enabled(host, 1); - return nand_prog_page_end_op(chip); + return ret; } static int brcmnand_write_oob(struct nand_chip *chip, int page) @@ -2511,6 +2338,130 @@ static int brcmnand_write_oob_raw(struct nand_chip *chip, int page) return ret; } +static int brcmnand_exec_instr(struct brcmnand_host *host, int i, + const struct nand_operation *op) +{ + const struct nand_op_instr *instr = &op->instrs[i]; + struct brcmnand_controller *ctrl = host->ctrl; + const u8 *out; + bool last_op; + int ret = 0; + u8 *in; + + /* + * The controller needs to be aware of the last command in the operation + * (WAITRDY excepted). 
+ */ + last_op = ((i == (op->ninstrs - 1)) && (instr->type != NAND_OP_WAITRDY_INSTR)) || + ((i == (op->ninstrs - 2)) && (op->instrs[i+1].type == NAND_OP_WAITRDY_INSTR)); + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + brcmnand_low_level_op(host, LL_OP_CMD, instr->ctx.cmd.opcode, last_op); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) + brcmnand_low_level_op(host, LL_OP_ADDR, instr->ctx.addr.addrs[i], + last_op && (i == (instr->ctx.addr.naddrs - 1))); + break; + + case NAND_OP_DATA_IN_INSTR: + in = instr->ctx.data.buf.in; + for (i = 0; i < instr->ctx.data.len; i++) { + brcmnand_low_level_op(host, LL_OP_RD, 0, + last_op && (i == (instr->ctx.data.len - 1))); + in[i] = brcmnand_read_reg(host->ctrl, BRCMNAND_LL_RDATA); + } + break; + + case NAND_OP_DATA_OUT_INSTR: + out = instr->ctx.data.buf.out; + for (i = 0; i < instr->ctx.data.len; i++) + brcmnand_low_level_op(host, LL_OP_WR, out[i], + last_op && (i == (instr->ctx.data.len - 1))); + break; + + case NAND_OP_WAITRDY_INSTR: + ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); + break; + + default: + dev_err(ctrl->dev, "unsupported instruction type: %d\n", + instr->type); + ret = -EINVAL; + break; + } + + return ret; +} + +static int brcmnand_op_is_status(const struct nand_operation *op) +{ + if ((op->ninstrs == 2) && + (op->instrs[0].type == NAND_OP_CMD_INSTR) && + (op->instrs[0].ctx.cmd.opcode == NAND_CMD_STATUS) && + (op->instrs[1].type == NAND_OP_DATA_IN_INSTR)) + return 1; + + return 0; +} + +static int brcmnand_op_is_reset(const struct nand_operation *op) +{ + if ((op->ninstrs == 2) && + (op->instrs[0].type == NAND_OP_CMD_INSTR) && + (op->instrs[0].ctx.cmd.opcode == NAND_CMD_RESET) && + (op->instrs[1].type == NAND_OP_WAITRDY_INSTR)) + return 1; + + return 0; +} + +static int brcmnand_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + struct brcmnand_host *host = nand_get_controller_data(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + u8 *status; + unsigned int i; + int ret = 0; + + if (check_only) + return 0; + + if (brcmnand_op_is_status(op)) { + status = op->instrs[1].ctx.data.buf.in; + *status = brcmnand_status(host); + + return 0; + } + else if (brcmnand_op_is_reset(op)) { + ret = brcmnand_reset(host); + if (ret < 0) + return ret; + + brcmnand_wp(mtd, 1); + + return 0; + } + + if (op->deassert_wp) + brcmnand_wp(mtd, 0); + + for (i = 0; i < op->ninstrs; i++) { + ret = brcmnand_exec_instr(host, i, op); + if (ret) + break; + } + + if (op->deassert_wp) + brcmnand_wp(mtd, 1); + + return ret; +} + /*********************************************************************** * Per-CS setup (1 NAND device) ***********************************************************************/ @@ -2821,6 +2772,7 @@ static int brcmnand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops brcmnand_controller_ops = { .attach_chip = brcmnand_attach_chip, + .exec_op = brcmnand_exec_op, }; static int brcmnand_init_cs(struct brcmnand_host *host, @@ -2845,13 +2797,6 @@ static int brcmnand_init_cs(struct brcmnand_host *host, mtd->owner = THIS_MODULE; mtd->dev.parent = dev; - chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl; - chip->legacy.cmdfunc = brcmnand_cmdfunc; - chip->legacy.waitfunc = brcmnand_waitfunc; - chip->legacy.read_byte = brcmnand_read_byte; - chip->legacy.read_buf = brcmnand_read_buf; - chip->legacy.write_buf = brcmnand_write_buf; - chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; chip->ecc.read_page = brcmnand_read_page; 
chip->ecc.write_page = brcmnand_write_page; @@ -2863,6 +2808,7 @@ static int brcmnand_init_cs(struct brcmnand_host *host, chip->ecc.write_oob = brcmnand_write_oob; chip->controller = &ctrl->controller; + ctrl->controller.controller_wp = 1; /* * The bootloader might have configured 16bit mode but @@ -3299,7 +3245,7 @@ err: } EXPORT_SYMBOL_GPL(brcmnand_probe); -int brcmnand_remove(struct platform_device *pdev) +void brcmnand_remove(struct platform_device *pdev) { struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev); struct brcmnand_host *host; @@ -3316,8 +3262,6 @@ int brcmnand_remove(struct platform_device *pdev) clk_disable_unprepare(ctrl->clk); dev_set_drvdata(&pdev->dev, NULL); - - return 0; } EXPORT_SYMBOL_GPL(brcmnand_remove); diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.h b/drivers/mtd/nand/raw/brcmnand/brcmnand.h index f1f93d85f50d..928114c0be5e 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.h +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.h @@ -88,7 +88,7 @@ static inline void brcmnand_soc_write(struct brcmnand_soc *soc, u32 val, } int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc); -int brcmnand_remove(struct platform_device *pdev); +void brcmnand_remove(struct platform_device *pdev); extern const struct dev_pm_ops brcmnand_pm_ops; diff --git a/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c index 950923d977b7..558f083b92e9 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c @@ -23,7 +23,7 @@ static int brcmstb_nand_probe(struct platform_device *pdev) static struct platform_driver brcmstb_nand_driver = { .probe = brcmstb_nand_probe, - .remove = brcmnand_remove, + .remove_new = brcmnand_remove, .driver = { .name = "brcmstb_nand", .pm = &brcmnand_pm_ops, diff --git a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c index 089c70fc6edf..bf46c8b85898 100644 --- a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c +++ b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c @@ -134,7 +134,7 @@ MODULE_DEVICE_TABLE(of, iproc_nand_of_match); static struct platform_driver iproc_nand_driver = { .probe = iproc_nand_probe, - .remove = brcmnand_remove, + .remove_new = brcmnand_remove, .driver = { .name = "iproc_nand", .pm = &brcmnand_pm_ops, diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index 5d2ddb037a9a..5243fab9face 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -1491,10 +1491,12 @@ static int __init doc_probe(unsigned long physadr) else numchips = doc2001_init(mtd); - if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) { - /* DBB note: i believe nand_cleanup is necessary here, as - buffers may have been allocated in nand_base. Check with - Thomas. FIX ME! 
*/ + ret = nand_scan(nand, numchips); + if (ret) + goto fail; + + ret = doc->late_init(mtd); + if (ret) { nand_cleanup(nand); goto fail; } diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c index 20bb1e0cb5eb..f0e2318ce088 100644 --- a/drivers/mtd/nand/raw/fsl_ifc_nand.c +++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c @@ -21,7 +21,7 @@ #define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */ -#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait +#define IFC_TIMEOUT_MSECS 1000 /* Maximum timeout to wait for IFC NAND Machine */ struct fsl_ifc_ctrl; diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 71ec4052e52a..cdb58aca59c0 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -90,6 +90,8 @@ /* eMMC clock register, misc control */ #define CLK_SELECT_NAND BIT(31) +#define CLK_ALWAYS_ON_NAND BIT(24) +#define CLK_SELECT_FIX_PLL2 BIT(6) #define NFC_CLK_CYCLE 6 @@ -509,7 +511,7 @@ static void meson_nfc_set_user_byte(struct nand_chip *nand, u8 *oob_buf) __le64 *info; int i, count; - for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) { + for (i = 0, count = 0; i < nand->ecc.steps; i++, count += (2 + nand->ecc.bytes)) { info = &meson_chip->info_buf[i]; *info |= oob_buf[count]; *info |= oob_buf[count + 1] << 8; @@ -522,7 +524,7 @@ static void meson_nfc_get_user_byte(struct nand_chip *nand, u8 *oob_buf) __le64 *info; int i, count; - for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) { + for (i = 0, count = 0; i < nand->ecc.steps; i++, count += (2 + nand->ecc.bytes)) { info = &meson_chip->info_buf[i]; oob_buf[count] = *info; oob_buf[count + 1] = *info >> 8; @@ -1154,7 +1156,7 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc) return PTR_ERR(nfc->nand_clk); /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ - writel(CLK_SELECT_NAND | readl(nfc->reg_clk), + writel(CLK_ALWAYS_ON_NAND | CLK_SELECT_NAND | CLK_SELECT_FIX_PLL2, nfc->reg_clk); ret = clk_prepare_enable(nfc->core_clk); diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 9e24bedffd89..3b3ce2926f5d 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -366,6 +366,10 @@ static int nand_check_wp(struct nand_chip *chip) if (chip->options & NAND_BROKEN_XD) return 0; + /* controller responsible for NAND write protect */ + if (chip->controller->controller_wp) + return 0; + /* Check the WP bit */ ret = nand_status_op(chip, &status); if (ret) @@ -1207,6 +1211,23 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page, return nand_exec_op(chip, &op); } +static void rawnand_cap_cont_reads(struct nand_chip *chip) +{ + struct nand_memory_organization *memorg; + unsigned int pages_per_lun, first_lun, last_lun; + + memorg = nanddev_get_memorg(&chip->base); + pages_per_lun = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun; + first_lun = chip->cont_read.first_page / pages_per_lun; + last_lun = chip->cont_read.last_page / pages_per_lun; + + /* Prevent sequential cache reads across LUN boundaries */ + if (first_lun != last_lun) + chip->cont_read.pause_page = first_lun * pages_per_lun + pages_per_lun - 1; + else + chip->cont_read.pause_page = chip->cont_read.last_page; +} + static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, void *buf, unsigned int len, bool check_only) @@ -1225,7 +1246,7 @@ static int 
nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p NAND_OP_DATA_IN(len, buf, 0), }; struct nand_op_instr cont_instrs[] = { - NAND_OP_CMD(page == chip->cont_read.last_page ? + NAND_OP_CMD(page == chip->cont_read.pause_page ? NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ, NAND_COMMON_TIMING_NS(conf, tWB_max)), NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), @@ -1262,16 +1283,29 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p } if (page == chip->cont_read.first_page) - return nand_exec_op(chip, &start_op); + ret = nand_exec_op(chip, &start_op); else - return nand_exec_op(chip, &cont_op); + ret = nand_exec_op(chip, &cont_op); + if (ret) + return ret; + + if (!chip->cont_read.ongoing) + return 0; + + if (page == chip->cont_read.pause_page && + page != chip->cont_read.last_page) { + chip->cont_read.first_page = chip->cont_read.pause_page + 1; + rawnand_cap_cont_reads(chip); + } else if (page == chip->cont_read.last_page) { + chip->cont_read.ongoing = false; + } + + return 0; } static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page) { - return chip->cont_read.ongoing && - page >= chip->cont_read.first_page && - page <= chip->cont_read.last_page; + return chip->cont_read.ongoing && page >= chip->cont_read.first_page; } /** @@ -1493,7 +1527,8 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page, NAND_COMMON_TIMING_NS(conf, tWB_max)), NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0), }; - struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); + struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs, + instrs); int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page); if (naddrs < 0) @@ -1916,7 +1951,8 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock) NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max), 0), }; - struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); + struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs, + instrs); if (chip->options & NAND_ROW_ADDR_3) instrs[1].ctx.addr.naddrs++; @@ -3430,21 +3466,42 @@ static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page, u32 readlen, int col) { struct mtd_info *mtd = nand_to_mtd(chip); + unsigned int end_page, end_col; + + chip->cont_read.ongoing = false; if (!chip->controller->supported_op.cont_read) return; - if ((col && col + readlen < (3 * mtd->writesize)) || - (!col && readlen < (2 * mtd->writesize))) { - chip->cont_read.ongoing = false; + end_page = DIV_ROUND_UP(col + readlen, mtd->writesize); + end_col = (col + readlen) % mtd->writesize; + + if (col) + page++; + + if (end_col && end_page) + end_page--; + + if (page + 1 > end_page) return; - } - chip->cont_read.ongoing = true; chip->cont_read.first_page = page; - if (col) + chip->cont_read.last_page = end_page; + chip->cont_read.ongoing = true; + + rawnand_cap_cont_reads(chip); +} + +static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page) +{ + if (!chip->cont_read.ongoing || page != chip->cont_read.first_page) + return; + + chip->cont_read.first_page++; + if (chip->cont_read.first_page == chip->cont_read.pause_page) chip->cont_read.first_page++; - chip->cont_read.last_page = page + ((readlen >> chip->page_shift) & chip->pagemask); + if (chip->cont_read.first_page >= chip->cont_read.last_page) + chip->cont_read.ongoing = false; } /** @@ -3621,6 +3678,8 @@ read_retry: buf += bytes; max_bitflips = max_t(unsigned int, max_bitflips, 
chip->pagecache.bitflips); + + rawnand_cont_read_skip_first_page(chip, page); } readlen -= bytes; @@ -5125,6 +5184,14 @@ static void rawnand_late_check_supported_ops(struct nand_chip *chip) /* The supported_op fields should not be set by individual drivers */ WARN_ON_ONCE(chip->controller->supported_op.cont_read); + /* + * Too many devices do not support sequential cached reads with on-die + * ECC correction enabled, so in this case refuse to perform the + * automation. + */ + if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE) + return; + if (!nand_has_exec_op(chip)) return; diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c index c506e92a3e45..1c76ee98efb7 100644 --- a/drivers/mtd/nand/raw/pl35x-nand-controller.c +++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c @@ -128,7 +128,7 @@ struct pl35x_nand { * @conf_regs: SMC configuration registers for command phase * @io_regs: NAND data registers for data phase * @controller: Core NAND controller structure - * @chip: NAND chip information structure + * @chips: List of connected NAND chips * @selected_chip: NAND chip currently selected by the controller * @assigned_cs: List of assigned CS * @ecc_buf: Temporary buffer to extract ECC bytes diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c index 596cf9a78274..7baaef69d70a 100644 --- a/drivers/mtd/nand/raw/rockchip-nand-controller.c +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -98,7 +98,7 @@ enum nfc_type { * @high: ECC count high bit index at register. * @high_mask: mask bit */ -struct ecc_cnt_status { +struct rk_ecc_cnt_status { u8 err_flag_bit; u8 low; u8 low_mask; @@ -108,6 +108,7 @@ struct ecc_cnt_status { }; /** + * struct nfc_cfg: Rockchip NAND controller configuration * @type: NFC version * @ecc_strengths: ECC strengths * @ecc_cfgs: ECC config values @@ -144,8 +145,8 @@ struct nfc_cfg { u32 int_st_off; u32 oob0_off; u32 oob1_off; - struct ecc_cnt_status ecc0; - struct ecc_cnt_status ecc1; + struct rk_ecc_cnt_status ecc0; + struct rk_ecc_cnt_status ecc1; }; struct rk_nfc_nand_chip { diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c index 3d3d5c9814ff..48c1d0eb66ca 100644 --- a/drivers/mtd/nand/raw/s3c2410.c +++ b/drivers/mtd/nand/raw/s3c2410.c @@ -105,7 +105,6 @@ struct s3c2410_nand_info; /** * struct s3c2410_nand_mtd - driver MTD structure - * @mtd: The MTD instance to pass to the MTD layer. * @chip: The NAND chip information. * @set: The platform information supplied for this set of NAND chips. * @info: Link back to the hardware information. @@ -145,7 +144,6 @@ enum s3c_nand_clk_state { * @clk_rate: The clock rate from @clk. * @clk_state: The current clock state. * @cpu_type: The exact type of this controller. 
- * @freq_transition: CPUFreq notifier block */ struct s3c2410_nand_info { /* mtd info */ diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c index eddcc0728a67..37f79c019a72 100644 --- a/drivers/mtd/nand/raw/txx9ndfmc.c +++ b/drivers/mtd/nand/raw/txx9ndfmc.c @@ -276,7 +276,7 @@ static const struct nand_controller_ops txx9ndfmc_controller_ops = { .attach_chip = txx9ndfmc_attach_chip, }; -static int __init txx9ndfmc_probe(struct platform_device *dev) +static int txx9ndfmc_probe(struct platform_device *dev) { struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev); int hold, spw; @@ -369,13 +369,11 @@ static int __init txx9ndfmc_probe(struct platform_device *dev) return 0; } -static int __exit txx9ndfmc_remove(struct platform_device *dev) +static void txx9ndfmc_remove(struct platform_device *dev) { struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev); int ret, i; - if (!drvdata) - return 0; for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) { struct mtd_info *mtd = drvdata->mtds[i]; struct nand_chip *chip; @@ -392,7 +390,6 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev) kfree(txx9_priv->mtdname); kfree(txx9_priv); } - return 0; } #ifdef CONFIG_PM @@ -407,14 +404,14 @@ static int txx9ndfmc_resume(struct platform_device *dev) #endif static struct platform_driver txx9ndfmc_driver = { - .remove = __exit_p(txx9ndfmc_remove), + .probe = txx9ndfmc_probe, + .remove_new = txx9ndfmc_remove, .resume = txx9ndfmc_resume, .driver = { .name = "txx9ndfmc", }, }; - -module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe); +module_platform_driver(txx9ndfmc_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver"); diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 849ccfedbc72..e0b6715e5dfe 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -974,7 +974,7 @@ static int spinand_manufacturer_match(struct spinand_device *spinand, spinand->manufacturer = manufacturer; return 0; } - return -ENOTSUPP; + return -EOPNOTSUPP; } static int spinand_id_detect(struct spinand_device *spinand) diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c index e13b8d2dd50a..45d1153a04a0 100644 --- a/drivers/mtd/spi-nor/atmel.c +++ b/drivers/mtd/spi-nor/atmel.c @@ -16,12 +16,12 @@ * is to unlock the whole flash array on startup. Therefore, we have to support * exactly this operation. */ -static int at25fs_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) +static int at25fs_nor_lock(struct spi_nor *nor, loff_t ofs, u64 len) { return -EOPNOTSUPP; } -static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) +static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, u64 len) { int ret; @@ -37,7 +37,7 @@ static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) return ret; } -static int at25fs_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) +static int at25fs_nor_is_locked(struct spi_nor *nor, loff_t ofs, u64 len) { return -EOPNOTSUPP; } @@ -69,7 +69,7 @@ static const struct spi_nor_fixups at25fs_nor_fixups = { * Return: 0 on success, -error otherwise. 
*/ static int atmel_nor_set_global_protection(struct spi_nor *nor, loff_t ofs, - uint64_t len, bool is_protect) + u64 len, bool is_protect) { int ret; u8 sr; @@ -118,20 +118,18 @@ static int atmel_nor_set_global_protection(struct spi_nor *nor, loff_t ofs, return spi_nor_write_sr(nor, nor->bouncebuf, 1); } -static int atmel_nor_global_protect(struct spi_nor *nor, loff_t ofs, - uint64_t len) +static int atmel_nor_global_protect(struct spi_nor *nor, loff_t ofs, u64 len) { return atmel_nor_set_global_protection(nor, ofs, len, true); } -static int atmel_nor_global_unprotect(struct spi_nor *nor, loff_t ofs, - uint64_t len) +static int atmel_nor_global_unprotect(struct spi_nor *nor, loff_t ofs, u64 len) { return atmel_nor_set_global_protection(nor, ofs, len, false); } static int atmel_nor_is_global_protected(struct spi_nor *nor, loff_t ofs, - uint64_t len) + u64 len) { int ret; diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 1c443fe568cf..4129764fad8c 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -1060,24 +1060,32 @@ static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2) } /** - * spi_nor_erase_chip() - Erase the entire flash memory. + * spi_nor_erase_die() - Erase the entire die. * @nor: pointer to 'struct spi_nor'. + * @addr: address of the die. + * @die_size: size of the die. * * Return: 0 on success, -errno otherwise. */ -static int spi_nor_erase_chip(struct spi_nor *nor) +static int spi_nor_erase_die(struct spi_nor *nor, loff_t addr, size_t die_size) { + bool multi_die = nor->mtd.size != die_size; int ret; - dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10)); + dev_dbg(nor->dev, " %lldKiB\n", (long long)(die_size >> 10)); if (nor->spimem) { - struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP; + struct spi_mem_op op = + SPI_NOR_DIE_ERASE_OP(nor->params->die_erase_opcode, + nor->addr_nbytes, addr, multi_die); spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); ret = spi_mem_exec_op(nor->spimem, &op); } else { + if (multi_die) + return -EOPNOTSUPP; + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0); @@ -1792,6 +1800,51 @@ destroy_erase_cmd_list: return ret; } +static int spi_nor_erase_dice(struct spi_nor *nor, loff_t addr, + size_t len, size_t die_size) +{ + unsigned long timeout; + int ret; + + /* + * Scale the timeout linearly with the size of the flash, with + * a minimum calibrated to an old 2MB flash. We could try to + * pull these from CFI/SFDP, but these values should be good + * enough for now. + */ + timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES, + CHIP_ERASE_2MB_READY_WAIT_JIFFIES * + (unsigned long)(nor->mtd.size / SZ_2M)); + + do { + ret = spi_nor_lock_device(nor); + if (ret) + return ret; + + ret = spi_nor_write_enable(nor); + if (ret) { + spi_nor_unlock_device(nor); + return ret; + } + + ret = spi_nor_erase_die(nor, addr, die_size); + + spi_nor_unlock_device(nor); + if (ret) + return ret; + + ret = spi_nor_wait_till_ready_with_timeout(nor, timeout); + if (ret) + return ret; + + addr += die_size; + len -= die_size; + + } while (len); + + return 0; +} + /* * Erase an address range on the nor chip. The address range may extend * one or more erase sectors. Return an error if there is a problem erasing. 
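The spi_nor_erase_dice() helper added above turns the old single whole-chip erase into a per-die loop: for each die it write-enables, issues the die erase opcode, waits for the flash to report ready, then advances addr by die_size until len is consumed, with the ready-wait budget scaled linearly up from a 2 MiB baseline. A minimal user-space sketch of that control flow, assuming a power-of-two die size; the demo_* names are invented for illustration and the kernel's locking, write-enable and status polling are reduced to a printf:

#include <stdint.h>
#include <stdio.h>

#define SZ_2M			(2 * 1024 * 1024ULL)
#define READY_WAIT_2MB_MS	40ULL	/* invented stand-in for CHIP_ERASE_2MB_READY_WAIT_JIFFIES */

/* Stub: real code would write-enable, send the opcode and poll for ready. */
static int demo_erase_one_die(uint64_t addr, uint64_t die_size, uint64_t budget_ms)
{
	printf("erase die @ 0x%010llx (%llu MiB), ready-wait budget %llu ms\n",
	       (unsigned long long)addr,
	       (unsigned long long)(die_size >> 20),
	       (unsigned long long)budget_ms);
	return 0;
}

static int demo_erase(uint64_t flash_size, unsigned int n_dice,
		      uint64_t addr, uint64_t len)
{
	uint64_t die_size = n_dice ? flash_size / n_dice : flash_size;
	uint64_t budget_ms = READY_WAIT_2MB_MS;

	/* Die erase applies only when addr and len both sit on die
	 * boundaries; die_size is a power of two, so masking is exact. */
	if ((addr & (die_size - 1)) || (len & (die_size - 1)))
		return -1;	/* caller falls back to sector-based erase */

	/* Scale the ready-wait linearly with flash size, 2 MiB floor. */
	if (flash_size > SZ_2M)
		budget_ms *= flash_size / SZ_2M;

	while (len) {
		if (demo_erase_one_die(addr, die_size, budget_ms))
			return -1;
		addr += die_size;
		len -= die_size;
	}
	return 0;
}

int main(void)
{
	/* 256 MiB flash built from four 64 MiB dice: erase dice 1 and 2. */
	return demo_erase(256ULL << 20, 4, 64ULL << 20, 128ULL << 20);
}

This is what lets the hunks further down drop NO_CHIP_ERASE from multi-die parts such as mt25qu02g: a die-aligned request covering several dice is now erased die by die instead of falling back to slow sector-by-sector erase.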
@@ -1799,8 +1852,10 @@ destroy_erase_cmd_list: static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) { struct spi_nor *nor = mtd_to_spi_nor(mtd); - u32 addr, len; - uint32_t rem; + u8 n_dice = nor->params->n_dice; + bool multi_die_erase = false; + u32 addr, len, rem; + size_t die_size; int ret; dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr, @@ -1815,39 +1870,22 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) addr = instr->addr; len = instr->len; + if (n_dice) { + die_size = div_u64(mtd->size, n_dice); + if (!(len & (die_size - 1)) && !(addr & (die_size - 1))) + multi_die_erase = true; + } else { + die_size = mtd->size; + } + ret = spi_nor_prep_and_lock_pe(nor, instr->addr, instr->len); if (ret) return ret; - /* whole-chip erase? */ - if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) { - unsigned long timeout; - - ret = spi_nor_lock_device(nor); - if (ret) - goto erase_err; - - ret = spi_nor_write_enable(nor); - if (ret) { - spi_nor_unlock_device(nor); - goto erase_err; - } - - ret = spi_nor_erase_chip(nor); - spi_nor_unlock_device(nor); - if (ret) - goto erase_err; - - /* - * Scale the timeout linearly with the size of the flash, with - * a minimum calibrated to an old 2MB flash. We could try to - * pull these from CFI/SFDP, but these values should be good - * enough for now. - */ - timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES, - CHIP_ERASE_2MB_READY_WAIT_JIFFIES * - (unsigned long)(mtd->size / SZ_2M)); - ret = spi_nor_wait_till_ready_with_timeout(nor, timeout); + /* chip (die) erase? */ + if ((len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) || + multi_die_erase) { + ret = spi_nor_erase_dice(nor, addr, len, die_size); if (ret) goto erase_err; @@ -2146,7 +2184,7 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, if (is_power_of_2(page_size)) { page_offset = addr & (page_size - 1); } else { - uint64_t aux = addr; + u64 aux = addr; page_offset = do_div(aux, page_size); } @@ -2850,9 +2888,6 @@ static void spi_nor_init_flags(struct spi_nor *nor) nor->flags |= SNOR_F_HAS_SR_BP3_BIT6; } - if (flags & NO_CHIP_ERASE) - nor->flags |= SNOR_F_NO_OP_CHIP_ERASE; - if (flags & SPI_NOR_RWW && nor->params->n_banks > 1 && !nor->controller_ops) nor->flags |= SNOR_F_RWW; @@ -2897,17 +2932,22 @@ static int spi_nor_late_init_params(struct spi_nor *nor) return ret; } + /* Needed by some flashes late_init hooks. */ + spi_nor_init_flags(nor); + if (nor->info->fixups && nor->info->fixups->late_init) { ret = nor->info->fixups->late_init(nor); if (ret) return ret; } + if (!nor->params->die_erase_opcode) + nor->params->die_erase_opcode = SPINOR_OP_CHIP_ERASE; + /* Default method kept for backward compatibility. */ if (!params->set_4byte_addr_mode) params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr; - spi_nor_init_flags(nor); spi_nor_init_fixup_flags(nor); /* @@ -3145,8 +3185,20 @@ int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable) struct spi_nor_flash_parameter *params = nor->params; int ret; + if (enable) { + /* + * If the RESET# pin isn't hooked up properly, or the system + * otherwise doesn't perform a reset command in the boot + * sequence, it's impossible to 100% protect against unexpected + * reboots (e.g., crashes). Warn the user (or hopefully, system + * designer) that this is bad. 
+ */ + WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET, + "enabling reset hack; may not recover from unexpected reboots\n"); + } + ret = params->set_4byte_addr_mode(nor, enable); - if (ret && ret != -ENOTSUPP) + if (ret && ret != -EOPNOTSUPP) return ret; if (enable) { @@ -3193,20 +3245,8 @@ static int spi_nor_init(struct spi_nor *nor) if (nor->addr_nbytes == 4 && nor->read_proto != SNOR_PROTO_8_8_8_DTR && - !(nor->flags & SNOR_F_4B_OPCODES)) { - /* - * If the RESET# pin isn't hooked up properly, or the system - * otherwise doesn't perform a reset command in the boot - * sequence, it's impossible to 100% protect against unexpected - * reboots (e.g., crashes). Warn the user (or hopefully, system - * designer) that this is bad. - */ - WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET, - "enabling reset hack; may not recover from unexpected reboots\n"); - err = spi_nor_set_4byte_addr_mode(nor, true); - if (err) - return err; - } + !(nor->flags & SNOR_F_4B_OPCODES)) + return spi_nor_set_4byte_addr_mode(nor, true); return 0; } @@ -3237,7 +3277,8 @@ static void spi_nor_soft_reset(struct spi_nor *nor) ret = spi_mem_exec_op(nor->spimem, &op); if (ret) { - dev_warn(nor->dev, "Software reset failed: %d\n", ret); + if (ret != -EOPNOTSUPP) + dev_warn(nor->dev, "Software reset failed: %d\n", ret); return; } @@ -3452,9 +3493,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, { const struct flash_info *info; struct device *dev = nor->dev; - struct mtd_info *mtd = &nor->mtd; int ret; - int i; ret = spi_nor_check(nor); if (ret) @@ -3518,25 +3557,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, /* No mtd_info fields should be used up to this point. */ spi_nor_set_mtd_info(nor); - dev_info(dev, "%s (%lld Kbytes)\n", info->name, - (long long)mtd->size >> 10); - - dev_dbg(dev, - "mtd .name = %s, .size = 0x%llx (%lldMiB), " - ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", - mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20), - mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions); - - if (mtd->numeraseregions) - for (i = 0; i < mtd->numeraseregions; i++) - dev_dbg(dev, - "mtd.eraseregions[%d] = { .offset = 0x%llx, " - ".erasesize = 0x%.8x (%uKiB), " - ".numblocks = %d }\n", - i, (long long)mtd->eraseregions[i].offset, - mtd->eraseregions[i].erasesize, - mtd->eraseregions[i].erasesize / 1024, - mtd->eraseregions[i].numblocks); + dev_dbg(dev, "Manufacturer and device ID: %*phN\n", + SPI_NOR_MAX_ID_LEN, nor->id); + return 0; } EXPORT_SYMBOL_GPL(spi_nor_scan); diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h index 93cd2fc3606d..d36c0e072954 100644 --- a/drivers/mtd/spi-nor/core.h +++ b/drivers/mtd/spi-nor/core.h @@ -85,9 +85,9 @@ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) -#define SPI_NOR_CHIP_ERASE_OP \ - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0), \ - SPI_MEM_OP_NO_ADDR, \ +#define SPI_NOR_DIE_ERASE_OP(opcode, addr_nbytes, addr, dice) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \ + SPI_MEM_OP_ADDR(dice ? 
addr_nbytes : 0, addr, 0), \ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) @@ -293,9 +293,9 @@ struct spi_nor_erase_map { * @is_locked: check if a region of the SPI NOR is completely locked */ struct spi_nor_locking_ops { - int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); - int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); - int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*lock)(struct spi_nor *nor, loff_t ofs, u64 len); + int (*unlock)(struct spi_nor *nor, loff_t ofs, u64 len); + int (*is_locked)(struct spi_nor *nor, loff_t ofs, u64 len); }; /** @@ -362,6 +362,7 @@ struct spi_nor_otp { * command in octal DTR mode. * @n_banks: number of banks. * @n_dice: number of dice in the flash memory. + * @die_erase_opcode: die erase opcode. Defaults to SPINOR_OP_CHIP_ERASE. * @vreg_offset: volatile register offset for each die. * @hwcaps: describes the read and page program hardware * capabilities. @@ -399,6 +400,7 @@ struct spi_nor_flash_parameter { u8 rdsr_addr_nbytes; u8 n_banks; u8 n_dice; + u8 die_erase_opcode; u32 *vreg_offset; struct spi_nor_hwcaps hwcaps; @@ -463,7 +465,7 @@ struct spi_nor_id { * struct flash_info - SPI NOR flash_info entry. * @id: pointer to struct spi_nor_id or NULL, which means "no ID" (mostly * older chips). - * @name: the name of the flash. + * @name: (obsolete) the name of the flash. Do not set it for new additions. * @size: the size of the flash in bytes. * @sector_size: (optional) the size listed here is what works with * SPINOR_OP_SE, which isn't necessarily called a "sector" by @@ -487,7 +489,6 @@ struct spi_nor_id { * Usually these will power-up in a write-protected * state. * SPI_NOR_NO_ERASE: no erase command needed. - * NO_CHIP_ERASE: chip does not support chip erase. * SPI_NOR_NO_FR: can't do fastread. * SPI_NOR_QUAD_PP: flash supports Quad Input Page Program. * SPI_NOR_RWW: flash supports reads while write. @@ -537,10 +538,9 @@ struct flash_info { #define SPI_NOR_BP3_SR_BIT6 BIT(4) #define SPI_NOR_SWP_IS_VOLATILE BIT(5) #define SPI_NOR_NO_ERASE BIT(6) -#define NO_CHIP_ERASE BIT(7) -#define SPI_NOR_NO_FR BIT(8) -#define SPI_NOR_QUAD_PP BIT(9) -#define SPI_NOR_RWW BIT(10) +#define SPI_NOR_NO_FR BIT(7) +#define SPI_NOR_QUAD_PP BIT(8) +#define SPI_NOR_RWW BIT(9) u8 no_sfdp_flags; #define SPI_NOR_SKIP_SFDP BIT(0) diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c index 6e163cb5b478..2dbda6b6938a 100644 --- a/drivers/mtd/spi-nor/debugfs.c +++ b/drivers/mtd/spi-nor/debugfs.c @@ -138,7 +138,7 @@ static int spi_nor_params_show(struct seq_file *s, void *data) if (!(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) { string_get_size(params->size, 1, STRING_UNITS_2, buf, sizeof(buf)); - seq_printf(s, " %02x (%s)\n", SPINOR_OP_CHIP_ERASE, buf); + seq_printf(s, " %02x (%s)\n", nor->params->die_erase_opcode, buf); } seq_puts(s, "\nsector map\n"); diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c index 8920547c12bf..3c6499fdb712 100644 --- a/drivers/mtd/spi-nor/micron-st.c +++ b/drivers/mtd/spi-nor/micron-st.c @@ -11,6 +11,7 @@ /* flash_info mfr_flag. Used to read proprietary FSR register. 
*/ #define USE_FSR BIT(0) +#define SPINOR_OP_MT_DIE_ERASE 0xc4 /* Chip (die) erase opcode */ #define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ #define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */ #define SPINOR_OP_MT_DTR_RD 0xfd /* Fast Read opcode in DTR mode */ @@ -192,6 +193,50 @@ static struct spi_nor_fixups mt25qu512a_fixups = { .post_bfpt = mt25qu512a_post_bfpt_fixup, }; +static int st_nor_four_die_late_init(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + + params->die_erase_opcode = SPINOR_OP_MT_DIE_ERASE; + params->n_dice = 4; + + /* + * Unfortunately, the die erase opcode has no 4-byte opcode + * equivalent on these flashes, and the SFDP 4BAIT table does not + * cover die erase either. We are forced to enter 4-byte address + * mode in order to benefit from the die erase. + */ + return spi_nor_set_4byte_addr_mode(nor, true); +} + +static int st_nor_two_die_late_init(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter *params = nor->params; + + params->die_erase_opcode = SPINOR_OP_MT_DIE_ERASE; + params->n_dice = 2; + + /* + * Unfortunately, the die erase opcode has no 4-byte opcode + * equivalent on these flashes, and the SFDP 4BAIT table does not + * cover die erase either. We are forced to enter 4-byte address + * mode in order to benefit from the die erase. + */ + return spi_nor_set_4byte_addr_mode(nor, true); +} + +static struct spi_nor_fixups n25q00_fixups = { + .late_init = st_nor_four_die_late_init, +}; + +static struct spi_nor_fixups mt25q01_fixups = { + .late_init = st_nor_two_die_late_init, +}; + +static struct spi_nor_fixups mt25q02_fixups = { + .late_init = st_nor_four_die_late_init, +}; + static const struct flash_info st_nor_parts[] = { { .name = "m25p05-nonjedec", @@ -366,16 +411,17 @@ static const struct flash_info st_nor_parts[] = { .name = "n25q00", .size = SZ_128M, .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP | - SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE, + SPI_NOR_BP3_SR_BIT6, .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ, .mfr_flags = USE_FSR, + .fixups = &n25q00_fixups, }, { .id = SNOR_ID(0x20, 0xba, 0x22), .name = "mt25ql02g", .size = SZ_256M, - .flags = NO_CHIP_ERASE, .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ, .mfr_flags = USE_FSR, + .fixups = &mt25q02_fixups, }, { .id = SNOR_ID(0x20, 0xbb, 0x15), .name = "n25q016a", @@ -430,19 +476,24 @@ static const struct flash_info st_nor_parts[] = { .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ, .mfr_flags = USE_FSR, }, { + .id = SNOR_ID(0x20, 0xbb, 0x21, 0x10, 0x44, 0x00), + .name = "mt25qu01g", + .mfr_flags = USE_FSR, + .fixups = &mt25q01_fixups, + }, { .id = SNOR_ID(0x20, 0xbb, 0x21), .name = "n25q00a", .size = SZ_128M, - .flags = NO_CHIP_ERASE, .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ, .mfr_flags = USE_FSR, + .fixups = &n25q00_fixups, }, { .id = SNOR_ID(0x20, 0xbb, 0x22), .name = "mt25qu02g", .size = SZ_256M, - .flags = NO_CHIP_ERASE, .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, .mfr_flags = USE_FSR, + .fixups = &mt25q02_fixups, } }; diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c index b3b11dfed789..57713de32832 100644 --- a/drivers/mtd/spi-nor/sfdp.c +++ b/drivers/mtd/spi-nor/sfdp.c @@ -446,6 +446,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, u32 dword; u16 half; u8 erase_mask; + u8 wait_states, mode_clocks, opcode; /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs.
*/ if (bfpt_header->length < BFPT_DWORD_MAX_JESD216) @@ -631,6 +632,32 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B) return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt); + /* Parse 1-1-8 read instruction */ + opcode = FIELD_GET(BFPT_DWORD17_RD_1_1_8_CMD, bfpt.dwords[SFDP_DWORD(17)]); + if (opcode) { + mode_clocks = FIELD_GET(BFPT_DWORD17_RD_1_1_8_MODE_CLOCKS, + bfpt.dwords[SFDP_DWORD(17)]); + wait_states = FIELD_GET(BFPT_DWORD17_RD_1_1_8_WAIT_STATES, + bfpt.dwords[SFDP_DWORD(17)]); + params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8; + spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8], + mode_clocks, wait_states, opcode, + SNOR_PROTO_1_1_8); + } + + /* Parse 1-8-8 read instruction */ + opcode = FIELD_GET(BFPT_DWORD17_RD_1_8_8_CMD, bfpt.dwords[SFDP_DWORD(17)]); + if (opcode) { + mode_clocks = FIELD_GET(BFPT_DWORD17_RD_1_8_8_MODE_CLOCKS, + bfpt.dwords[SFDP_DWORD(17)]); + wait_states = FIELD_GET(BFPT_DWORD17_RD_1_8_8_WAIT_STATES, + bfpt.dwords[SFDP_DWORD(17)]); + params->hwcaps.mask |= SNOR_HWCAPS_READ_1_8_8; + spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_8_8], + mode_clocks, wait_states, opcode, + SNOR_PROTO_1_8_8); + } + /* 8D-8D-8D command extension. */ switch (bfpt.dwords[SFDP_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) { case BFPT_DWORD18_CMD_EXT_REP: @@ -968,6 +995,8 @@ static int spi_nor_parse_4bait(struct spi_nor *nor, { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) }, { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) }, { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) }, + { SNOR_HWCAPS_READ_1_1_8, BIT(20) }, + { SNOR_HWCAPS_READ_1_8_8, BIT(21) }, }; static const struct sfdp_4bait programs[] = { { SNOR_HWCAPS_PP, BIT(6) }, diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h index 6eb99e1cdd61..da0fe5aa9bb0 100644 --- a/drivers/mtd/spi-nor/sfdp.h +++ b/drivers/mtd/spi-nor/sfdp.h @@ -118,6 +118,13 @@ struct sfdp_bfpt { (BFPT_DWORD16_EN4B_EN4B | BFPT_DWORD16_EX4B_EX4B) #define BFPT_DWORD16_SWRST_EN_RST BIT(12) +#define BFPT_DWORD17_RD_1_1_8_CMD GENMASK(31, 24) +#define BFPT_DWORD17_RD_1_1_8_MODE_CLOCKS GENMASK(23, 21) +#define BFPT_DWORD17_RD_1_1_8_WAIT_STATES GENMASK(20, 16) +#define BFPT_DWORD17_RD_1_8_8_CMD GENMASK(15, 8) +#define BFPT_DWORD17_RD_1_8_8_MODE_CLOCKS GENMASK(7, 5) +#define BFPT_DWORD17_RD_1_8_8_WAIT_STATES GENMASK(4, 0) + #define BFPT_DWORD18_CMD_EXT_MASK GENMASK(30, 29) #define BFPT_DWORD18_CMD_EXT_REP (0x0UL << 29) /* Repeat */ #define BFPT_DWORD18_CMD_EXT_INV (0x1UL << 29) /* Invert */ diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 12921344373d..6cc237c24e07 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -17,6 +17,7 @@ #define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */ #define SPINOR_OP_CLPEF 0x82 /* Clear program/erase failure flags */ +#define SPINOR_OP_CYPRESS_DIE_ERASE 0x61 /* Chip (die) erase */ #define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */ #define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */ #define SPINOR_REG_CYPRESS_VREG 0x00800000 @@ -644,6 +645,7 @@ static int s25hx_t_late_init(struct spi_nor *nor) params->ready = cypress_nor_sr_ready_and_clear; cypress_nor_ecc_init(nor); + params->die_erase_opcode = SPINOR_OP_CYPRESS_DIE_ERASE; return 0; } @@ -933,7 +935,6 @@ static const struct flash_info spansion_nor_parts[] = { .id = SNOR_ID(0x34, 0x2a, 0x1c, 0x0f, 0x00, 0x90), .name = "s25hl02gt", .mfr_flags = USE_CLPEF, - .flags = NO_CHIP_ERASE, .fixups = &s25hx_t_fixups }, { .id = SNOR_ID(0x34, 0x2b, 0x19, 0x0f,
0x08, 0x90), @@ -954,7 +955,6 @@ static const struct flash_info spansion_nor_parts[] = { .id = SNOR_ID(0x34, 0x2b, 0x1c, 0x0f, 0x00, 0x90), .name = "s25hs02gt", .mfr_flags = USE_CLPEF, - .flags = NO_CHIP_ERASE, .fixups = &s25hx_t_fixups }, { .id = SNOR_ID(0x34, 0x5a, 0x1a), diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c index 44d2a546bf17..180b7390690c 100644 --- a/drivers/mtd/spi-nor/sst.c +++ b/drivers/mtd/spi-nor/sst.c @@ -13,12 +13,12 @@ #define SST26VF_CR_BPNV BIT(3) -static int sst26vf_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) +static int sst26vf_nor_lock(struct spi_nor *nor, loff_t ofs, u64 len) { return -EOPNOTSUPP; } -static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) +static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, u64 len) { int ret; @@ -38,7 +38,7 @@ static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) return spi_nor_global_block_unlock(nor); } -static int sst26vf_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) +static int sst26vf_nor_is_locked(struct spi_nor *nor, loff_t ofs, u64 len) { return -EOPNOTSUPP; } diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c index 585813310ee1..e48c3cff247a 100644 --- a/drivers/mtd/spi-nor/swp.c +++ b/drivers/mtd/spi-nor/swp.c @@ -53,7 +53,7 @@ static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor) } static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs, - uint64_t *len) + u64 *len) { struct mtd_info *mtd = &nor->mtd; u64 min_prot_len; @@ -90,10 +90,10 @@ static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs, * (if @locked is false); false otherwise. */ static bool spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, - uint64_t len, u8 sr, bool locked) + u64 len, u8 sr, bool locked) { loff_t lock_offs, lock_offs_max, offs_max; - uint64_t lock_len; + u64 lock_len; if (!len) return true; @@ -111,14 +111,13 @@ static bool spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, return (ofs >= lock_offs_max) || (offs_max <= lock_offs); } -static bool spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, - u8 sr) +static bool spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, u64 len, u8 sr) { return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true); } -static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, - uint64_t len, u8 sr) +static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, u64 len, + u8 sr) { return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false); } @@ -156,7 +155,7 @@ static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, * * Returns negative on errors, 0 on success. */ -static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) +static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len) { struct mtd_info *mtd = &nor->mtd; u64 min_prot_len; @@ -246,7 +245,7 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) * * Returns negative on errors, 0 on success. */ -static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) +static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, u64 len) { struct mtd_info *mtd = &nor->mtd; u64 min_prot_len; @@ -331,7 +330,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) * Returns 1 if entire region is locked, 0 if any portion is unlocked, and * negative on errors. 
*/ -static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) +static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, u64 len) { int ret; @@ -353,7 +352,7 @@ void spi_nor_init_default_locking_ops(struct spi_nor *nor) nor->params->locking_ops = &spi_nor_sr_locking_ops; } -static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, u64 len) { struct spi_nor *nor = mtd_to_spi_nor(mtd); int ret; @@ -368,7 +367,7 @@ static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) return ret; } -static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, u64 len) { struct spi_nor *nor = mtd_to_spi_nor(mtd); int ret; @@ -383,7 +382,7 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) return ret; } -static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) +static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, u64 len) { struct spi_nor *nor = mtd_to_spi_nor(mtd); int ret; diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c index 2dfdc555a69f..96064e4babf0 100644 --- a/drivers/mtd/spi-nor/sysfs.c +++ b/drivers/mtd/spi-nor/sysfs.c @@ -78,6 +78,8 @@ static umode_t spi_nor_sysfs_is_visible(struct kobject *kobj, if (attr == &dev_attr_manufacturer.attr && !nor->manufacturer) return 0; + if (attr == &dev_attr_partname.attr && !nor->info->name) + return 0; if (attr == &dev_attr_jedec_id.attr && !nor->info->id && !nor->id) return 0; diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c index 04da685c36be..211f279a33a9 100644 --- a/drivers/mtd/ssfdc.c +++ b/drivers/mtd/ssfdc.c @@ -18,7 +18,6 @@ struct ssfdcr_record { struct mtd_blktrans_dev mbd; - int usecount; unsigned char heads; unsigned char sectors; unsigned short cylinders; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 579eebb6fc56..e1f1e646cf48 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -12093,6 +12093,8 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_cfg_ntp_filters(bp); if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) bnxt_hwrm_exec_fwd_req(bp); + if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) + netdev_info(bp->dev, "Receive PF driver unload event!\n"); if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { bnxt_hwrm_port_qstats(bp, 0); bnxt_hwrm_port_qstats_ext(bp, 0); @@ -13093,8 +13095,6 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp) } } } - if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) - netdev_info(bp->dev, "Receive PF driver unload event!\n"); } #else diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 9282403d1bf6..2d7ae71287b1 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2132,8 +2132,10 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) /* Note: if we ever change from DMA_TX_APPEND_CRC below we * will need to restore software padding of "runt" packets */ + len_stat |= DMA_TX_APPEND_CRC; + if (!i) { - len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; + len_stat |= DMA_SOP; if (skb->ip_summed == CHECKSUM_PARTIAL) len_stat |= DMA_TX_DO_CSUM; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index b618797a7e8d..f1695c889d3a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1041,7 +1041,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) return; order = get_order(alloc_size); - if (order > MAX_ORDER) { + if (order > MAX_PAGE_ORDER) { if (net_ratelimit()) dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n"); return; diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 4e18b4cefa97..94ac36b1408b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -48,7 +48,7 @@ * of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus * some padding. * - * But the size of a single DMA region is limited by MAX_ORDER in the + * But the size of a single DMA region is limited by MAX_PAGE_ORDER in the * kernel (about 16MB currently). To support say 4K Jumbo frames, we * use a set of LTBs (struct ltb_set) per pool. * @@ -75,7 +75,7 @@ * pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160 * plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC. */ -#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << MAX_ORDER) * PAGE_SIZE)) +#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << MAX_PAGE_ORDER) * PAGE_SIZE)) #define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX) #define IBMVNIC_LTB_SET_SIZE (38 << 20) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 1ab8dbe2d880..d5519af34657 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -107,12 +107,18 @@ static struct workqueue_struct *i40e_wq; static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f, struct net_device *netdev, int delta) { + struct netdev_hw_addr_list *ha_list; struct netdev_hw_addr *ha; if (!f || !netdev) return; - netdev_for_each_mc_addr(ha, netdev) { + if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr)) + ha_list = &netdev->uc; + else + ha_list = &netdev->mc; + + netdev_hw_addr_list_for_each(ha, ha_list) { if (ether_addr_equal(ha->addr, f->macaddr)) { ha->refcount += delta; if (ha->refcount <= 0) @@ -16512,6 +16518,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev) return; i40e_reset_and_rebuild(pf, false, false); +#ifdef CONFIG_PCI_IOV + i40e_restore_all_vfs_msi_state(pdev); +#endif /* CONFIG_PCI_IOV */ } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 3f99eb198245..de5ec4e6bedf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -154,6 +154,32 @@ void i40e_vc_notify_reset(struct i40e_pf *pf) (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); } +#ifdef CONFIG_PCI_IOV +void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev) +{ + u16 vf_id; + u16 pos; + + /* Continue only if this is a PF */ + if (!pdev->is_physfn) + return; + + if (!pci_num_vf(pdev)) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + struct pci_dev *vf_dev = NULL; + + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) { + if (vf_dev->is_virtfn && vf_dev->physfn == pdev) + pci_restore_msi_state(vf_dev); + } + } +} +#endif /* CONFIG_PCI_IOV */ + /** * i40e_vc_notify_vf_reset * @vf: 
pointer to the VF structure @@ -3521,16 +3547,16 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf, bool found = false; int bkt; - if (!tc_filter->action) { + if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) { dev_info(&pf->pdev->dev, - "VF %d: Currently ADq doesn't support Drop Action\n", - vf->vf_id); + "VF %d: ADQ doesn't support this action (%d)\n", + vf->vf_id, tc_filter->action); goto err; } /* action_meta is TC number here to which the filter is applied */ if (!tc_filter->action_meta || - tc_filter->action_meta > I40E_MAX_VF_VSI) { + tc_filter->action_meta > vf->num_tc) { dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", vf->vf_id, tc_filter->action_meta); goto err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 2ee0f8a23248..5fd607c0de0a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -137,6 +137,9 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); +#ifdef CONFIG_PCI_IOV +void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev); +#endif /* CONFIG_PCI_IOV */ int i40e_get_vf_stats(struct net_device *netdev, int vf_id, struct ifla_vf_stats *vf_stats); diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index d7fdb7ba7268..fbd5d92182d3 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1359,8 +1359,9 @@ struct ice_aqc_get_link_status_data { u8 lp_flowcontrol; #define ICE_AQ_LINK_LP_PAUSE_ADV BIT(0) #define ICE_AQ_LINK_LP_ASM_DIR_ADV BIT(1) + u8 reserved5[5]; #define ICE_AQC_LS_DATA_SIZE_V2 \ - offsetofend(struct ice_aqc_get_link_status_data, lp_flowcontrol) + offsetofend(struct ice_aqc_get_link_status_data, reserved5) } __packed; /* Set event mask command (direct 0x0613) */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 9a6c25f98632..edac34c796ce 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -5332,7 +5332,6 @@ ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, u8 *eec_mode) { struct ice_aqc_get_cgu_dpll_status *cmd; - const s64 nsec_per_psec = 1000LL; struct ice_aq_desc desc; int status; @@ -5348,8 +5347,7 @@ ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, *phase_offset = le32_to_cpu(cmd->phase_offset_h); *phase_offset <<= 32; *phase_offset += le32_to_cpu(cmd->phase_offset_l); - *phase_offset = div64_s64(sign_extend64(*phase_offset, 47), - nsec_per_psec); + *phase_offset = sign_extend64(*phase_offset, 47); *eec_mode = cmd->eec_mode; } diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index fb9c93f37e84..adfdea1e2805 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2146,7 +2146,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Ensure we have media as we cannot configure a medialess port */ if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) - return -EPERM; + return -ENOMEDIUM; ice_print_topo_conflict(vsi); @@ -9187,8 +9187,14 @@ int ice_stop(struct net_device *netdev) int link_err = ice_force_phys_link_state(vsi, false); if (link_err) { - netdev_err(vsi->netdev, 
"Failed to set physical link down, VSI %d error %d\n", - vsi->vsi_num, link_err); + if (link_err == -ENOMEDIUM) + netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n", + vsi->vsi_num); + else + netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", + vsi->vsi_num, link_err); + + ice_vsi_close(vsi); return -EIO; } } diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index 81288a17da2a..20c4b3a64710 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -1044,7 +1044,6 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget) } idpf_rx_sync_for_cpu(rx_buf, fields.size); - skb = rx_q->skb; if (skb) idpf_rx_add_frag(rx_buf, skb, fields.size); else diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 1f728a9004d9..9e942e5baf39 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -396,7 +396,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) if (!rxq) return; - if (!bufq && idpf_is_queue_model_split(q_model) && rxq->skb) { + if (rxq->skb) { dev_kfree_skb_any(rxq->skb); rxq->skb = NULL; } diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h index 07e72c72d156..8dc837889723 100644 --- a/drivers/net/ethernet/intel/idpf/virtchnl2.h +++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h @@ -1104,9 +1104,9 @@ struct virtchnl2_rss_key { __le32 vport_id; __le16 key_len; u8 pad; - __DECLARE_FLEX_ARRAY(u8, key_flex); -}; -VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rss_key); + u8 key_flex[]; +} __packed; +VIRTCHNL2_CHECK_STRUCT_LEN(7, virtchnl2_rss_key); /** * struct virtchnl2_queue_chunk - chunk of contiguous queues diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index f48f82d5e274..85cc16396506 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -568,6 +568,7 @@ struct igc_nfc_filter { u16 etype; __be16 vlan_etype; u16 vlan_tci; + u16 vlan_tci_mask; u8 src_addr[ETH_ALEN]; u8 dst_addr[ETH_ALEN]; u8 user_data[8]; diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 785eaa8e0ba8..859b2636f3d9 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -958,6 +958,7 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev, } #define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +#define VLAN_TCI_FULL_MASK ((__force __be16)~0) static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, struct ethtool_rxnfc *cmd) { @@ -980,10 +981,16 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; } + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_etype = rule->filter.vlan_etype; + fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK; + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { fsp->flow_type |= FLOW_EXT; fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci); - fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + fsp->m_ext.vlan_tci = htons(rule->filter.vlan_tci_mask); } if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { @@ -1218,6 +1225,7 @@ static void igc_ethtool_init_nfc_rule(struct 
igc_nfc_rule *rule, if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci); + rule->filter.vlan_tci_mask = ntohs(fsp->m_ext.vlan_tci); rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; } @@ -1255,11 +1263,19 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data)); } - /* When multiple filter options or user data or vlan etype is set, use a - * flex filter. + /* The i225/i226 has various different filters. Flex filters provide a + * way to match up to the first 128 bytes of a packet. Use them for: + * a) For specific user data + * b) For VLAN EtherType + * c) For full TCI match + * d) Or in case multiple filter criteria are set + * + * Otherwise, use the simple MAC, VLAN PRIO or EtherType filters. */ if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) || (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) || + ((rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) && + rule->filter.vlan_tci_mask == ntohs(VLAN_TCI_FULL_MASK)) || (rule->filter.match_flags & (rule->filter.match_flags - 1))) rule->flex = true; else @@ -1329,6 +1345,26 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, return -EINVAL; } + /* There are two ways to match the VLAN TCI: + * 1. Match on PCP field and use vlan prio filter for it + * 2. Match on complete TCI field and use flex filter for it + */ + if ((fsp->flow_type & FLOW_EXT) && + fsp->m_ext.vlan_tci && + fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK) && + fsp->m_ext.vlan_tci != VLAN_TCI_FULL_MASK) { + netdev_dbg(netdev, "VLAN mask not supported\n"); + return -EOPNOTSUPP; + } + + /* VLAN EtherType can only be matched by full mask. */ + if ((fsp->flow_type & FLOW_EXT) && + fsp->m_ext.vlan_etype && + fsp->m_ext.vlan_etype != ETHER_TYPE_FULL_MASK) { + netdev_dbg(netdev, "VLAN EtherType mask not supported\n"); + return -EOPNOTSUPP; + } + if (fsp->location >= IGC_MAX_RXNFC_RULES) { netdev_dbg(netdev, "Invalid location\n"); return -EINVAL; diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c index a9c08321aca9..22cefb1eeedf 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.c +++ b/drivers/net/ethernet/intel/igc/igc_tsn.c @@ -227,7 +227,7 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) wr32(IGC_TQAVCC(i), tqavcc); wr32(IGC_TQAVHC(i), - 0x80000000 + ring->hicredit * 0x7735); + 0x80000000 + ring->hicredit * 0x7736); } else { /* Disable any CBS for the queue */ txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index ab3e39eef2eb..8c0732c9a7ee 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -528,7 +528,7 @@ struct npc_lt_def { u8 ltype_mask; u8 ltype_match; u8 lid; -}; +} __packed; struct npc_lt_def_ipsec { u8 ltype_mask; @@ -536,7 +536,7 @@ struct npc_lt_def_ipsec { u8 lid; u8 spi_offset; u8 spi_nz; -}; +} __packed; struct npc_lt_def_apad { u8 ltype_mask; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index cce2806aaa50..8802961b8889 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -905,6 +905,7 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu); void *rvu_first_cgx_pdata(struct rvu *rvu); int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id); int 
rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable); +int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable); int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause, u16 pfc_en); int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 15a319684ed3..38acdc7a73bb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -465,6 +465,23 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start); } +int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable) +{ + int pf = rvu_get_pf(pcifunc); + struct mac_ops *mac_ops; + u8 cgx_id, lmac_id; + void *cgxd; + + if (!is_cgx_config_permitted(rvu, pcifunc)) + return LMAC_AF_ERR_PERM_DENIED; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + mac_ops = get_mac_ops(cgxd); + + return mac_ops->mac_tx_enable(cgxd, lmac_id, enable); +} + int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable) { struct mac_ops *mac_ops; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 4227ebb4a758..58744313f0eb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -4143,90 +4143,18 @@ static void nix_find_link_frs(struct rvu *rvu, req->minlen = minlen; } -static int -nix_config_link_credits(struct rvu *rvu, int blkaddr, int link, - u16 pcifunc, u64 tx_credits) -{ - struct rvu_hwinfo *hw = rvu->hw; - int pf = rvu_get_pf(pcifunc); - u8 cgx_id = 0, lmac_id = 0; - unsigned long poll_tmo; - bool restore_tx_en = 0; - struct nix_hw *nix_hw; - u64 cfg, sw_xoff = 0; - u32 schq = 0; - u32 credits; - int rc; - - nix_hw = get_nix_hw(rvu->hw, blkaddr); - if (!nix_hw) - return NIX_AF_ERR_INVALID_NIXBLK; - - if (tx_credits == nix_hw->tx_credits[link]) - return 0; - - /* Enable cgx tx if disabled for credits to be back */ - if (is_pf_cgxmapped(rvu, pf)) { - rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); - restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), - lmac_id, true); - } - - mutex_lock(&rvu->rsrc_lock); - /* Disable new traffic to link */ - if (hw->cap.nix_shaping) { - schq = nix_get_tx_link(rvu, pcifunc); - sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)); - rvu_write64(rvu, blkaddr, - NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0)); - } - - rc = NIX_AF_ERR_LINK_CREDITS; - poll_tmo = jiffies + usecs_to_jiffies(200000); - /* Wait for credits to return */ - do { - if (time_after(jiffies, poll_tmo)) - goto exit; - usleep_range(100, 200); - - cfg = rvu_read64(rvu, blkaddr, - NIX_AF_TX_LINKX_NORM_CREDIT(link)); - credits = (cfg >> 12) & 0xFFFFFULL; - } while (credits != nix_hw->tx_credits[link]); - - cfg &= ~(0xFFFFFULL << 12); - cfg |= (tx_credits << 12); - rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); - rc = 0; - - nix_hw->tx_credits[link] = tx_credits; - -exit: - /* Enable traffic back */ - if (hw->cap.nix_shaping && !sw_xoff) - rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0); - - /* Restore state of cgx tx */ - if (restore_tx_en) - rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); - - mutex_unlock(&rvu->rsrc_lock); - return rc; -} - int rvu_mbox_handler_nix_set_hw_frs(struct 
rvu *rvu, struct nix_frs_cfg *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; int pf = rvu_get_pf(pcifunc); - int blkaddr, schq, link = -1; - struct nix_txsch *txsch; - u64 cfg, lmac_fifo_len; + int blkaddr, link = -1; struct nix_hw *nix_hw; struct rvu_pfvf *pfvf; u8 cgx = 0, lmac = 0; u16 max_mtu; + u64 cfg; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -4247,25 +4175,6 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) return NIX_AF_ERR_FRS_INVALID; - /* Check if requester wants to update SMQ's */ - if (!req->update_smq) - goto rx_frscfg; - - /* Update min/maxlen in each of the SMQ attached to this PF/VF */ - txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; - mutex_lock(&rvu->rsrc_lock); - for (schq = 0; schq < txsch->schq.max; schq++) { - if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) - continue; - cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); - cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); - if (req->update_minlen) - cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); - rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); - } - mutex_unlock(&rvu->rsrc_lock); - -rx_frscfg: /* Check if config is for SDP link */ if (req->sdp_link) { if (!hw->sdp_links) @@ -4288,7 +4197,6 @@ rx_frscfg: if (link < 0) return NIX_AF_ERR_RX_LINK_INVALID; - linkcfg: nix_find_link_frs(rvu, req, pcifunc); @@ -4298,19 +4206,7 @@ linkcfg: cfg = (cfg & ~0xFFFFULL) | req->minlen; rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); - if (req->sdp_link || pf == 0) - return 0; - - /* Update transmit credits for CGX links */ - lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac); - if (!lmac_fifo_len) { - dev_err(rvu->dev, - "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", - __func__, cgx, lmac); - return 0; - } - return nix_config_link_credits(rvu, blkaddr, link, pcifunc, - (lmac_fifo_len - req->maxlen) / 16); + return 0; } int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, @@ -4841,7 +4737,13 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, pfvf = rvu_get_pfvf(rvu, pcifunc); clear_bit(NIXLF_INITIALIZED, &pfvf->flags); - return rvu_cgx_start_stop_io(rvu, pcifunc, false); + err = rvu_cgx_start_stop_io(rvu, pcifunc, false); + if (err) + return err; + + rvu_cgx_tx_enable(rvu, pcifunc, true); + + return 0; } #define RX_SA_BASE GENMASK_ULL(52, 7) diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c index 0d5a41a2ae01..227d01cace3f 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c @@ -267,6 +267,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts) priv->stats.rx_truncate_errors++; } + /* Read receive consumer index before replenish so that this routine + * returns accurate return value even if packet is received into + * just-replenished buffer prior to exiting this routine. 
+ */ + rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI); + rx_ci_rem = rx_ci % priv->rx_q_entries; + /* Let hardware know we've replenished one buffer */ rx_pi++; @@ -279,8 +286,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts) rx_pi_rem = rx_pi % priv->rx_q_entries; if (rx_pi_rem == 0) priv->valid_polarity ^= 1; - rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI); - rx_ci_rem = rx_ci % priv->rx_q_entries; if (skb) netif_receive_skb(skb); diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c index 88e26c120b48..54f2eac11a63 100644 --- a/drivers/net/ethernet/micrel/ks8851_spi.c +++ b/drivers/net/ethernet/micrel/ks8851_spi.c @@ -156,7 +156,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned int op, txb[0] = cpu_to_le16(op | KS_SPIOP_RD); - if (kss->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) { + if (kss->spidev->master->flags & SPI_CONTROLLER_HALF_DUPLEX) { msg = &kss->spi_msg2; xfer = kss->spi_xfer2; @@ -180,7 +180,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned int op, ret = spi_sync(kss->spidev, msg); if (ret < 0) netdev_err(ks->netdev, "read: spi_sync() failed\n"); - else if (kss->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) + else if (kss->spidev->master->flags & SPI_CONTROLLER_HALF_DUPLEX) memcpy(rxb, trx, rxl); else memcpy(rxb, trx + 2, rxl); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 0d57ffcedf0c..fc78bc959ded 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2591,6 +2591,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { netdev_err(qdev->ndev, "lBufQ failed\n"); + kfree(qdev->lrg_buf); return -ENOMEM; } qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; @@ -2615,6 +2616,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) qdev->lrg_buf_q_alloc_size, qdev->lrg_buf_q_alloc_virt_addr, qdev->lrg_buf_q_alloc_phy_addr); + kfree(qdev->lrg_buf); return -ENOMEM; } diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index bb787a52bc75..81fd31f6fac4 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1211,7 +1211,7 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp) { r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); - rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10); + rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30); } static void rtl8168_driver_start(struct rtl8169_private *tp) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 664eda4b5a11..8649b3e90edb 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -66,16 +66,27 @@ int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value) return -ETIMEDOUT; } -static int ravb_config(struct net_device *ndev) +static int ravb_set_opmode(struct net_device *ndev, u32 opmode) { + u32 csr_ops = 1U << (opmode & CCC_OPC); + u32 ccc_mask = CCC_OPC; int error; - /* Set config mode */ - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); - /* Check if the operating mode is changed to the config mode */ - error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG); - if (error) - netdev_err(ndev, "failed to switch device to config mode\n"); + /* If 
gPTP active in config mode is supported, it needs to be configured + * along with CSEL and operating mode in the same access. This is a + * hardware limitation. + */ + if (opmode & CCC_GAC) + ccc_mask |= CCC_GAC | CCC_CSEL; + + /* Set operating mode */ + ravb_modify(ndev, CCC, ccc_mask, opmode); + /* Check if the operating mode is changed to the requested one */ + error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops); + if (error) { + netdev_err(ndev, "failed to switch device to requested mode (%u)\n", + opmode & CCC_OPC); + } return error; } @@ -673,7 +684,7 @@ static int ravb_dmac_init(struct net_device *ndev) int error; /* Set CONFIG mode */ - error = ravb_config(ndev); + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); if (error) return error; @@ -682,9 +693,7 @@ static int ravb_dmac_init(struct net_device *ndev) return error; /* Setting the control will start the AVB-DMAC process. */ - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION); - - return 0; + return ravb_set_opmode(ndev, CCC_OPC_OPERATION); } static void ravb_get_tx_tstamp(struct net_device *ndev) @@ -1046,7 +1055,7 @@ static int ravb_stop_dma(struct net_device *ndev) return error; /* Stop AVB-DMAC process */ - return ravb_config(ndev); + return ravb_set_opmode(ndev, CCC_OPC_CONFIG); } /* E-MAC interrupt handler */ @@ -2560,21 +2569,25 @@ static int ravb_set_gti(struct net_device *ndev) return 0; } -static void ravb_set_config_mode(struct net_device *ndev) +static int ravb_set_config_mode(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; + int error; if (info->gptp) { - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); + if (error) + return error; /* Set CSEL value */ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); } else if (info->ccc_gac) { - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG | - CCC_GAC | CCC_CSEL_HPB); + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB); } else { - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); } + + return error; } /* Set tx and rx clock internal delay modes */ @@ -2794,7 +2807,9 @@ static int ravb_probe(struct platform_device *pdev) ndev->ethtool_ops = &ravb_ethtool_ops; /* Set AVB config mode */ - ravb_set_config_mode(ndev); + error = ravb_set_config_mode(ndev); + if (error) + goto out_disable_gptp_clk; if (info->gptp || info->ccc_gac) { /* Set GTI value */ @@ -2917,8 +2932,7 @@ static void ravb_remove(struct platform_device *pdev) dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); - /* Set reset mode */ - ravb_write(ndev, CCC_OPC_RESET, CCC); + ravb_set_opmode(ndev, CCC_OPC_RESET); clk_disable_unprepare(priv->gptp_clk); clk_disable_unprepare(priv->refclk); @@ -3000,8 +3014,11 @@ static int __maybe_unused ravb_resume(struct device *dev) int ret = 0; /* If WoL is enabled set reset mode to rearm the WoL logic */ - if (priv->wol_enabled) - ravb_write(ndev, CCC_OPC_RESET, CCC); + if (priv->wol_enabled) { + ret = ravb_set_opmode(ndev, CCC_OPC_RESET); + if (ret) + return ret; + } /* All registers have been reset to default values.
* Restore all registers which were set up at probe time and @@ -3009,7 +3026,9 @@ static int __maybe_unused ravb_resume(struct device *dev) */ /* Set AVB config mode */ - ravb_set_config_mode(ndev); + ret = ravb_set_config_mode(ndev); + if (ret) + return ret; if (info->gptp || info->ccc_gac) { /* Set GTI value */ diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index d2f35ee15eff..fac227d372db 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -823,8 +823,10 @@ int efx_probe_filters(struct efx_nic *efx) } if (!success) { - efx_for_each_channel(channel, efx) + efx_for_each_channel(channel, efx) { kfree(channel->rps_flow_id); + channel->rps_flow_id = NULL; + } efx->type->filter_table_remove(efx); rc = -ENOMEM; goto out_unlock; } diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index 0d98defb011e..0ec7412febc7 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h @@ -346,12 +346,6 @@ static inline void *port_priv(struct gelic_port *port) return port->priv; } -#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC -void udbg_shutdown_ps3gelic(void); -#else -static inline void udbg_shutdown_ps3gelic(void) {} -#endif - int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask); /* shared netdev ops */ void gelic_card_up(struct gelic_card *card); diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index 3777c7e2e6fc..e47bb125048d 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -161,7 +161,9 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) u8 buf[ETH_ALEN]; struct ax88172a_private *priv; - usbnet_get_endpoints(dev, intf); + ret = usbnet_get_endpoints(dev, intf); + if (ret) + return ret; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d16f592c2061..51b1868d2f22 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -334,7 +334,6 @@ struct virtio_net_common_hdr { }; }; -static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf); static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf); static bool is_xdp_frame(void *ptr) @@ -408,6 +407,17 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) return p; } +static void virtnet_rq_free_buf(struct virtnet_info *vi, + struct receive_queue *rq, void *buf) +{ + if (vi->mergeable_rx_bufs) + put_page(virt_to_head_page(buf)); + else if (vi->big_packets) + give_pages(rq, buf); + else + put_page(virt_to_head_page(buf)); +} + static void enable_delayed_refill(struct virtnet_info *vi) { spin_lock_bh(&vi->refill_lock); @@ -634,17 +644,6 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx) return buf; } -static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq) -{ - void *buf; - - buf = virtqueue_detach_unused_buf(rq->vq); - if (buf && rq->do_dma) - virtnet_rq_unmap(rq, buf, 0); - - return buf; -} - static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len) { struct virtnet_rq_dma *dma; @@ -744,6 +743,20 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi) } } +static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf) +{ + struct virtnet_info *vi = vq->vdev->priv; + struct receive_queue *rq; + int i = vq2rxq(vq); + + rq = &vi->rq[i]; + + if (rq->do_dma) + virtnet_rq_unmap(rq, buf, 0); + +
virtnet_rq_free_buf(vi, rq, buf); +} + static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) { unsigned int len; @@ -1764,7 +1777,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, if (unlikely(len < vi->hdr_len + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); DEV_STATS_INC(dev, rx_length_errors); - virtnet_rq_free_unused_buf(rq->vq, buf); + virtnet_rq_free_buf(vi, rq, buf); return; } @@ -2392,7 +2405,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi, if (running) napi_disable(&rq->napi); - err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf); + err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf); if (err) netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); @@ -4031,19 +4044,6 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf) xdp_return_frame(ptr_to_xdp(buf)); } -static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf) -{ - struct virtnet_info *vi = vq->vdev->priv; - int i = vq2rxq(vq); - - if (vi->mergeable_rx_bufs) - put_page(virt_to_head_page(buf)); - else if (vi->big_packets) - give_pages(&vi->rq[i], buf); - else - put_page(virt_to_head_page(buf)); -} - static void free_unused_bufs(struct virtnet_info *vi) { void *buf; @@ -4057,10 +4057,10 @@ static void free_unused_bufs(struct virtnet_info *vi) } for (i = 0; i < vi->max_queue_pairs; i++) { - struct receive_queue *rq = &vi->rq[i]; + struct virtqueue *vq = vi->rq[i].vq; - while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL) - virtnet_rq_free_unused_buf(rq->vq, buf); + while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) + virtnet_rq_unmap_free_buf(vq, buf); cond_resched(); } } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 56def20374f3..7805a42948af 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -770,7 +770,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) } } -void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans); +void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq); static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) { @@ -817,7 +817,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans) return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans)); } -void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); +void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq); void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); #ifdef CONFIG_IWLWIFI_DEBUGFS diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index bc6a9f861711..07931c2db494 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1783,7 +1783,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) return inta; } -void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans) +void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; @@ -1807,7 +1807,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans) isr_stats->rfkill++; if (prev != report) - iwl_trans_pcie_rf_kill(trans, report); + iwl_trans_pcie_rf_kill(trans, report, from_irq); mutex_unlock(&trans_pcie->mutex); if (hw_rfkill) { @@ -1947,7 
+1947,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) /* HW RF KILL switch toggled */ if (inta & CSR_INT_BIT_RF_KILL) { - iwl_pcie_handle_rfkill_irq(trans); + iwl_pcie_handle_rfkill_irq(trans, true); handled |= CSR_INT_BIT_RF_KILL; } @@ -2370,7 +2370,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) /* HW RF KILL switch toggled */ if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) - iwl_pcie_handle_rfkill_irq(trans); + iwl_pcie_handle_rfkill_irq(trans, true); if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) { IWL_ERR(trans, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 92253260f568..d10208075ae5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1082,7 +1082,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans) report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); if (prev != report) - iwl_trans_pcie_rf_kill(trans, report); + iwl_trans_pcie_rf_kill(trans, report, false); return hw_rfkill; } @@ -1237,7 +1237,7 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) trans_pcie->hw_mask = trans_pcie->hw_init_mask; } -static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) +static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1264,7 +1264,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); - iwl_pcie_synchronize_irqs(trans); + if (!from_irq) + iwl_pcie_synchronize_irqs(trans); iwl_pcie_rx_napi_sync(trans); iwl_pcie_tx_stop(trans); iwl_pcie_rx_stop(trans); @@ -1454,7 +1455,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, clear_bit(STATUS_RFKILL_OPMODE, &trans->status); } if (hw_rfkill != was_in_rfkill) - iwl_trans_pcie_rf_kill(trans, hw_rfkill); + iwl_trans_pcie_rf_kill(trans, hw_rfkill, false); } static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) @@ -1469,12 +1470,12 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) mutex_lock(&trans_pcie->mutex); trans_pcie->opmode_down = true; was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); - _iwl_trans_pcie_stop_device(trans); + _iwl_trans_pcie_stop_device(trans, false); iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); mutex_unlock(&trans_pcie->mutex); } -void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) +void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq) { struct iwl_trans_pcie __maybe_unused *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1487,7 +1488,7 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) if (trans->trans_cfg->gen2) _iwl_trans_pcie_gen2_stop_device(trans); else - _iwl_trans_pcie_stop_device(trans); + _iwl_trans_pcie_stop_device(trans, from_irq); } } @@ -2887,7 +2888,7 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file, IWL_WARN(trans, "changing debug rfkill %d->%d\n", trans_pcie->debug_rfkill, new_value); trans_pcie->debug_rfkill = new_value; - iwl_pcie_handle_rfkill_irq(trans); + iwl_pcie_handle_rfkill_irq(trans, false); return count; } diff --git a/drivers/nubus/bus.c b/drivers/nubus/bus.c index 72921e4f35f6..12df4d88970c 100644 --- a/drivers/nubus/bus.c +++ b/drivers/nubus/bus.c @@ -32,12 +32,11 @@ static void nubus_device_remove(struct device *dev) 
ndrv->remove(to_nubus_board(dev)); } -struct bus_type nubus_bus_type = { +static const struct bus_type nubus_bus_type = { .name = "nubus", .probe = nubus_device_probe, .remove = nubus_device_remove, }; -EXPORT_SYMBOL(nubus_bus_type); int nubus_driver_register(struct nubus_driver *ndrv) { diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 84f345c69ea5..c4e0432ae42a 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -201,7 +201,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed); * @opp: opp for which level value has to be returned for * * Return: level read from device tree corresponding to the opp, else - * return 0. + * return U32_MAX. */ unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) { @@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_level); * @index: index of the required opp * * Return: performance state read from device tree corresponding to the - * required opp, else return 0. + * required opp, else return U32_MAX. */ unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, unsigned int index) @@ -808,6 +808,16 @@ struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, struct dev_pm_opp *opp; opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL); + if (IS_ERR(opp)) + return opp; + + /* False match */ + if (temp == OPP_LEVEL_UNSET) { + dev_err(dev, "%s: OPP levels aren't available\n", __func__); + dev_pm_opp_put(opp); + return ERR_PTR(-ENODEV); + } + *level = temp; return opp; } @@ -832,9 +842,14 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil); * use. */ struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev, - unsigned long *level) + unsigned int *level) { - return _find_key_floor(dev, level, 0, true, _read_level, NULL); + unsigned long temp = *level; + struct dev_pm_opp *opp; + + opp = _find_key_floor(dev, &temp, 0, true, _read_level, NULL); + *level = temp; + return opp; } EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_floor); @@ -948,7 +963,7 @@ _opp_config_clk_single(struct device *dev, struct opp_table *opp_table, dev_err(dev, "%s: failed to set clock rate: %d\n", __func__, ret); } else { - opp_table->rate_clk_single = freq; + opp_table->current_rate_single_clk = freq; } return ret; @@ -1046,40 +1061,23 @@ static int _set_opp_bw(const struct opp_table *opp_table, return 0; } -static int _set_performance_state(struct device *dev, struct device *pd_dev, - struct dev_pm_opp *opp, int i) +/* This is only called for PM domain for now */ +static int _set_required_opps(struct device *dev, struct opp_table *opp_table, + struct dev_pm_opp *opp, bool up) { - unsigned int pstate = likely(opp) ? opp->required_opps[i]->level: 0; - int ret; + struct device **devs = opp_table->required_devs; + struct dev_pm_opp *required_opp; + int index, target, delta, ret; - if (!pd_dev) + if (!devs) return 0; - ret = dev_pm_domain_set_performance_state(pd_dev, pstate); - if (ret) { - dev_err(dev, "Failed to set performance state of %s: %d (%d)\n", - dev_name(pd_dev), pstate, ret); - } - - return ret; -} - -static int _opp_set_required_opps_generic(struct device *dev, - struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down) -{ - dev_err(dev, "setting required-opps isn't supported for non-genpd devices\n"); - return -ENOENT; -} - -static int _opp_set_required_opps_genpd(struct device *dev, - struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down) -{ - struct device **genpd_virt_devs = - opp_table->genpd_virt_devs ? 
opp_table->genpd_virt_devs : &dev; - int index, target, delta, ret; + /* required-opps not fully initialized yet */ + if (lazy_linking_pending(opp_table)) + return -EBUSY; /* Scaling up? Set required OPPs in normal order, else reverse */ - if (!scaling_down) { + if (up) { index = 0; target = opp_table->required_opp_count; delta = 1; @@ -1090,9 +1088,13 @@ static int _opp_set_required_opps_genpd(struct device *dev, } while (index != target) { - ret = _set_performance_state(dev, genpd_virt_devs[index], opp, index); - if (ret) - return ret; + if (devs[index]) { + required_opp = opp ? opp->required_opps[index] : NULL; + + ret = dev_pm_opp_set_opp(devs[index], required_opp); + if (ret) + return ret; + } index += delta; } @@ -1100,34 +1102,6 @@ static int _opp_set_required_opps_genpd(struct device *dev, return 0; } -/* This is only called for PM domain for now */ -static int _set_required_opps(struct device *dev, struct opp_table *opp_table, - struct dev_pm_opp *opp, bool up) -{ - /* required-opps not fully initialized yet */ - if (lazy_linking_pending(opp_table)) - return -EBUSY; - - if (opp_table->set_required_opps) - return opp_table->set_required_opps(dev, opp_table, opp, up); - - return 0; -} - -/* Update set_required_opps handler */ -void _update_set_required_opps(struct opp_table *opp_table) -{ - /* Already set */ - if (opp_table->set_required_opps) - return; - - /* All required OPPs will belong to genpd or none */ - if (opp_table->required_opp_tables[0]->is_genpd) - opp_table->set_required_opps = _opp_set_required_opps_genpd; - else - opp_table->set_required_opps = _opp_set_required_opps_generic; -} - static int _set_opp_level(struct device *dev, struct opp_table *opp_table, struct dev_pm_opp *opp) { @@ -1135,7 +1109,7 @@ static int _set_opp_level(struct device *dev, struct opp_table *opp_table, int ret = 0; if (opp) { - if (!opp->level) + if (opp->level == OPP_LEVEL_UNSET) return 0; level = opp->level; @@ -1378,12 +1352,12 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) * value of the frequency. In such a case, do not abort but * configure the hardware to the desired frequency forcefully. 
*/ - forced = opp_table->rate_clk_single != target_freq; + forced = opp_table->current_rate_single_clk != freq; } - ret = _set_opp(dev, opp_table, opp, &target_freq, forced); + ret = _set_opp(dev, opp_table, opp, &freq, forced); - if (target_freq) + if (freq) dev_pm_opp_put(opp); put_opp_table: @@ -1867,6 +1841,8 @@ struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table) INIT_LIST_HEAD(&opp->node); + opp->level = OPP_LEVEL_UNSET; + return opp; } @@ -2388,19 +2364,13 @@ static void _opp_detach_genpd(struct opp_table *opp_table) { int index; - if (!opp_table->genpd_virt_devs) - return; - for (index = 0; index < opp_table->required_opp_count; index++) { - if (!opp_table->genpd_virt_devs[index]) + if (!opp_table->required_devs[index]) continue; - dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false); - opp_table->genpd_virt_devs[index] = NULL; + dev_pm_domain_detach(opp_table->required_devs[index], false); + opp_table->required_devs[index] = NULL; } - - kfree(opp_table->genpd_virt_devs); - opp_table->genpd_virt_devs = NULL; } /* @@ -2427,14 +2397,20 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, int index = 0, ret = -EINVAL; const char * const *name = names; - if (opp_table->genpd_virt_devs) - return 0; + if (!opp_table->required_devs) { + dev_err(dev, "Required OPPs not available, can't attach genpd\n"); + return -EINVAL; + } - opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count, - sizeof(*opp_table->genpd_virt_devs), - GFP_KERNEL); - if (!opp_table->genpd_virt_devs) - return -ENOMEM; + /* Genpd core takes care of propagation to parent genpd */ + if (opp_table->is_genpd) { + dev_err(dev, "%s: Operation not supported for genpds\n", __func__); + return -EOPNOTSUPP; + } + + /* Checking only the first one is enough ? */ + if (opp_table->required_devs[0]) + return 0; while (*name) { if (index >= opp_table->required_opp_count) { @@ -2450,13 +2426,25 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, goto err; } - opp_table->genpd_virt_devs[index] = virt_dev; + /* + * Add the virtual genpd device as a user of the OPP table, so + * we can call dev_pm_opp_set_opp() on it directly. + * + * This will be automatically removed when the OPP table is + * removed, don't need to handle that here. + */ + if (!_add_opp_dev(virt_dev, opp_table->required_opp_tables[index])) { + ret = -ENOMEM; + goto err; + } + + opp_table->required_devs[index] = virt_dev; index++; name++; } if (virt_devs) - *virt_devs = opp_table->genpd_virt_devs; + *virt_devs = opp_table->required_devs; return 0; @@ -2466,10 +2454,50 @@ err: } +static int _opp_set_required_devs(struct opp_table *opp_table, + struct device *dev, + struct device **required_devs) +{ + int i; + + if (!opp_table->required_devs) { + dev_err(dev, "Required OPPs not available, can't set required devs\n"); + return -EINVAL; + } + + /* Another device that shares the OPP table has set the required devs ? 
*/ + if (opp_table->required_devs[0]) + return 0; + + for (i = 0; i < opp_table->required_opp_count; i++) { + /* Genpd core takes care of propagation to parent genpd */ + if (required_devs[i] && opp_table->is_genpd && + opp_table->required_opp_tables[i]->is_genpd) { + dev_err(dev, "%s: Operation not supported for genpds\n", __func__); + return -EOPNOTSUPP; + } + + opp_table->required_devs[i] = required_devs[i]; + } + + return 0; +} + +static void _opp_put_required_devs(struct opp_table *opp_table) +{ + int i; + + for (i = 0; i < opp_table->required_opp_count; i++) + opp_table->required_devs[i] = NULL; +} + static void _opp_clear_config(struct opp_config_data *data) { - if (data->flags & OPP_CONFIG_GENPD) + if (data->flags & OPP_CONFIG_REQUIRED_DEVS) + _opp_put_required_devs(data->opp_table); + else if (data->flags & OPP_CONFIG_GENPD) _opp_detach_genpd(data->opp_table); + if (data->flags & OPP_CONFIG_REGULATOR) _opp_put_regulators(data->opp_table); if (data->flags & OPP_CONFIG_SUPPORTED_HW) @@ -2583,12 +2611,22 @@ int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config) /* Attach genpds */ if (config->genpd_names) { + if (config->required_devs) + goto err; + ret = _opp_attach_genpd(opp_table, dev, config->genpd_names, config->virt_devs); if (ret) goto err; data->flags |= OPP_CONFIG_GENPD; + } else if (config->required_devs) { + ret = _opp_set_required_devs(opp_table, dev, + config->required_devs); + if (ret) + goto err; + + data->flags |= OPP_CONFIG_REQUIRED_DEVS; } ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX), @@ -2975,6 +3013,47 @@ put_table: EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage); /** + * dev_pm_opp_sync_regulators() - Sync state of voltage regulators + * @dev: device for which we do this operation + * + * Sync voltage state of the OPP table regulators. + * + * Return: 0 on success or a negative error value. + */ +int dev_pm_opp_sync_regulators(struct device *dev) +{ + struct opp_table *opp_table; + struct regulator *reg; + int i, ret = 0; + + /* Device may not have OPP table */ + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) + return 0; + + /* Regulator may not be required for the device */ + if (unlikely(!opp_table->regulators)) + goto put_table; + + /* Nothing to sync if voltage wasn't changed */ + if (!opp_table->enabled) + goto put_table; + + for (i = 0; i < opp_table->regulator_count; i++) { + reg = opp_table->regulators[i]; + ret = regulator_sync_voltage(reg); + if (ret) + break; + } +put_table: + /* Drop reference taken by _find_opp_table() */ + dev_pm_opp_put_opp_table(opp_table); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators); + +/** * dev_pm_opp_enable() - Enable a specific OPP * @dev: device for which we do this operation * @freq: OPP frequency to enable @@ -3097,44 +3176,3 @@ void dev_pm_opp_remove_table(struct device *dev) dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); - -/** - * dev_pm_opp_sync_regulators() - Sync state of voltage regulators - * @dev: device for which we do this operation - * - * Sync voltage state of the OPP table regulators. - * - * Return: 0 on success or a negative error value. 
- */ -int dev_pm_opp_sync_regulators(struct device *dev) -{ - struct opp_table *opp_table; - struct regulator *reg; - int i, ret = 0; - - /* Device may not have OPP table */ - opp_table = _find_opp_table(dev); - if (IS_ERR(opp_table)) - return 0; - - /* Regulator may not be required for the device */ - if (unlikely(!opp_table->regulators)) - goto put_table; - - /* Nothing to sync if voltage wasn't changed */ - if (!opp_table->enabled) - goto put_table; - - for (i = 0; i < opp_table->regulator_count; i++) { - reg = opp_table->regulators[i]; - ret = regulator_sync_voltage(reg); - if (ret) - break; - } -put_table: - /* Drop reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); - - return ret; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 81fa27599d58..f9f0b22bccbb 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -165,7 +165,7 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, struct opp_table **required_opp_tables; struct device_node *required_np, *np; bool lazy = false; - int count, i; + int count, i, size; /* Traversing the first OPP node is all we need */ np = of_get_next_available_child(opp_np, NULL); @@ -179,12 +179,13 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, if (count <= 0) goto put_np; - required_opp_tables = kcalloc(count, sizeof(*required_opp_tables), - GFP_KERNEL); + size = sizeof(*required_opp_tables) + sizeof(*opp_table->required_devs); + required_opp_tables = kcalloc(count, size, GFP_KERNEL); if (!required_opp_tables) goto put_np; opp_table->required_opp_tables = required_opp_tables; + opp_table->required_devs = (void *)(required_opp_tables + count); opp_table->required_opp_count = count; for (i = 0; i < count; i++) { @@ -208,8 +209,6 @@ mutex_lock(&opp_table_lock); list_add(&opp_table->lazy, &lazy_opp_tables); mutex_unlock(&opp_table_lock); - } else { - _update_set_required_opps(opp_table); } goto put_np; @@ -296,7 +295,7 @@ void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp) of_node_put(opp->np); } -static int _link_required_opps(struct dev_pm_opp *opp, +static int _link_required_opps(struct dev_pm_opp *opp, struct opp_table *opp_table, struct opp_table *required_table, int index) { struct device_node *np; @@ -314,6 +313,39 @@ static int _link_required_opps(struct dev_pm_opp *opp, return -ENODEV; } + /* + * There are two genpd (as required-opp) cases that we need to handle: + * devices with a single genpd and ones with multiple genpds. + * + * The single genpd case requires special handling as we need to use the + * same `dev` structure (instead of a virtual one provided by genpd + * core) for setting the performance state. + * + * It doesn't make sense for a device's DT entry to have both + * "opp-level" and single "required-opps" entry pointing to a genpd's + * OPP, as that would make the OPP core call + * dev_pm_domain_set_performance_state() for two different values for + * the same device structure. Let's treat single genpd configuration as a + * case where the OPP's level is directly available without required-opp + * link in the DT. + * + * Just update the `level` with the right value, which + * dev_pm_opp_set_opp() will take care of in the normal path itself. + * + * There is another case though, where a genpd's OPP table has + * required-opps set to a parent genpd.
The OPP core expects the user to + * set the respective required `struct device` pointer via + * dev_pm_opp_set_config(). + */ + if (required_table->is_genpd && opp_table->required_opp_count == 1 && + !opp_table->required_devs[0]) { + /* Genpd core takes care of propagation to parent genpd */ + if (!opp_table->is_genpd) { + if (!WARN_ON(opp->level != OPP_LEVEL_UNSET)) + opp->level = opp->required_opps[0]->level; + } + } + return 0; } @@ -338,7 +370,7 @@ static int _of_opp_alloc_required_opps(struct opp_table *opp_table, if (IS_ERR_OR_NULL(required_table)) continue; - ret = _link_required_opps(opp, required_table, i); + ret = _link_required_opps(opp, opp_table, required_table, i); if (ret) goto free_required_opps; } @@ -359,7 +391,7 @@ static int lazy_link_required_opps(struct opp_table *opp_table, int ret; list_for_each_entry(opp, &opp_table->opp_list, node) { - ret = _link_required_opps(opp, new_table, index); + ret = _link_required_opps(opp, opp_table, new_table, index); if (ret) return ret; } @@ -422,7 +454,6 @@ static void lazy_link_required_opp_table(struct opp_table *new_table) /* All required opp-tables found, remove from lazy list */ if (!lazy) { - _update_set_required_opps(opp_table); list_del_init(&opp_table->lazy); list_for_each_entry(opp, &opp_table->opp_list, node) @@ -1393,8 +1424,14 @@ int of_get_required_opp_performance_state(struct device_node *np, int index) opp = _find_opp_of_np(opp_table, required_np); if (opp) { - pstate = opp->level; + if (opp->level == OPP_LEVEL_UNSET) { + pr_err("%s: OPP levels aren't available for %pOF\n", + __func__, np); + } else { + pstate = opp->level; + } dev_pm_opp_put(opp); + } dev_pm_opp_put_opp_table(opp_table); diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 08366f90f16b..cff1fabd1ae3 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -35,6 +35,7 @@ extern struct list_head opp_tables; #define OPP_CONFIG_PROP_NAME BIT(3) #define OPP_CONFIG_SUPPORTED_HW BIT(4) #define OPP_CONFIG_GENPD BIT(5) +#define OPP_CONFIG_REQUIRED_DEVS BIT(6) /** * struct opp_config_data - data for set config operations @@ -49,6 +50,18 @@ struct opp_config_data { unsigned int flags; }; +/** + * struct dev_pm_opp_icc_bw - Interconnect bandwidth values + * @avg: Average bandwidth corresponding to this OPP (in icc units) + * @peak: Peak bandwidth corresponding to this OPP (in icc units) + * + * This structure stores the bandwidth values for a single interconnect path. + */ +struct dev_pm_opp_icc_bw { + u32 avg; + u32 peak; +}; + /* * Internal data structure organization with the OPP layer library is as * follows: @@ -157,12 +170,12 @@ enum opp_table_access { * @clock_latency_ns_max: Max clock latency in nanoseconds. * @parsed_static_opps: Count of devices for which OPPs are initialized from DT. * @shared_opp: OPP is shared between multiple devices. - * @rate_clk_single: Currently configured frequency for single clk. + * @current_rate_single_clk: Currently configured frequency for single clk. * @current_opp: Currently configured OPP for the table. * @suspend_opp: Pointer to OPP to be used during device suspend. - * @genpd_virt_devs: List of virtual devices for multiple genpd support. * @required_opp_tables: List of device OPP tables that are required by OPPs in * this table. + * @required_devs: List of devices for required OPP tables. * @required_opp_count: Number of required devices. * @supported_hw: Array of version number to support. * @supported_hw_count: Number of elements in supported_hw array. 
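The required_devs array introduced above is not a separate allocation: _opp_table_alloc_required_tables() sizes a single kcalloc() so the device pointers sit directly behind the required_opp_tables pointers, and one kfree() of required_opp_tables releases both arrays. A minimal sketch of this co-allocation pattern, assuming normal kernel context (<linux/slab.h>) and using hypothetical demo_* names:

struct demo_required_arrays {
	struct opp_table **required_opp_tables;
	struct device **required_devs;
};

static int demo_alloc_required_arrays(struct demo_required_arrays *d, int count)
{
	/* One slot of each array per required OPP table. */
	size_t size = sizeof(*d->required_opp_tables) + sizeof(*d->required_devs);
	struct opp_table **tables = kcalloc(count, size, GFP_KERNEL);

	if (!tables)
		return -ENOMEM;

	d->required_opp_tables = tables;
	/* The device pointer slots start right after the count table pointers. */
	d->required_devs = (struct device **)(tables + count);
	return 0;
}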
@@ -180,7 +193,6 @@ enum opp_table_access { * @path_count: Number of interconnect paths * @enabled: Set to true if the device's resources are enabled/configured. * @is_genpd: Marks if the OPP table belongs to a genpd. - * @set_required_opps: Helper responsible to set required OPPs. * @dentry: debugfs dentry pointer of the real device directory (not links). * @dentry_name: Name of the real dentry. * @@ -207,12 +219,12 @@ struct opp_table { unsigned int parsed_static_opps; enum opp_table_access shared_opp; - unsigned long rate_clk_single; + unsigned long current_rate_single_clk; struct dev_pm_opp *current_opp; struct dev_pm_opp *suspend_opp; - struct device **genpd_virt_devs; struct opp_table **required_opp_tables; + struct device **required_devs; unsigned int required_opp_count; unsigned int *supported_hw; @@ -229,8 +241,6 @@ struct opp_table { unsigned int path_count; bool enabled; bool is_genpd; - int (*set_required_opps)(struct device *dev, - struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down); #ifdef CONFIG_DEBUG_FS struct dentry *dentry; diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c index 8f3f13fbbb25..e3b97cd1fbbf 100644 --- a/drivers/opp/ti-opp-supply.c +++ b/drivers/opp/ti-opp-supply.c @@ -18,6 +18,7 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_opp.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> @@ -373,23 +374,15 @@ static int ti_opp_supply_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device *cpu_dev = get_cpu_device(0); - const struct of_device_id *match; const struct ti_opp_supply_of_data *of_data; int ret = 0; - match = of_match_device(ti_opp_supply_of_match, dev); - if (!match) { - /* We do not expect this to happen */ - dev_err(dev, "%s: Unable to match device\n", __func__); - return -ENODEV; - } - if (!match->data) { + of_data = device_get_match_data(dev); + if (!of_data) { /* Again, unlikely.. but mistakes do happen */ dev_err(dev, "%s: Bad data in match\n", __func__); return -EINVAL; } - of_data = match->data; - dev_set_drvdata(dev, (void *)of_data); /* If we need optimized voltage */ diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 6554a2e89d36..6449056b57dd 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -598,3 +598,15 @@ int pci_write_config_dword(const struct pci_dev *dev, int where, return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_write_config_dword); + +void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos, + u32 clear, u32 set) +{ + u32 val; + + pci_read_config_dword(dev, pos, &val); + val &= ~clear; + val |= set; + pci_write_config_dword(dev, pos, val); +} +EXPORT_SYMBOL(pci_clear_and_set_config_dword); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 30c7dfeccb16..1eaffff40b8d 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -650,13 +650,6 @@ static void hv_arch_irq_unmask(struct irq_data *data) PCI_FUNC(pdev->devfn); params->int_target.vector = hv_msi_get_int_vector(data); - /* - * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by - * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a - * spurious interrupt storm. Not doing so does not seem to have a - * negative effect (yet?). 
- */ - if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) { /* * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 55bc3576a985..bdbf8a94b4d0 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1335,6 +1335,9 @@ static int pci_set_full_power_state(struct pci_dev *dev) pci_restore_bars(dev); } + if (dev->bus->self) + pcie_aspm_pm_state_change(dev->bus->self); + return 0; } @@ -1429,6 +1432,9 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state) pci_power_name(dev->current_state), pci_power_name(state)); + if (dev->bus->self) + pcie_aspm_pm_state_change(dev->bus->self); + return 0; } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 5ecbcf041179..f43873049d52 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -569,10 +569,12 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt); #ifdef CONFIG_PCIEASPM void pcie_aspm_init_link_state(struct pci_dev *pdev); void pcie_aspm_exit_link_state(struct pci_dev *pdev); +void pcie_aspm_pm_state_change(struct pci_dev *pdev); void pcie_aspm_powersave_config_link(struct pci_dev *pdev); #else static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { } static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { } +static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { } static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { } #endif diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 5dab531c8654..5a0066ecc3c5 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -426,17 +426,6 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint) } } -static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos, - u32 clear, u32 set) -{ - u32 val; - - pci_read_config_dword(pdev, pos, &val); - val &= ~clear; - val |= set; - pci_write_config_dword(pdev, pos, val); -} - /* Calculate L1.2 PM substate timing parameters */ static void aspm_calc_l12_info(struct pcie_link_state *link, u32 parent_l1ss_cap, u32 child_l1ss_cap) @@ -501,10 +490,12 @@ static void aspm_calc_l12_info(struct pcie_link_state *link, cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK; if (pl1_2_enables || cl1_2_enables) { - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1_2_MASK, 0); - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(child, + child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(parent, + parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); } /* Program T_POWER_ON times in both ports */ @@ -512,22 +503,26 @@ static void aspm_calc_l12_info(struct pcie_link_state *link, pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2); /* Program Common_Mode_Restore_Time in upstream device */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); /* Program LTR_L1.2_THRESHOLD time in both ports */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + 
PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, + ctl1); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, + ctl1); if (pl1_2_enables || cl1_2_enables) { - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0, - pl1_2_enables); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0, - cl1_2_enables); + pci_clear_and_set_config_dword(parent, + parent->l1ss + PCI_L1SS_CTL1, 0, + pl1_2_enables); + pci_clear_and_set_config_dword(child, + child->l1ss + PCI_L1SS_CTL1, 0, + cl1_2_enables); } } @@ -687,10 +682,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) */ /* Disable all L1 substates */ - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, 0); - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, 0); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, 0); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, 0); /* * If needed, disable L1, and it gets enabled later * in pcie_config_aspm_link(). @@ -713,10 +708,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* Enable what we need to enable */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, val); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, val); } static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) @@ -1008,6 +1003,25 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) up_read(&pci_bus_sem); } +/* @pdev: the root port or switch downstream port */ +void pcie_aspm_pm_state_change(struct pci_dev *pdev) +{ + struct pcie_link_state *link = pdev->link_state; + + if (aspm_disabled || !link) + return; + /* + * Devices changed PM state, we should recheck if latency + * meets all functions' requirements + */ + down_read(&pci_bus_sem); + mutex_lock(&aspm_lock); + pcie_update_aspm_capable(link->root); + pcie_config_aspm_path(link); + mutex_unlock(&aspm_lock); + up_read(&pci_bus_sem); +} + void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { struct pcie_link_state *link = pdev->link_state; diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 273d67ecf6d2..ec6e0d9194a1 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -217,6 +217,13 @@ config MARVELL_CN10K_DDR_PMU Enable perf support for Marvell DDR Performance monitoring event on CN10K platform. +config DWC_PCIE_PMU + tristate "Synopsys DesignWare PCIe PMU" + depends on PCI + help + Enable perf support for Synopsys DesignWare PCIe PMU Performance + monitoring events on platforms including the Alibaba Yitian 710.
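The aspm.c hunks above are a mechanical conversion from the removed file-local pci_clear_and_set_dword() to pci_clear_and_set_config_dword(), the shared helper added to drivers/pci/access.c earlier in this diff; both perform a plain, unlocked read-modify-write of a config dword. A hedged usage sketch (the demo_* function is illustrative, not from the patch):

static void demo_clear_l12_threshold(struct pci_dev *pdev, u16 l1ss)
{
	/* Clear the LTR_L1.2_THRESHOLD value and scale fields, set no bits. */
	pci_clear_and_set_config_dword(pdev, l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				       PCI_L1SS_CTL1_LTR_L12_TH_SCALE, 0);
}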
+ source "drivers/perf/arm_cspmu/Kconfig" source "drivers/perf/amlogic/Kconfig" diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index 16b3ec4db916..a06338e3401c 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o obj-$(CONFIG_ALIBABA_UNCORE_DRW_PMU) += alibaba_uncore_drw_pmu.o +obj-$(CONFIG_DWC_PCIE_PMU) += dwc_pcie_pmu.o obj-$(CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += arm_cspmu/ obj-$(CONFIG_MESON_DDR_PMU) += amlogic/ obj-$(CONFIG_CXL_PMU) += cxl_pmu.o diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c index cd2de44b61b9..f322e5ca1114 100644 --- a/drivers/perf/apple_m1_cpu_pmu.c +++ b/drivers/perf/apple_m1_cpu_pmu.c @@ -524,8 +524,10 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event, { unsigned long config_base = 0; - if (!attr->exclude_guest) - return -EINVAL; + if (!attr->exclude_guest) { + pr_debug("ARM performance counters do not support mode exclusion\n"); + return -EOPNOTSUPP; + } if (!attr->exclude_kernel) config_base |= M1_PMU_CFG_COUNT_KERNEL; if (!attr->exclude_user) diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 847b0dc41293..c584165b13ba 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -811,7 +811,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, #define CMN_EVENT_HNF_OCC(_model, _name, _event) \ CMN_EVENT_HN_OCC(_model, hnf_##_name, CMN_TYPE_HNF, _event) #define CMN_EVENT_HNF_CLS(_model, _name, _event) \ - CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNS, _event) + CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNF, _event) #define CMN_EVENT_HNF_SNT(_model, _name, _event) \ CMN_EVENT_HN_SNT(_model, hnf_##_name, CMN_TYPE_HNF, _event) diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c index 2cc35dded007..50b89b989ce7 100644 --- a/drivers/perf/arm_cspmu/arm_cspmu.c +++ b/drivers/perf/arm_cspmu/arm_cspmu.c @@ -1108,7 +1108,6 @@ static int arm_cspmu_request_irq(struct arm_cspmu *cspmu) static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid) { - u64 acpi_uid; struct device *cpu_dev; struct acpi_device *acpi_dev; @@ -1118,8 +1117,7 @@ static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid) acpi_dev = ACPI_COMPANION(cpu_dev); while (acpi_dev) { - if (acpi_dev_hid_uid_match(acpi_dev, ACPI_PROCESSOR_CONTAINER_HID, NULL) && - !acpi_dev_uid_to_integer(acpi_dev, &acpi_uid) && acpi_uid == container_uid) + if (acpi_dev_hid_uid_match(acpi_dev, ACPI_PROCESSOR_CONTAINER_HID, container_uid)) return 0; acpi_dev = acpi_dev_parent(acpi_dev); diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c index 8223c49bd082..7ec4498e312f 100644 --- a/drivers/perf/arm_dsu_pmu.c +++ b/drivers/perf/arm_dsu_pmu.c @@ -371,7 +371,7 @@ static inline u32 dsu_pmu_get_reset_overflow(void) return __dsu_pmu_get_reset_overflow(); } -/** +/* * dsu_pmu_set_event_period: Set the period for the counter. * * All DSU PMU event counters, except the cycle counter are 32bit @@ -602,7 +602,7 @@ static struct dsu_pmu *dsu_pmu_alloc(struct platform_device *pdev) return dsu_pmu; } -/** +/* * dsu_pmu_dt_get_cpus: Get the list of CPUs in the cluster * from device tree. 
*/ @@ -632,7 +632,7 @@ static int dsu_pmu_dt_get_cpus(struct device *dev, cpumask_t *mask) return 0; } -/** +/* * dsu_pmu_acpi_get_cpus: Get the list of CPUs in the cluster * from ACPI. */ diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index d712a19e47ac..8458fe2cebb4 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -445,7 +445,7 @@ __hw_perf_event_init(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; - int mapping; + int mapping, ret; hwc->flags = 0; mapping = armpmu->map_event(event); @@ -470,11 +470,10 @@ __hw_perf_event_init(struct perf_event *event) /* * Check whether we need to exclude the counter from certain modes. */ - if (armpmu->set_event_filter && - armpmu->set_event_filter(hwc, &event->attr)) { - pr_debug("ARM performance counters do not support " - "mode exclusion\n"); - return -EOPNOTSUPP; + if (armpmu->set_event_filter) { + ret = armpmu->set_event_filter(hwc, &event->attr); + if (ret) + return ret; } /* @@ -893,7 +892,6 @@ struct arm_pmu *armpmu_alloc(void) struct pmu_hw_events *events; events = per_cpu_ptr(pmu->hw_events, cpu); - raw_spin_lock_init(&events->pmu_lock); events->percpu_pmu = pmu; } diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 6ca7be05229c..23fa6c5da82c 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -15,6 +15,7 @@ #include <clocksource/arm_arch_timer.h> #include <linux/acpi.h> +#include <linux/bitfield.h> #include <linux/clocksource.h> #include <linux/of.h> #include <linux/perf/arm_pmu.h> @@ -169,7 +170,11 @@ armv8pmu_events_sysfs_show(struct device *dev, PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config) static struct attribute *armv8_pmuv3_event_attrs[] = { - ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR), + /* + * Don't expose the sw_incr event in /sys. It's not usable as writes to + * PMSWINC_EL0 will trap as PMUSERENR.{SW,EN}=={0,0} and event rotation + * means we don't have a fixed event<->counter relationship regardless. 
+ */ ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL), ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL), ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL), @@ -294,26 +299,66 @@ static const struct attribute_group armv8_pmuv3_events_attr_group = { .is_visible = armv8pmu_event_attr_is_visible, }; -PMU_FORMAT_ATTR(event, "config:0-15"); -PMU_FORMAT_ATTR(long, "config1:0"); -PMU_FORMAT_ATTR(rdpmc, "config1:1"); +/* User ABI */ +#define ATTR_CFG_FLD_event_CFG config +#define ATTR_CFG_FLD_event_LO 0 +#define ATTR_CFG_FLD_event_HI 15 +#define ATTR_CFG_FLD_long_CFG config1 +#define ATTR_CFG_FLD_long_LO 0 +#define ATTR_CFG_FLD_long_HI 0 +#define ATTR_CFG_FLD_rdpmc_CFG config1 +#define ATTR_CFG_FLD_rdpmc_LO 1 +#define ATTR_CFG_FLD_rdpmc_HI 1 +#define ATTR_CFG_FLD_threshold_count_CFG config1 /* PMEVTYPER.TC[0] */ +#define ATTR_CFG_FLD_threshold_count_LO 2 +#define ATTR_CFG_FLD_threshold_count_HI 2 +#define ATTR_CFG_FLD_threshold_compare_CFG config1 /* PMEVTYPER.TC[2:1] */ +#define ATTR_CFG_FLD_threshold_compare_LO 3 +#define ATTR_CFG_FLD_threshold_compare_HI 4 +#define ATTR_CFG_FLD_threshold_CFG config1 /* PMEVTYPER.TH */ +#define ATTR_CFG_FLD_threshold_LO 5 +#define ATTR_CFG_FLD_threshold_HI 16 + +GEN_PMU_FORMAT_ATTR(event); +GEN_PMU_FORMAT_ATTR(long); +GEN_PMU_FORMAT_ATTR(rdpmc); +GEN_PMU_FORMAT_ATTR(threshold_count); +GEN_PMU_FORMAT_ATTR(threshold_compare); +GEN_PMU_FORMAT_ATTR(threshold); static int sysctl_perf_user_access __read_mostly; -static inline bool armv8pmu_event_is_64bit(struct perf_event *event) +static bool armv8pmu_event_is_64bit(struct perf_event *event) +{ + return ATTR_CFG_GET_FLD(&event->attr, long); +} + +static bool armv8pmu_event_want_user_access(struct perf_event *event) { - return event->attr.config1 & 0x1; + return ATTR_CFG_GET_FLD(&event->attr, rdpmc); } -static inline bool armv8pmu_event_want_user_access(struct perf_event *event) +static u8 armv8pmu_event_threshold_control(struct perf_event_attr *attr) { - return event->attr.config1 & 0x2; + u8 th_compare = ATTR_CFG_GET_FLD(attr, threshold_compare); + u8 th_count = ATTR_CFG_GET_FLD(attr, threshold_count); + + /* + * The count bit is always the bottom bit of the full control field, and + * the comparison is the upper two bits, but it's not explicitly + * labelled in the Arm ARM. For the Perf interface we split it into two + * fields, so reconstruct it here. 
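+ * For example, threshold_compare=0b10 with threshold_count=1 reconstructs to TC=0b101: the count bit lands in TC[0] and the comparison bits in TC[2:1].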
+ */ + return (th_compare << 1) | th_count; } static struct attribute *armv8_pmuv3_format_attrs[] = { &format_attr_event.attr, &format_attr_long.attr, &format_attr_rdpmc.attr, + &format_attr_threshold.attr, + &format_attr_threshold_compare.attr, + &format_attr_threshold_count.attr, NULL, }; @@ -327,7 +372,7 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr, { struct pmu *pmu = dev_get_drvdata(dev); struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); - u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK; + u32 slots = FIELD_GET(ARMV8_PMU_SLOTS, cpu_pmu->reg_pmmir); return sysfs_emit(page, "0x%08x\n", slots); } @@ -339,8 +384,7 @@ static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr, { struct pmu *pmu = dev_get_drvdata(dev); struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); - u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT) - & ARMV8_PMU_BUS_SLOTS_MASK; + u32 bus_slots = FIELD_GET(ARMV8_PMU_BUS_SLOTS, cpu_pmu->reg_pmmir); return sysfs_emit(page, "0x%08x\n", bus_slots); } @@ -352,8 +396,7 @@ static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr, { struct pmu *pmu = dev_get_drvdata(dev); struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); - u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT) - & ARMV8_PMU_BUS_WIDTH_MASK; + u32 bus_width = FIELD_GET(ARMV8_PMU_BUS_WIDTH, cpu_pmu->reg_pmmir); u32 val = 0; /* Encoded as Log2(number of bytes), plus one */ @@ -365,10 +408,38 @@ static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RO(bus_width); +static u32 threshold_max(struct arm_pmu *cpu_pmu) +{ + /* + * PMMIR.THWIDTH is readable and non-zero on aarch32, but it would be + * impossible to write the threshold in the upper 32 bits of PMEVTYPER. + */ + if (IS_ENABLED(CONFIG_ARM)) + return 0; + + /* + * The largest value that can be written to PMEVTYPER<n>_EL0.TH is + * (2 ^ PMMIR.THWIDTH) - 1. + */ + return (1 << FIELD_GET(ARMV8_PMU_THWIDTH, cpu_pmu->reg_pmmir)) - 1; +} + +static ssize_t threshold_max_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); + + return sysfs_emit(page, "0x%08x\n", threshold_max(cpu_pmu)); +} + +static DEVICE_ATTR_RO(threshold_max); + static struct attribute *armv8_pmuv3_caps_attrs[] = { &dev_attr_slots.attr, &dev_attr_bus_slots.attr, &dev_attr_bus_width.attr, + &dev_attr_threshold_max.attr, NULL, }; @@ -397,7 +468,7 @@ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu) return (IS_ENABLED(CONFIG_ARM64) && is_pmuv3p5(cpu_pmu->pmuver)); } -static inline bool armv8pmu_event_has_user_read(struct perf_event *event) +static bool armv8pmu_event_has_user_read(struct perf_event *event) { return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT; } @@ -407,7 +478,7 @@ static inline bool armv8pmu_event_has_user_read(struct perf_event *event) * except when we have allocated the 64bit cycle counter (for CPU * cycles event) or when user space counter access is enabled. 
*/ -static inline bool armv8pmu_event_is_chained(struct perf_event *event) +static bool armv8pmu_event_is_chained(struct perf_event *event) { int idx = event->hw.idx; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); @@ -428,36 +499,36 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event) #define ARMV8_IDX_TO_COUNTER(x) \ (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK) -static inline u64 armv8pmu_pmcr_read(void) +static u64 armv8pmu_pmcr_read(void) { return read_pmcr(); } -static inline void armv8pmu_pmcr_write(u64 val) +static void armv8pmu_pmcr_write(u64 val) { val &= ARMV8_PMU_PMCR_MASK; isb(); write_pmcr(val); } -static inline int armv8pmu_has_overflowed(u32 pmovsr) +static int armv8pmu_has_overflowed(u32 pmovsr) { return pmovsr & ARMV8_PMU_OVERFLOWED_MASK; } -static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) +static int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) { return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx)); } -static inline u64 armv8pmu_read_evcntr(int idx) +static u64 armv8pmu_read_evcntr(int idx) { u32 counter = ARMV8_IDX_TO_COUNTER(idx); return read_pmevcntrn(counter); } -static inline u64 armv8pmu_read_hw_counter(struct perf_event *event) +static u64 armv8pmu_read_hw_counter(struct perf_event *event) { int idx = event->hw.idx; u64 val = armv8pmu_read_evcntr(idx); @@ -519,14 +590,14 @@ static u64 armv8pmu_read_counter(struct perf_event *event) return armv8pmu_unbias_long_counter(event, value); } -static inline void armv8pmu_write_evcntr(int idx, u64 value) +static void armv8pmu_write_evcntr(int idx, u64 value) { u32 counter = ARMV8_IDX_TO_COUNTER(idx); write_pmevcntrn(counter, value); } -static inline void armv8pmu_write_hw_counter(struct perf_event *event, +static void armv8pmu_write_hw_counter(struct perf_event *event, u64 value) { int idx = event->hw.idx; @@ -552,15 +623,22 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value) armv8pmu_write_hw_counter(event, value); } -static inline void armv8pmu_write_evtype(int idx, u32 val) +static void armv8pmu_write_evtype(int idx, unsigned long val) { u32 counter = ARMV8_IDX_TO_COUNTER(idx); + unsigned long mask = ARMV8_PMU_EVTYPE_EVENT | + ARMV8_PMU_INCLUDE_EL2 | + ARMV8_PMU_EXCLUDE_EL0 | + ARMV8_PMU_EXCLUDE_EL1; - val &= ARMV8_PMU_EVTYPE_MASK; + if (IS_ENABLED(CONFIG_ARM64)) + mask |= ARMV8_PMU_EVTYPE_TC | ARMV8_PMU_EVTYPE_TH; + + val &= mask; write_pmevtypern(counter, val); } -static inline void armv8pmu_write_event_type(struct perf_event *event) +static void armv8pmu_write_event_type(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; @@ -594,7 +672,7 @@ static u32 armv8pmu_event_cnten_mask(struct perf_event *event) return mask; } -static inline void armv8pmu_enable_counter(u32 mask) +static void armv8pmu_enable_counter(u32 mask) { /* * Make sure event configuration register writes are visible before we @@ -604,7 +682,7 @@ static inline void armv8pmu_enable_counter(u32 mask) write_pmcntenset(mask); } -static inline void armv8pmu_enable_event_counter(struct perf_event *event) +static void armv8pmu_enable_event_counter(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; u32 mask = armv8pmu_event_cnten_mask(event); @@ -616,7 +694,7 @@ static inline void armv8pmu_enable_event_counter(struct perf_event *event) armv8pmu_enable_counter(mask); } -static inline void armv8pmu_disable_counter(u32 mask) +static void armv8pmu_disable_counter(u32 mask) { write_pmcntenclr(mask); /* @@ -626,7 +704,7 @@ static inline 
void armv8pmu_disable_counter(u32 mask) isb(); } -static inline void armv8pmu_disable_event_counter(struct perf_event *event) +static void armv8pmu_disable_event_counter(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; u32 mask = armv8pmu_event_cnten_mask(event); @@ -638,18 +716,18 @@ static inline void armv8pmu_disable_event_counter(struct perf_event *event) armv8pmu_disable_counter(mask); } -static inline void armv8pmu_enable_intens(u32 mask) +static void armv8pmu_enable_intens(u32 mask) { write_pmintenset(mask); } -static inline void armv8pmu_enable_event_irq(struct perf_event *event) +static void armv8pmu_enable_event_irq(struct perf_event *event) { u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); armv8pmu_enable_intens(BIT(counter)); } -static inline void armv8pmu_disable_intens(u32 mask) +static void armv8pmu_disable_intens(u32 mask) { write_pmintenclr(mask); isb(); @@ -658,13 +736,13 @@ static inline void armv8pmu_disable_intens(u32 mask) isb(); } -static inline void armv8pmu_disable_event_irq(struct perf_event *event) +static void armv8pmu_disable_event_irq(struct perf_event *event) { u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx); armv8pmu_disable_intens(BIT(counter)); } -static inline u32 armv8pmu_getreset_flags(void) +static u32 armv8pmu_getreset_flags(void) { u32 value; @@ -672,7 +750,7 @@ static inline u32 armv8pmu_getreset_flags(void) value = read_pmovsclr(); /* Write to clear flags */ - value &= ARMV8_PMU_OVSR_MASK; + value &= ARMV8_PMU_OVERFLOWED_MASK; write_pmovsclr(value); return value; @@ -914,9 +992,15 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, struct perf_event_attr *attr) { unsigned long config_base = 0; - - if (attr->exclude_idle) - return -EPERM; + struct perf_event *perf_event = container_of(attr, struct perf_event, + attr); + struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu); + u32 th; + + if (attr->exclude_idle) { + pr_debug("ARM performance counters do not support mode exclusion\n"); + return -EOPNOTSUPP; + } /* * If we're running in hyp mode, then we *are* the hypervisor. @@ -946,6 +1030,22 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, config_base |= ARMV8_PMU_EXCLUDE_EL0; /* + * If FEAT_PMUv3_TH isn't implemented, then THWIDTH (threshold_max) will + * be 0 and will also trigger this check, preventing it from being used. + */ + th = ATTR_CFG_GET_FLD(attr, threshold); + if (th > threshold_max(cpu_pmu)) { + pr_debug("PMU event threshold exceeds max value\n"); + return -EINVAL; + } + + if (IS_ENABLED(CONFIG_ARM64) && th) { + config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TH, th); + config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TC, + armv8pmu_event_threshold_control(attr)); + } + + /* * Install the filter into config_base as this is used to * construct the event type. 
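The threshold handling in armv8pmu_set_event_filter() is a validate-then-pack step, and the validation doubles as the feature check: without FEAT_PMUv3_TH, PMMIR.THWIDTH reads as zero, so threshold_max() is zero and any non-zero request is rejected with -EINVAL. A sketch of that logic with portable stand-ins; the TH (43:32) and TC (63:61) positions are my reading of the arm_pmuv3 header and are shown here only for illustration:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP64(mask, val) (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define EVTYPE_TH GENMASK_ULL(43, 32)   /* assumed ARMV8_PMU_EVTYPE_TH */
#define EVTYPE_TC GENMASK_ULL(63, 61)   /* assumed ARMV8_PMU_EVTYPE_TC */

static int pack_threshold(uint64_t *config_base, uint32_t th, uint8_t tc, uint32_t th_max)
{
        if (th > th_max)        /* also rejects everything when THWIDTH == 0 */
                return -EINVAL;
        if (th) {
                *config_base |= FIELD_PREP64(EVTYPE_TH, th);
                *config_base |= FIELD_PREP64(EVTYPE_TC, tc);
        }
        return 0;
}

int main(void)
{
        uint64_t cfg = 0;

        if (!pack_threshold(&cfg, 0x30, 0x3, 255))
                printf("config_base = %#llx\n", (unsigned long long)cfg);
        return 0;
}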
*/ @@ -1107,8 +1207,7 @@ static void __armv8pmu_probe_pmu(void *info) probe->present = true; /* Read the nb of CNTx counters supported from PMNC */ - cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT) - & ARMV8_PMU_PMCR_N_MASK; + cpu_pmu->num_events = FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read()); /* Add the CPU cycles counter */ cpu_pmu->num_events += 1; @@ -1221,6 +1320,12 @@ static int name##_pmu_init(struct arm_pmu *cpu_pmu) \ return armv8_pmu_init(cpu_pmu, #name, armv8_pmuv3_map_event); \ } +#define PMUV3_INIT_MAP_EVENT(name, map_event) \ +static int name##_pmu_init(struct arm_pmu *cpu_pmu) \ +{ \ + return armv8_pmu_init(cpu_pmu, #name, map_event); \ +} + PMUV3_INIT_SIMPLE(armv8_pmuv3) PMUV3_INIT_SIMPLE(armv8_cortex_a34) @@ -1247,51 +1352,24 @@ PMUV3_INIT_SIMPLE(armv8_neoverse_v1) PMUV3_INIT_SIMPLE(armv8_nvidia_carmel) PMUV3_INIT_SIMPLE(armv8_nvidia_denver) -static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a35", armv8_a53_map_event); -} - -static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a53", armv8_a53_map_event); -} - -static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a57", armv8_a57_map_event); -} - -static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a72", armv8_a57_map_event); -} - -static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a73", armv8_a73_map_event); -} - -static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init(cpu_pmu, "armv8_cavium_thunder", armv8_thunder_map_event); -} - -static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init(cpu_pmu, "armv8_brcm_vulcan", armv8_vulcan_map_event); -} +PMUV3_INIT_MAP_EVENT(armv8_cortex_a35, armv8_a53_map_event) +PMUV3_INIT_MAP_EVENT(armv8_cortex_a53, armv8_a53_map_event) +PMUV3_INIT_MAP_EVENT(armv8_cortex_a57, armv8_a57_map_event) +PMUV3_INIT_MAP_EVENT(armv8_cortex_a72, armv8_a57_map_event) +PMUV3_INIT_MAP_EVENT(armv8_cortex_a73, armv8_a73_map_event) +PMUV3_INIT_MAP_EVENT(armv8_cavium_thunder, armv8_thunder_map_event) +PMUV3_INIT_MAP_EVENT(armv8_brcm_vulcan, armv8_vulcan_map_event) static const struct of_device_id armv8_pmu_of_device_ids[] = { {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_pmu_init}, {.compatible = "arm,cortex-a34-pmu", .data = armv8_cortex_a34_pmu_init}, - {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init}, - {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init}, + {.compatible = "arm,cortex-a35-pmu", .data = armv8_cortex_a35_pmu_init}, + {.compatible = "arm,cortex-a53-pmu", .data = armv8_cortex_a53_pmu_init}, {.compatible = "arm,cortex-a55-pmu", .data = armv8_cortex_a55_pmu_init}, - {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init}, + {.compatible = "arm,cortex-a57-pmu", .data = armv8_cortex_a57_pmu_init}, {.compatible = "arm,cortex-a65-pmu", .data = armv8_cortex_a65_pmu_init}, - {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init}, - {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init}, + {.compatible = "arm,cortex-a72-pmu", .data = armv8_cortex_a72_pmu_init}, + {.compatible = "arm,cortex-a73-pmu", .data = armv8_cortex_a73_pmu_init}, {.compatible = "arm,cortex-a75-pmu", .data = armv8_cortex_a75_pmu_init}, {.compatible = "arm,cortex-a76-pmu", .data = armv8_cortex_a76_pmu_init}, {.compatible = 
"arm,cortex-a77-pmu", .data = armv8_cortex_a77_pmu_init}, @@ -1309,8 +1387,8 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = { {.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init}, {.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init}, {.compatible = "arm,neoverse-v1-pmu", .data = armv8_neoverse_v1_pmu_init}, - {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init}, - {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init}, + {.compatible = "cavium,thunder-pmu", .data = armv8_cavium_thunder_pmu_init}, + {.compatible = "brcm,vulcan-pmu", .data = armv8_brcm_vulcan_pmu_init}, {.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init}, {.compatible = "nvidia,denver-pmu", .data = armv8_nvidia_denver_pmu_init}, {}, diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index d2b0cbf0e0c4..b622d75d8c9e 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -206,28 +206,6 @@ static const struct attribute_group arm_spe_pmu_cap_group = { #define ATTR_CFG_FLD_inv_event_filter_LO 0 #define ATTR_CFG_FLD_inv_event_filter_HI 63 -/* Why does everything I do descend into this? */ -#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \ - (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi - -#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \ - __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) - -#define GEN_PMU_FORMAT_ATTR(name) \ - PMU_FORMAT_ATTR(name, \ - _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \ - ATTR_CFG_FLD_##name##_LO, \ - ATTR_CFG_FLD_##name##_HI)) - -#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \ - ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0)) - -#define ATTR_CFG_GET_FLD(attr, name) \ - _ATTR_CFG_GET_FLD(attr, \ - ATTR_CFG_FLD_##name##_CFG, \ - ATTR_CFG_FLD_##name##_LO, \ - ATTR_CFG_FLD_##name##_HI) - GEN_PMU_FORMAT_ATTR(ts_enable); GEN_PMU_FORMAT_ATTR(pa_enable); GEN_PMU_FORMAT_ATTR(pct_enable); diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c new file mode 100644 index 000000000000..957058ad0099 --- /dev/null +++ b/drivers/perf/dwc_pcie_pmu.c @@ -0,0 +1,792 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare PCIe PMU driver + * + * Copyright (C) 2021-2023 Alibaba Inc. + */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/cpuhotplug.h> +#include <linux/cpumask.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/perf_event.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/smp.h> +#include <linux/sysfs.h> +#include <linux/types.h> + +#define DWC_PCIE_VSEC_RAS_DES_ID 0x02 +#define DWC_PCIE_EVENT_CNT_CTL 0x8 + +/* + * Event Counter Data Select includes two parts: + * - 27-24: Group number(4-bit: 0..0x7) + * - 23-16: Event number(8-bit: 0..0x13) within the Group + * + * Put them together as in TRM. 
+ */ +#define DWC_PCIE_CNT_EVENT_SEL GENMASK(27, 16) +#define DWC_PCIE_CNT_LANE_SEL GENMASK(11, 8) +#define DWC_PCIE_CNT_STATUS BIT(7) +#define DWC_PCIE_CNT_ENABLE GENMASK(4, 2) +#define DWC_PCIE_PER_EVENT_OFF 0x1 +#define DWC_PCIE_PER_EVENT_ON 0x3 +#define DWC_PCIE_EVENT_CLEAR GENMASK(1, 0) +#define DWC_PCIE_EVENT_PER_CLEAR 0x1 + +#define DWC_PCIE_EVENT_CNT_DATA 0xC + +#define DWC_PCIE_TIME_BASED_ANAL_CTL 0x10 +#define DWC_PCIE_TIME_BASED_REPORT_SEL GENMASK(31, 24) +#define DWC_PCIE_TIME_BASED_DURATION_SEL GENMASK(15, 8) +#define DWC_PCIE_DURATION_MANUAL_CTL 0x0 +#define DWC_PCIE_DURATION_1MS 0x1 +#define DWC_PCIE_DURATION_10MS 0x2 +#define DWC_PCIE_DURATION_100MS 0x3 +#define DWC_PCIE_DURATION_1S 0x4 +#define DWC_PCIE_DURATION_2S 0x5 +#define DWC_PCIE_DURATION_4S 0x6 +#define DWC_PCIE_DURATION_4US 0xFF +#define DWC_PCIE_TIME_BASED_TIMER_START BIT(0) +#define DWC_PCIE_TIME_BASED_CNT_ENABLE 0x1 + +#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW 0x14 +#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH 0x18 + +/* Event attributes */ +#define DWC_PCIE_CONFIG_EVENTID GENMASK(15, 0) +#define DWC_PCIE_CONFIG_TYPE GENMASK(19, 16) +#define DWC_PCIE_CONFIG_LANE GENMASK(27, 20) + +#define DWC_PCIE_EVENT_ID(event) FIELD_GET(DWC_PCIE_CONFIG_EVENTID, (event)->attr.config) +#define DWC_PCIE_EVENT_TYPE(event) FIELD_GET(DWC_PCIE_CONFIG_TYPE, (event)->attr.config) +#define DWC_PCIE_EVENT_LANE(event) FIELD_GET(DWC_PCIE_CONFIG_LANE, (event)->attr.config) + +enum dwc_pcie_event_type { + DWC_PCIE_TIME_BASE_EVENT, + DWC_PCIE_LANE_EVENT, + DWC_PCIE_EVENT_TYPE_MAX, +}; + +#define DWC_PCIE_LANE_EVENT_MAX_PERIOD GENMASK_ULL(31, 0) +#define DWC_PCIE_MAX_PERIOD GENMASK_ULL(63, 0) + +struct dwc_pcie_pmu { + struct pmu pmu; + struct pci_dev *pdev; /* Root Port device */ + u16 ras_des_offset; + u32 nr_lanes; + + struct list_head pmu_node; + struct hlist_node cpuhp_node; + struct perf_event *event[DWC_PCIE_EVENT_TYPE_MAX]; + int on_cpu; +}; + +#define to_dwc_pcie_pmu(p) (container_of(p, struct dwc_pcie_pmu, pmu)) + +static int dwc_pcie_pmu_hp_state; +static struct list_head dwc_pcie_dev_info_head = + LIST_HEAD_INIT(dwc_pcie_dev_info_head); +static bool notify; + +struct dwc_pcie_dev_info { + struct platform_device *plat_dev; + struct pci_dev *pdev; + struct list_head dev_node; +}; + +struct dwc_pcie_vendor_id { + int vendor_id; +}; + +static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = { + {.vendor_id = PCI_VENDOR_ID_ALIBABA }, + {} /* terminator */ +}; + +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu)); +} +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *dwc_pcie_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static struct attribute_group dwc_pcie_cpumask_attr_group = { + .attrs = dwc_pcie_pmu_cpumask_attrs, +}; + +struct dwc_pcie_format_attr { + struct device_attribute attr; + u64 field; + int config; +}; + +PMU_FORMAT_ATTR(eventid, "config:0-15"); +PMU_FORMAT_ATTR(type, "config:16-19"); +PMU_FORMAT_ATTR(lane, "config:20-27"); + +static struct attribute *dwc_pcie_format_attrs[] = { + &format_attr_type.attr, + &format_attr_eventid.attr, + &format_attr_lane.attr, + NULL, +}; + +static struct attribute_group dwc_pcie_format_attrs_group = { + .name = "format", + .attrs = dwc_pcie_format_attrs, +}; + +struct dwc_pcie_event_attr { + struct device_attribute attr; + enum dwc_pcie_event_type type; + 
u16 eventid; + u8 lane; +}; + +static ssize_t dwc_pcie_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dwc_pcie_event_attr *eattr; + + eattr = container_of(attr, typeof(*eattr), attr); + + if (eattr->type == DWC_PCIE_LANE_EVENT) + return sysfs_emit(buf, "eventid=0x%x,type=0x%x,lane=?\n", + eattr->eventid, eattr->type); + else if (eattr->type == DWC_PCIE_TIME_BASE_EVENT) + return sysfs_emit(buf, "eventid=0x%x,type=0x%x\n", + eattr->eventid, eattr->type); + + return 0; +} + +#define DWC_PCIE_EVENT_ATTR(_name, _type, _eventid, _lane) \ + (&((struct dwc_pcie_event_attr[]) {{ \ + .attr = __ATTR(_name, 0444, dwc_pcie_event_show, NULL), \ + .type = _type, \ + .eventid = _eventid, \ + .lane = _lane, \ + }})[0].attr.attr) + +#define DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(_name, _eventid) \ + DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_TIME_BASE_EVENT, _eventid, 0) +#define DWC_PCIE_PMU_LANE_EVENT_ATTR(_name, _eventid) \ + DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_LANE_EVENT, _eventid, 0) + +static struct attribute *dwc_pcie_pmu_time_event_attrs[] = { + /* Group #0 */ + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(one_cycle, 0x00), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_L0S, 0x01), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(RX_L0S, 0x02), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L0, 0x03), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1, 0x04), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x08), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09), + + /* Group #1 */ + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_PCIe_TLP_Data_Payload, 0x20), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_PCIe_TLP_Data_Payload, 0x21), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_CCIX_TLP_Data_Payload, 0x22), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_CCIX_TLP_Data_Payload, 0x23), + + /* + * Leave it to the user to specify the lane ID to avoid generating + * a list of hundreds of events. 
+ */ + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ack_dllp, 0x600), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_update_fc_dllp, 0x601), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ack_dllp, 0x602), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_update_fc_dllp, 0x603), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nulified_tlp, 0x604), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nulified_tlp, 0x605), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tl, 0x606), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_write, 0x700), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_read, 0x701), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_write, 0x702), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_read, 0x703), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_write, 0x704), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_read, 0x705), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_without_data, 0x706), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_with_data, 0x707), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_message_tlp, 0x708), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_atomic, 0x709), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_tlp_with_prefix, 0x70A), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_write, 0x70B), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_read, 0x70C), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_write, 0x70F), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_read, 0x710), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_without_data, 0x711), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_with_data, 0x712), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_message_tlp, 0x713), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_atomic, 0x714), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_tlp_with_prefix, 0x715), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ccix_tlp, 0x716), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ccix_tlp, 0x717), + NULL +}; + +static const struct attribute_group dwc_pcie_event_attrs_group = { + .name = "events", + .attrs = dwc_pcie_pmu_time_event_attrs, +}; + +static const struct attribute_group *dwc_pcie_attr_groups[] = { + &dwc_pcie_event_attrs_group, + &dwc_pcie_format_attrs_group, + &dwc_pcie_cpumask_attr_group, + NULL +}; + +static void dwc_pcie_pmu_lane_event_enable(struct dwc_pcie_pmu *pcie_pmu, + bool enable) +{ + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + + if (enable) + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_EVENT_CNT_CTL, + DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON); + else + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_EVENT_CNT_CTL, + DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF); +} + +static void dwc_pcie_pmu_time_based_event_enable(struct dwc_pcie_pmu *pcie_pmu, + bool enable) +{ + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, + DWC_PCIE_TIME_BASED_TIMER_START, enable); +} + +static u64 dwc_pcie_pmu_read_lane_event_counter(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + u32 val; + + pci_read_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_DATA, &val); + + return val; +} + +static u64 dwc_pcie_pmu_read_time_based_counter(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + struct pci_dev *pdev = pcie_pmu->pdev; + int event_id = DWC_PCIE_EVENT_ID(event); + u16 ras_des_offset = pcie_pmu->ras_des_offset; + u32 lo, hi, ss; + u64 val; + + /* + * The 64-bit value of the data counter is spread across two + * registers that are not synchronized. 
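Given the format fields defined earlier (config:0-15 eventid, config:16-19 type, config:20-27 lane), a raw perf config value for one of these lane events can be assembled as below — a sketch; the event and lane choice is just an example from the table:

#include <stdio.h>
#include <stdint.h>

enum { TIME_BASE_EVENT, LANE_EVENT };   /* matches the enum order in this driver */

/* config[15:0] = eventid, config[19:16] = type, config[27:20] = lane */
static uint64_t dwc_pcie_config(uint16_t eventid, uint8_t type, uint8_t lane)
{
        return (uint64_t)eventid | ((uint64_t)type << 16) | ((uint64_t)lane << 20);
}

int main(void)
{
        /* rx_memory_read (0x70C) counted on lane 2 */
        printf("config = %#llx\n",
               (unsigned long long)dwc_pcie_config(0x70C, LANE_EVENT, 2));
        return 0;
}

This is what the eventid=.../type=.../lane=... strings emitted by dwc_pcie_event_show() expand to when perf parses an event alias.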
In order to read them + * atomically, ensure that the high 32 bits match before and after + * reading the low 32 bits. + */ + pci_read_config_dword(pdev, + ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, &hi); + do { + /* snapshot the high 32 bits */ + ss = hi; + + pci_read_config_dword( + pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW, + &lo); + pci_read_config_dword( + pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, + &hi); + } while (hi != ss); + + val = ((u64)hi << 32) | lo; + /* + * The Group#1 event measures the amount of data processed in 16-byte + * units. Simplify the end-user interface by multiplying the counter + * at the point of read. + */ + if (event_id >= 0x20 && event_id <= 0x23) + val *= 16; + + return val; +} + +static void dwc_pcie_pmu_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + u64 delta, prev, now = 0; + + do { + prev = local64_read(&hwc->prev_count); + + if (type == DWC_PCIE_LANE_EVENT) + now = dwc_pcie_pmu_read_lane_event_counter(event); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + now = dwc_pcie_pmu_read_time_based_counter(event); + + } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev); + + delta = (now - prev) & DWC_PCIE_MAX_PERIOD; + /* 32-bit counter for Lane Event Counting */ + if (type == DWC_PCIE_LANE_EVENT) + delta &= DWC_PCIE_LANE_EVENT_MAX_PERIOD; + + local64_add(delta, &event->count); +} + +static int dwc_pcie_pmu_event_init(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + struct perf_event *sibling; + u32 lane; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* We don't support sampling */ + if (is_sampling_event(event)) + return -EINVAL; + + /* We cannot support task bound events */ + if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) + return -EINVAL; + + if (event->group_leader != event && + !is_software_event(event->group_leader)) + return -EINVAL; + + for_each_sibling_event(sibling, event->group_leader) { + if (sibling->pmu != event->pmu && !is_software_event(sibling)) + return -EINVAL; + } + + if (type < 0 || type >= DWC_PCIE_EVENT_TYPE_MAX) + return -EINVAL; + + if (type == DWC_PCIE_LANE_EVENT) { + lane = DWC_PCIE_EVENT_LANE(event); + if (lane < 0 || lane >= pcie_pmu->nr_lanes) + return -EINVAL; + } + + event->cpu = pcie_pmu->on_cpu; + + return 0; +} + +static void dwc_pcie_pmu_event_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + + hwc->state = 0; + local64_set(&hwc->prev_count, 0); + + if (type == DWC_PCIE_LANE_EVENT) + dwc_pcie_pmu_lane_event_enable(pcie_pmu, true); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + dwc_pcie_pmu_time_based_event_enable(pcie_pmu, true); +} + +static void dwc_pcie_pmu_event_stop(struct perf_event *event, int flags) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + struct hw_perf_event *hwc = &event->hw; + + if (event->hw.state & PERF_HES_STOPPED) + return; + + if (type == DWC_PCIE_LANE_EVENT) + dwc_pcie_pmu_lane_event_enable(pcie_pmu, false); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + dwc_pcie_pmu_time_based_event_enable(pcie_pmu, false); + + dwc_pcie_pmu_event_update(event); + 
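The high/low/high sequence here is the standard lock-free read of a 64-bit counter exposed as two unsynchronized 32-bit halves: if the high word is unchanged after the low word was sampled, no carry can have torn the snapshot (the subsequent multiply by 16 only rescales the Group #1 events from their 16-byte units). A self-contained illustration, with a plain variable standing in for the two config-space registers:

#include <stdio.h>
#include <stdint.h>

static volatile uint64_t reg;   /* pretend hardware counter */

static uint32_t read_lo(void) { return (uint32_t)reg; }
static uint32_t read_hi(void) { return (uint32_t)(reg >> 32); }

/* Retry until the high half is stable across the low-half read. */
static uint64_t read_counter64(void)
{
        uint32_t hi = read_hi(), ss, lo;

        do {
                ss = hi;                /* snapshot the high 32 bits */
                lo = read_lo();
                hi = read_hi();
        } while (hi != ss);             /* a carry crept in between; try again */

        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        reg = 0x00000001fffffff0ULL;
        printf("counter = %#llx\n", (unsigned long long)read_counter64());
        return 0;
}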
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; +} + +static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + struct pci_dev *pdev = pcie_pmu->pdev; + struct hw_perf_event *hwc = &event->hw; + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + int event_id = DWC_PCIE_EVENT_ID(event); + int lane = DWC_PCIE_EVENT_LANE(event); + u16 ras_des_offset = pcie_pmu->ras_des_offset; + u32 ctrl; + + /* only one counter per event type; fail if it is already in use */ + if (pcie_pmu->event[type]) + return -ENOSPC; + + pcie_pmu->event[type] = event; + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + if (type == DWC_PCIE_LANE_EVENT) { + /* EVENT_COUNTER_DATA_REG needs to be cleared manually */ + ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) | + FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) | + FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF) | + FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR); + pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL, + ctrl); + } else if (type == DWC_PCIE_TIME_BASE_EVENT) { + /* + * TIME_BASED_ANAL_DATA_REG is a 64-bit register, so we can safely + * use it with any manually controlled duration. It is also + * cleared when the next measurement starts. + */ + ctrl = FIELD_PREP(DWC_PCIE_TIME_BASED_REPORT_SEL, event_id) | + FIELD_PREP(DWC_PCIE_TIME_BASED_DURATION_SEL, + DWC_PCIE_DURATION_MANUAL_CTL) | + DWC_PCIE_TIME_BASED_CNT_ENABLE; + pci_write_config_dword( + pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, ctrl); + } + + if (flags & PERF_EF_START) + dwc_pcie_pmu_event_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + + return 0; +} + +static void dwc_pcie_pmu_event_del(struct perf_event *event, int flags) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + + dwc_pcie_pmu_event_stop(event, flags | PERF_EF_UPDATE); + perf_event_update_userpage(event); + pcie_pmu->event[type] = NULL; +} + +static void dwc_pcie_pmu_remove_cpuhp_instance(void *hotplug_node) +{ + cpuhp_state_remove_instance_nocalls(dwc_pcie_pmu_hp_state, hotplug_node); +} + +/* + * Find the bound DES capability device info of a PCI device. + * @pdev: The PCI device.
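dwc_pcie_pmu_event_add() above can use a bare two-slot array as its whole allocator because the RAS DES block provides exactly one lane counter and one time-based counter; a second event of an occupied type simply gets -ENOSPC. The policy in miniature:

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

enum { TIME_BASE_EVENT, LANE_EVENT, EVENT_TYPE_MAX };

struct pmu_state { const void *event[EVENT_TYPE_MAX]; };

/* One hardware counter per event type: claim it or fail. */
static int event_add(struct pmu_state *p, int type, const void *ev)
{
        if (p->event[type])
                return -ENOSPC;
        p->event[type] = ev;
        return 0;
}

static void event_del(struct pmu_state *p, int type)
{
        p->event[type] = NULL;
}

int main(void)
{
        struct pmu_state s = { { NULL } };
        int a, b;

        printf("%d\n", event_add(&s, LANE_EVENT, &a));  /* 0 */
        printf("%d\n", event_add(&s, LANE_EVENT, &b));  /* -ENOSPC (-28 on Linux) */
        event_del(&s, LANE_EVENT);
        printf("%d\n", event_add(&s, LANE_EVENT, &b));  /* 0 */
        return 0;
}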
+ */ +static struct dwc_pcie_dev_info *dwc_pcie_find_dev_info(struct pci_dev *pdev) +{ + struct dwc_pcie_dev_info *dev_info; + + list_for_each_entry(dev_info, &dwc_pcie_dev_info_head, dev_node) + if (dev_info->pdev == pdev) + return dev_info; + + return NULL; +} + +static void dwc_pcie_unregister_pmu(void *data) +{ + struct dwc_pcie_pmu *pcie_pmu = data; + + perf_pmu_unregister(&pcie_pmu->pmu); +} + +static bool dwc_pcie_match_des_cap(struct pci_dev *pdev) +{ + const struct dwc_pcie_vendor_id *vid; + u16 vsec = 0; + u32 val; + + if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)) + return false; + + for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) { + vsec = pci_find_vsec_capability(pdev, vid->vendor_id, + DWC_PCIE_VSEC_RAS_DES_ID); + if (vsec) + break; + } + if (!vsec) + return false; + + pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); + if (PCI_VNDR_HEADER_REV(val) != 0x04) + return false; + + pci_dbg(pdev, + "Detected PCIe Vendor-Specific Extended Capability RAS DES\n"); + return true; +} + +static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info) +{ + platform_device_unregister(dev_info->plat_dev); + list_del(&dev_info->dev_node); + kfree(dev_info); +} + +static int dwc_pcie_register_dev(struct pci_dev *pdev) +{ + struct platform_device *plat_dev; + struct dwc_pcie_dev_info *dev_info; + u32 bdf; + + bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); + plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", bdf, + pdev, sizeof(*pdev)); + + if (IS_ERR(plat_dev)) + return PTR_ERR(plat_dev); + + dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); + if (!dev_info) + return -ENOMEM; + + /* Cache platform device to handle pci device hotplug */ + dev_info->plat_dev = plat_dev; + dev_info->pdev = pdev; + list_add(&dev_info->dev_node, &dwc_pcie_dev_info_head); + + return 0; +} + +static int dwc_pcie_pmu_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct pci_dev *pdev = to_pci_dev(dev); + struct dwc_pcie_dev_info *dev_info; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + if (!dwc_pcie_match_des_cap(pdev)) + return NOTIFY_DONE; + if (dwc_pcie_register_dev(pdev)) + return NOTIFY_BAD; + break; + case BUS_NOTIFY_DEL_DEVICE: + dev_info = dwc_pcie_find_dev_info(pdev); + if (!dev_info) + return NOTIFY_DONE; + dwc_pcie_unregister_dev(dev_info); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block dwc_pcie_pmu_nb = { + .notifier_call = dwc_pcie_pmu_notifier, +}; + +static int dwc_pcie_pmu_probe(struct platform_device *plat_dev) +{ + struct pci_dev *pdev = plat_dev->dev.platform_data; + struct dwc_pcie_pmu *pcie_pmu; + char *name; + u32 bdf, val; + u16 vsec; + int ret; + + vsec = pci_find_vsec_capability(pdev, pdev->vendor, + DWC_PCIE_VSEC_RAS_DES_ID); + pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); + bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); + name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", bdf); + if (!name) + return -ENOMEM; + + pcie_pmu = devm_kzalloc(&plat_dev->dev, sizeof(*pcie_pmu), GFP_KERNEL); + if (!pcie_pmu) + return -ENOMEM; + + pcie_pmu->pdev = pdev; + pcie_pmu->ras_des_offset = vsec; + pcie_pmu->nr_lanes = pcie_get_width_cap(pdev); + pcie_pmu->on_cpu = -1; + pcie_pmu->pmu = (struct pmu){ + .name = name, + .parent = &pdev->dev, + .module = THIS_MODULE, + .attr_groups = dwc_pcie_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + .task_ctx_nr = perf_invalid_context, + .event_init = dwc_pcie_pmu_event_init, 
+ .add = dwc_pcie_pmu_event_add, + .del = dwc_pcie_pmu_event_del, + .start = dwc_pcie_pmu_event_start, + .stop = dwc_pcie_pmu_event_stop, + .read = dwc_pcie_pmu_event_update, + }; + + /* Add this instance to the list used by the offline callback */ + ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state, + &pcie_pmu->cpuhp_node); + if (ret) { + pci_err(pdev, "Error %d registering hotplug @%x\n", ret, bdf); + return ret; + } + + /* Unwind when the platform driver is removed */ + ret = devm_add_action_or_reset(&plat_dev->dev, + dwc_pcie_pmu_remove_cpuhp_instance, + &pcie_pmu->cpuhp_node); + if (ret) + return ret; + + ret = perf_pmu_register(&pcie_pmu->pmu, name, -1); + if (ret) { + pci_err(pdev, "Error %d registering PMU @%x\n", ret, bdf); + return ret; + } + ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu, + pcie_pmu); + if (ret) + return ret; + + return 0; +} + +static int dwc_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) +{ + struct dwc_pcie_pmu *pcie_pmu; + + pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node); + if (pcie_pmu->on_cpu == -1) + pcie_pmu->on_cpu = cpumask_local_spread( + 0, dev_to_node(&pcie_pmu->pdev->dev)); + + return 0; +} + +static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) +{ + struct dwc_pcie_pmu *pcie_pmu; + struct pci_dev *pdev; + int node; + cpumask_t mask; + unsigned int target; + + pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node); + /* Nothing to do if this CPU doesn't own the PMU */ + if (cpu != pcie_pmu->on_cpu) + return 0; + + pcie_pmu->on_cpu = -1; + pdev = pcie_pmu->pdev; + node = dev_to_node(&pdev->dev); + if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) && + cpumask_andnot(&mask, &mask, cpumask_of(cpu))) + target = cpumask_any(&mask); + else + target = cpumask_any_but(cpu_online_mask, cpu); + + if (target >= nr_cpu_ids) { + pci_err(pdev, "There is no CPU to set\n"); + return 0; + } + + /* This PMU does NOT support interrupts; just migrate the context.
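The offline callback above prefers a CPU from the PMU's own NUMA node and only then falls back to any other online CPU. With plain bitmasks instead of the kernel's cpumask API, the selection collapses to a few lines — a model, not the real code:

#include <stdio.h>
#include <stdint.h>

/* Pick a migration target: same-node CPUs first, then any online CPU,
 * always excluding the CPU going down. Returns -1 if none is left. */
static int pick_target(uint64_t node_mask, uint64_t online_mask, int dying)
{
        uint64_t candidates = node_mask & online_mask & ~(1ULL << dying);

        if (!candidates)
                candidates = online_mask & ~(1ULL << dying);
        if (!candidates)
                return -1;
        return __builtin_ctzll(candidates);     /* lowest set bit = first CPU */
}

int main(void)
{
        /* node {2,3}, online {0,1,3}, CPU 3 dying: no node-local CPU remains */
        printf("target = %d\n", pick_target(0xC, 0xB, 3));      /* 0 */
        return 0;
}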
*/ + perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target); + pcie_pmu->on_cpu = target; + + return 0; +} + +static struct platform_driver dwc_pcie_pmu_driver = { + .probe = dwc_pcie_pmu_probe, + .driver = {.name = "dwc_pcie_pmu",}, +}; + +static int __init dwc_pcie_pmu_init(void) +{ + struct pci_dev *pdev = NULL; + bool found = false; + int ret; + + for_each_pci_dev(pdev) { + if (!dwc_pcie_match_des_cap(pdev)) + continue; + + ret = dwc_pcie_register_dev(pdev); + if (ret) { + pci_dev_put(pdev); + return ret; + } + + found = true; + } + if (!found) + return -ENODEV; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/dwc_pcie_pmu:online", + dwc_pcie_pmu_online_cpu, + dwc_pcie_pmu_offline_cpu); + if (ret < 0) + return ret; + + dwc_pcie_pmu_hp_state = ret; + + ret = platform_driver_register(&dwc_pcie_pmu_driver); + if (ret) + goto platform_driver_register_err; + + ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb); + if (ret) + goto platform_driver_register_err; + notify = true; + + return 0; + +platform_driver_register_err: + cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state); + + return ret; +} + +static void __exit dwc_pcie_pmu_exit(void) +{ + struct dwc_pcie_dev_info *dev_info, *tmp; + + if (notify) + bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb); + list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node) + dwc_pcie_unregister_dev(dev_info); + platform_driver_unregister(&dwc_pcie_pmu_driver); + cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state); +} + +module_init(dwc_pcie_pmu_init); +module_exit(dwc_pcie_pmu_exit); + +MODULE_DESCRIPTION("PMU driver for DesignWare Cores PCI Express Controller"); +MODULE_AUTHOR("Shuai Xue <xueshuai@linux.alibaba.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 92611c98120f..7dbfaee372c7 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -19,6 +19,8 @@ #define COUNTER_READ 0x20 #define COUNTER_DPCR1 0x30 +#define COUNTER_MUX_CNTL 0x50 +#define COUNTER_MASK_COMP 0x54 #define CNTL_OVER 0x1 #define CNTL_CLEAR 0x2 @@ -32,6 +34,13 @@ #define CNTL_CSV_SHIFT 24 #define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT) +#define READ_PORT_SHIFT 0 +#define READ_PORT_MASK (0x7 << READ_PORT_SHIFT) +#define READ_CHANNEL_REVERT 0x00000008 /* bit 3 for read channel select */ +#define WRITE_PORT_SHIFT 8 +#define WRITE_PORT_MASK (0x7 << WRITE_PORT_SHIFT) +#define WRITE_CHANNEL_REVERT 0x00000800 /* bit 11 for write channel select */ + #define EVENT_CYCLES_ID 0 #define EVENT_CYCLES_COUNTER 0 #define NUM_COUNTERS 4 @@ -50,6 +59,7 @@ static DEFINE_IDA(ddr_ida); /* DDR Perf hardware feature */ #define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */ #define DDR_CAP_AXI_ID_FILTER_ENHANCED 0x3 /* support enhanced AXI ID filter */ +#define DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER 0x4 /* support AXI ID PORT CHANNEL filter */ struct fsl_ddr_devtype_data { unsigned int quirks; /* quirks needed for different DDR Perf core */ @@ -82,6 +92,11 @@ static const struct fsl_ddr_devtype_data imx8mp_devtype_data = { .identifier = "i.MX8MP", }; +static const struct fsl_ddr_devtype_data imx8dxl_devtype_data = { + .quirks = DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER, + .identifier = "i.MX8DXL", +}; + static const struct of_device_id imx_ddr_pmu_dt_ids[] = { { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data}, { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data}, @@ -89,6 +104,7 @@ static const struct of_device_id imx_ddr_pmu_dt_ids[] = { { 
.compatible = "fsl,imx8mm-ddr-pmu", .data = &imx8mm_devtype_data}, { .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data}, { .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data}, + { .compatible = "fsl,imx8dxl-ddr-pmu", .data = &imx8dxl_devtype_data}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids); @@ -144,6 +160,7 @@ static const struct attribute_group ddr_perf_identifier_attr_group = { enum ddr_perf_filter_capabilities { PERF_CAP_AXI_ID_FILTER = 0, PERF_CAP_AXI_ID_FILTER_ENHANCED, + PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER, PERF_CAP_AXI_ID_FEAT_MAX, }; @@ -157,6 +174,8 @@ static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap) case PERF_CAP_AXI_ID_FILTER_ENHANCED: quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED; return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED; + case PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER: + return !!(quirks & DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER); default: WARN(1, "unknown filter cap %d\n", cap); } @@ -187,6 +206,7 @@ static ssize_t ddr_perf_filter_cap_show(struct device *dev, static struct attribute *ddr_perf_filter_cap_attr[] = { PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER), PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED), + PERF_FILTER_EXT_ATTR_ENTRY(super_filter, PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER), NULL, }; @@ -272,11 +292,15 @@ static const struct attribute_group ddr_perf_events_attr_group = { PMU_FORMAT_ATTR(event, "config:0-7"); PMU_FORMAT_ATTR(axi_id, "config1:0-15"); PMU_FORMAT_ATTR(axi_mask, "config1:16-31"); +PMU_FORMAT_ATTR(axi_port, "config2:0-2"); +PMU_FORMAT_ATTR(axi_channel, "config2:3-3"); static struct attribute *ddr_perf_format_attrs[] = { &format_attr_event.attr, &format_attr_axi_id.attr, &format_attr_axi_mask.attr, + &format_attr_axi_port.attr, + &format_attr_axi_channel.attr, NULL, }; @@ -530,6 +554,7 @@ static int ddr_perf_event_add(struct perf_event *event, int flags) int counter; int cfg = event->attr.config; int cfg1 = event->attr.config1; + int cfg2 = event->attr.config2; if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) { int i; @@ -553,6 +578,26 @@ static int ddr_perf_event_add(struct perf_event *event, int flags) return -EOPNOTSUPP; } + if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER) { + if (ddr_perf_is_filtered(event)) { + /* revert axi id masking(axi_mask) value */ + cfg1 ^= AXI_MASKING_REVERT; + writel(cfg1, pmu->base + COUNTER_MASK_COMP + ((counter - 1) << 4)); + + if (cfg == 0x41) { + /* revert axi read channel(axi_channel) value */ + cfg2 ^= READ_CHANNEL_REVERT; + cfg2 |= FIELD_PREP(READ_PORT_MASK, cfg2); + } else { + /* revert axi write channel(axi_channel) value */ + cfg2 ^= WRITE_CHANNEL_REVERT; + cfg2 |= FIELD_PREP(WRITE_PORT_MASK, cfg2); + } + + writel(cfg2, pmu->base + COUNTER_MUX_CNTL + ((counter - 1) << 4)); + } + } + pmu->events[counter] = event; hwc->idx = counter; diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c index 5cf770a1bc31..9685645bfe04 100644 --- a/drivers/perf/fsl_imx9_ddr_perf.c +++ b/drivers/perf/fsl_imx9_ddr_perf.c @@ -617,7 +617,7 @@ static int ddr_perf_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pmu); - pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL); + pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL); name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", pmu->id); if (!name) { ret = -ENOMEM; @@ -674,7 +674,7 @@ cpuhp_instance_err: cpuhp_remove_multi_state(pmu->cpuhp_state); cpuhp_state_err: format_string_err: - ida_simple_remove(&ddr_ida, 
pmu->id); + ida_free(&ddr_ida, pmu->id); dev_warn(&pdev->dev, "i.MX9 DDR Perf PMU failed (%d), disabled\n", ret); return ret; } @@ -688,7 +688,7 @@ static int ddr_perf_remove(struct platform_device *pdev) perf_pmu_unregister(&pmu->pmu); - ida_simple_remove(&ddr_ida, pmu->id); + ida_free(&ddr_ida, pmu->id); return 0; } diff --git a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c index 63da05e5831c..636fb79647c8 100644 --- a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c @@ -383,8 +383,8 @@ static struct attribute *hisi_uc_pmu_events_attr[] = { HISI_PMU_EVENT_ATTR(cpu_rd, 0x10), HISI_PMU_EVENT_ATTR(cpu_rd64, 0x17), HISI_PMU_EVENT_ATTR(cpu_rs64, 0x19), - HISI_PMU_EVENT_ATTR(cpu_mru, 0x1a), - HISI_PMU_EVENT_ATTR(cycles, 0x9c), + HISI_PMU_EVENT_ATTR(cpu_mru, 0x1c), + HISI_PMU_EVENT_ATTR(cycles, 0x95), HISI_PMU_EVENT_ATTR(spipe_hit, 0xb3), HISI_PMU_EVENT_ATTR(hpipe_hit, 0xdb), HISI_PMU_EVENT_ATTR(cring_rxdat_cnt, 0xfa), diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c index 091fdc154d79..6bf6f0e7b597 100644 --- a/drivers/platform/chrome/cros_ec_debugfs.c +++ b/drivers/platform/chrome/cros_ec_debugfs.c @@ -454,7 +454,7 @@ static int cros_ec_create_panicinfo(struct cros_ec_debugfs *debug_info) debug_info->panicinfo_blob.data = data; debug_info->panicinfo_blob.size = ret; - debugfs_create_blob("panicinfo", S_IFREG | 0444, debug_info->dir, + debugfs_create_blob("panicinfo", 0444, debug_info->dir, &debug_info->panicinfo_blob); return 0; diff --git a/drivers/platform/mips/rs780e-acpi.c b/drivers/platform/mips/rs780e-acpi.c index bb0e8ae0eefd..5b8f9cc32589 100644 --- a/drivers/platform/mips/rs780e-acpi.c +++ b/drivers/platform/mips/rs780e-acpi.c @@ -32,29 +32,25 @@ static u8 pmio_read_index(u16 index, u8 reg) return inb(index + 1); } -void pm_iowrite(u8 reg, u8 value) +static void pm_iowrite(u8 reg, u8 value) { pmio_write_index(PM_INDEX, reg, value); } -EXPORT_SYMBOL(pm_iowrite); -u8 pm_ioread(u8 reg) +static u8 pm_ioread(u8 reg) { return pmio_read_index(PM_INDEX, reg); } -EXPORT_SYMBOL(pm_ioread); -void pm2_iowrite(u8 reg, u8 value) +static void pm2_iowrite(u8 reg, u8 value) { pmio_write_index(PM2_INDEX, reg, value); } -EXPORT_SYMBOL(pm2_iowrite); -u8 pm2_ioread(u8 reg) +static u8 pm2_ioread(u8 reg) { return pmio_read_index(PM2_INDEX, reg); } -EXPORT_SYMBOL(pm2_ioread); static void acpi_hw_clear_status(void) { diff --git a/drivers/platform/surface/aggregator/Kconfig b/drivers/platform/surface/aggregator/Kconfig index 88afc38ffdc5..957c216c180c 100644 --- a/drivers/platform/surface/aggregator/Kconfig +++ b/drivers/platform/surface/aggregator/Kconfig @@ -5,7 +5,7 @@ menuconfig SURFACE_AGGREGATOR tristate "Microsoft Surface System Aggregator Module Subsystem and Drivers" depends on SERIAL_DEV_BUS depends on ACPI && !RISCV - select CRC_CCITT + select CRC_ITU_T help The Surface System Aggregator Module (Surface SAM or SSAM) is an embedded controller (EC) found on 5th- and later-generation Microsoft diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c index e4dee920da18..20f3870915d2 100644 --- a/drivers/platform/surface/surface_acpi_notify.c +++ b/drivers/platform/surface/surface_acpi_notify.c @@ -736,34 +736,6 @@ do { \ #define san_consumer_warn(dev, handle, fmt, ...) 
\ san_consumer_printk(warn, dev, handle, fmt, ##__VA_ARGS__) -static bool is_san_consumer(struct platform_device *pdev, acpi_handle handle) -{ - struct acpi_handle_list dep_devices; - acpi_handle supplier = ACPI_HANDLE(&pdev->dev); - acpi_status status; - bool ret = false; - int i; - - if (!acpi_has_method(handle, "_DEP")) - return false; - - status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices); - if (ACPI_FAILURE(status)) { - san_consumer_dbg(&pdev->dev, handle, "failed to evaluate _DEP\n"); - return false; - } - - for (i = 0; i < dep_devices.count; i++) { - if (dep_devices.handles[i] == supplier) { - ret = true; - break; - } - } - - acpi_handle_list_free(&dep_devices); - return ret; -} - static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl, void *context, void **rv) { @@ -772,7 +744,7 @@ static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl, struct acpi_device *adev; struct device_link *link; - if (!is_san_consumer(pdev, handle)) + if (!acpi_device_dep(handle, ACPI_HANDLE(&pdev->dev))) return AE_OK; /* Ignore ACPI devices that are not present. */ diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c index fcf1ce8bbdc5..1cf2471d54dd 100644 --- a/drivers/platform/x86/p2sb.c +++ b/drivers/platform/x86/p2sb.c @@ -26,21 +26,6 @@ static const struct x86_cpu_id p2sb_cpu_ids[] = { {} }; -/* - * Cache BAR0 of P2SB device functions 0 to 7. - * TODO: The constant 8 is the number of functions that PCI specification - * defines. Same definitions exist tree-wide. Unify this definition and - * the other definitions then move to include/uapi/linux/pci.h. - */ -#define NR_P2SB_RES_CACHE 8 - -struct p2sb_res_cache { - u32 bus_dev_id; - struct resource res; -}; - -static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE]; - static int p2sb_get_devfn(unsigned int *devfn) { unsigned int fn = P2SB_DEVFN_DEFAULT; @@ -54,16 +39,8 @@ static int p2sb_get_devfn(unsigned int *devfn) return 0; } -static bool p2sb_valid_resource(struct resource *res) -{ - if (res->flags) - return true; - - return false; -} - /* Copy resource from the first BAR of the device in question */ -static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem) +static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem) { struct resource *bar0 = &pdev->resource[0]; @@ -79,64 +56,47 @@ static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem) mem->end = bar0->end; mem->flags = bar0->flags; mem->desc = bar0->desc; + + return 0; } -static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn) +static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem) { - struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)]; struct pci_dev *pdev; + int ret; pdev = pci_scan_single_device(bus, devfn); if (!pdev) - return; + return -ENODEV; - p2sb_read_bar0(pdev, &cache->res); - cache->bus_dev_id = bus->dev.id; + ret = p2sb_read_bar0(pdev, mem); pci_stop_and_remove_bus_device(pdev); - return; -} - -static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn) -{ - unsigned int slot, fn; - - if (PCI_FUNC(devfn) == 0) { - /* - * When function number of the P2SB device is zero, scan it and - * other function numbers, and if devices are available, cache - * their BAR0s. 
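The surface_acpi_notify hunk above is a good example of what the new acpi_device_dep() helper absorbs: the driver used to evaluate _DEP itself and scan the returned handle list for its own handle. Stripped of the ACPI specifics, the removed loop is just this membership test (acpi_handle_t and the fixed-size list are stand-ins):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

typedef const void *acpi_handle_t;      /* stand-in for acpi_handle */

struct handle_list { size_t count; acpi_handle_t handles[8]; };

/* What is_san_consumer() open-coded and acpi_device_dep() now hides. */
static bool lists_supplier(const struct handle_list *deps, acpi_handle_t supplier)
{
        for (size_t i = 0; i < deps->count; i++)
                if (deps->handles[i] == supplier)
                        return true;
        return false;
}

int main(void)
{
        int a, b;
        struct handle_list deps = { .count = 2, .handles = { &a, &b } };

        printf("%d %d\n", lists_supplier(&deps, &b), lists_supplier(&deps, NULL)); /* 1 0 */
        return 0;
}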
- */ - slot = PCI_SLOT(devfn); - for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++) - p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn)); - } else { - /* Scan the P2SB device and cache its BAR0 */ - p2sb_scan_and_cache_devfn(bus, devfn); - } - - if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res)) - return -ENOENT; - - return 0; -} - -static struct pci_bus *p2sb_get_bus(struct pci_bus *bus) -{ - static struct pci_bus *p2sb_bus; - - bus = bus ?: p2sb_bus; - if (bus) - return bus; - - /* Assume P2SB is on the bus 0 in domain 0 */ - p2sb_bus = pci_find_bus(0, 0); - return p2sb_bus; + return ret; } -static int p2sb_cache_resources(void) +/** + * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR + * @bus: PCI bus to communicate with + * @devfn: PCI slot and function to communicate with + * @mem: memory resource to be filled in + * + * The BIOS prevents the P2SB device from being enumerated by the PCI + * subsystem, so we need to unhide and hide it back to look up the BAR. + * + * If @bus is NULL, the bus 0 in domain 0 will be used. + * If @devfn is 0, it will be replaced by devfn of the P2SB device. + * + * Caller must provide a valid pointer to @mem. + * + * Locking is handled by pci_rescan_remove_lock mutex. + * + * Return: + * 0 on success or appropriate errno value on error. + */ +int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem) { - struct pci_bus *bus; + struct pci_dev *pdev_p2sb; unsigned int devfn_p2sb; u32 value = P2SBC_HIDE; int ret; @@ -146,9 +106,8 @@ static int p2sb_cache_resources(void) if (ret) return ret; - bus = p2sb_get_bus(NULL); - if (!bus) - return -ENODEV; + /* if @bus is NULL, use bus 0 in domain 0 */ + bus = bus ?: pci_find_bus(0, 0); /* * Prevent concurrent PCI bus scan from seeing the P2SB device and @@ -156,16 +115,17 @@ static int p2sb_cache_resources(void) */ pci_lock_rescan_remove(); - /* - * The BIOS prevents the P2SB device from being enumerated by the PCI - * subsystem, so we need to unhide and hide it back to lookup the BAR. - * Unhide the P2SB device here, if needed. - */ + /* Unhide the P2SB device, if needed */ pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value); if (value & P2SBC_HIDE) pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0); - ret = p2sb_scan_and_cache(bus, devfn_p2sb); + pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb); + if (devfn) + ret = p2sb_scan_and_read(bus, devfn, mem); + else + ret = p2sb_read_bar0(pdev_p2sb, mem); + pci_stop_and_remove_bus_device(pdev_p2sb); /* Hide the P2SB device, if it was hidden */ if (value & P2SBC_HIDE) @@ -173,62 +133,12 @@ static int p2sb_cache_resources(void) pci_unlock_rescan_remove(); - return ret; -} - -/** - * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR - * @bus: PCI bus to communicate with - * @devfn: PCI slot and function to communicate with - * @mem: memory resource to be filled in - * - * If @bus is NULL, the bus 0 in domain 0 will be used. - * If @devfn is 0, it will be replaced by devfn of the P2SB device. - * - * Caller must provide a valid pointer to @mem. - * - * Return: - * 0 on success or appropriate errno value on error.
- */ -int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem) -{ - struct p2sb_res_cache *cache; - int ret; - - bus = p2sb_get_bus(bus); - if (!bus) - return -ENODEV; - - if (!devfn) { - ret = p2sb_get_devfn(&devfn); - if (ret) - return ret; - } + if (ret) + return ret; - cache = &p2sb_resources[PCI_FUNC(devfn)]; - if (cache->bus_dev_id != bus->dev.id) + if (mem->flags == 0) return -ENODEV; - if (!p2sb_valid_resource(&cache->res)) - return -ENOENT; - - memcpy(mem, &cache->res, sizeof(*mem)); return 0; } EXPORT_SYMBOL_GPL(p2sb_bar); - -static int __init p2sb_fs_init(void) -{ - p2sb_cache_resources(); - return 0; -} - -/* - * pci_rescan_remove_lock to avoid access to unhidden P2SB devices can - * not be locked in sysfs pci bus rescan path because of deadlock. To - * avoid the deadlock, access to P2SB devices with the lock at an early - * step in kernel initialization and cache required resources. This - * should happen after subsys_initcall which initializes PCI subsystem - * and before device_initcall which requires P2SB resources. - */ -fs_initcall(p2sb_fs_init); diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 4f05f610391b..c02ce0834c2c 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c @@ -151,13 +151,13 @@ static int vendor_resource_matches(struct pnp_dev *dev, static void pnpacpi_parse_allocated_vendor(struct pnp_dev *dev, struct acpi_resource_vendor_typed *vendor) { - if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid, 16)) { - u64 start, length; + struct { u64 start, length; } range; - memcpy(&start, vendor->byte_data, sizeof(start)); - memcpy(&length, vendor->byte_data + 8, sizeof(length)); - - pnp_add_mem_resource(dev, start, start + length - 1, 0); + if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid, + sizeof(range))) { + memcpy(&range, vendor->byte_data, sizeof(range)); + pnp_add_mem_resource(dev, range.start, range.start + + range.length - 1, 0); } } diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c index ddc6f2163c8e..1f31dce5835a 100644 --- a/drivers/pnp/pnpbios/bioscalls.c +++ b/drivers/pnp/pnpbios/bioscalls.c @@ -60,7 +60,7 @@ do { \ set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \ } while(0) -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, +static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(DESC_DATA32_BIOS, (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); /* diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 4021d3d325f9..e7defce8cf48 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -4492,7 +4492,7 @@ ptp_ocp_remove(struct pci_dev *pdev) cancel_delayed_work_sync(&bp->sync_work); for (i = 0; i < OCP_SMA_NUM; i++) { if (bp->sma[i].dpll_pin) { - dpll_pin_unregister(bp->dpll, bp->sma[i].dpll_pin, &dpll_pins_ops, bp); + dpll_pin_unregister(bp->dpll, bp->sma[i].dpll_pin, &dpll_pins_ops, &bp->sma[i]); dpll_pin_put(bp->sma[i].dpll_pin); } } diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 83323c3d10af..4b84270a8906 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c @@ -51,8 +51,9 @@ static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); * @len: Length (in bytes) of the maintenance transaction * @data: Value to be read into * - * Generates a local SREP space read. Returns %0 on - * success or %-EINVAL on failure. + * Generates a local SREP space read. + * + * Returns: %0 on success or %-EINVAL on failure. 
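For orientation, this is roughly what a consumer of the rewritten p2sb_bar() looks like — a hedged sketch, not taken from this patch; the header path and the helper name are assumptions on my part, while the call signature comes from the kernel-doc above:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_data/x86/p2sb.h>       /* assumed header location */

/* Hypothetical helper: map the P2SB BAR0 window for register access. */
static void __iomem *map_p2sb_window(void)
{
        struct resource mem;

        /* NULL bus: domain 0, bus 0; devfn 0: the P2SB device itself */
        if (p2sb_bar(NULL, 0, &mem))
                return NULL;

        return ioremap(mem.start, resource_size(&mem));
}

With the early-boot cache gone, each call now performs the unhide/scan/hide sequence itself under pci_rescan_remove_lock, which is why the new kernel-doc documents the locking explicitly.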
*/ static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset, int len, u32 *data) @@ -75,8 +76,9 @@ static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset, * @len: Length (in bytes) of the maintenance transaction * @data: Value to be written * - * Generates a local write into SREP configuration space. Returns %0 on - * success or %-EINVAL on failure. + * Generates a local write into SREP configuration space. + * + * Returns: %0 on success or %-EINVAL on failure. */ static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset, int len, u32 data) @@ -104,7 +106,7 @@ static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset, * @do_wr: Operation flag (1 == MAINT_WR) * * Generates a RapidIO maintenance transaction (Read or Write). - * Returns %0 on success and %-EINVAL or %-EFAULT on failure. + * Returns: %0 on success and %-EINVAL or %-EFAULT on failure. */ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, u16 destid, u8 hopcount, u32 offset, int len, @@ -205,10 +207,10 @@ err_out: * @hopcount: Number of hops to target device * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction - * @val: Location to be read into + * @data: Location to be read into * * Generates a RapidIO maintenance read transaction. - * Returns %0 on success and %-EINVAL or %-EFAULT on failure. + * Returns: %0 on success and %-EINVAL or %-EFAULT on failure. */ static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 *data) @@ -228,10 +230,10 @@ static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid, * @hopcount: Number of hops to target device * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction - * @val: Value to be written + * @data: Value to be written * * Generates a RapidIO maintenance write transaction. - * Returns %0 on success and %-EINVAL or %-EFAULT on failure. + * Returns: %0 on success and %-EINVAL or %-EFAULT on failure. */ static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 data) @@ -250,6 +252,8 @@ static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid, * Handles inbound port-write interrupts. Copies PW message from an internal * buffer into PW message FIFO and schedules deferred routine to process * queued messages. + * + * Returns: %0 */ static int tsi721_pw_handler(struct tsi721_device *priv) @@ -307,6 +311,8 @@ static void tsi721_pw_dpc(struct work_struct *work) * tsi721_pw_enable - enable/disable port-write interface init * @mport: Master port implementing the port write unit * @enable: 1=enable; 0=disable port-write message handling + * + * Returns: %0 */ static int tsi721_pw_enable(struct rio_mport *mport, int enable) { @@ -336,7 +342,9 @@ static int tsi721_pw_enable(struct rio_mport *mport, int enable) * @destid: Destination ID of target device * @data: 16-bit info field of RapidIO doorbell * - * Sends a RapidIO doorbell message. Always returns %0. + * Sends a RapidIO doorbell message. + * + * Returns: %0 */ static int tsi721_dsend(struct rio_mport *mport, int index, u16 destid, u16 data) @@ -361,6 +369,8 @@ static int tsi721_dsend(struct rio_mport *mport, int index, * Handles inbound doorbell interrupts. Copies doorbell entry from an internal * buffer into DB message FIFO and schedules deferred routine to process * queued DBs. 
+ * + * Returns: %0 */ static int tsi721_dbell_handler(struct tsi721_device *priv) @@ -453,6 +463,8 @@ static void tsi721_db_dpc(struct work_struct *work) * * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported * interrupt events and calls an event-specific handler(s). + * + * Returns: %IRQ_HANDLED or %IRQ_NONE */ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) { @@ -607,6 +619,8 @@ static void tsi721_interrupts_init(struct tsi721_device *priv) * @ptr: Pointer to interrupt-specific data (tsi721_device structure) * * Handles outbound messaging interrupts signaled using MSI-X. + * + * Returns: %IRQ_HANDLED */ static irqreturn_t tsi721_omsg_msix(int irq, void *ptr) { @@ -624,6 +638,8 @@ static irqreturn_t tsi721_omsg_msix(int irq, void *ptr) * @ptr: Pointer to interrupt-specific data (tsi721_device structure) * * Handles inbound messaging interrupts signaled using MSI-X. + * + * Returns: %IRQ_HANDLED */ static irqreturn_t tsi721_imsg_msix(int irq, void *ptr) { @@ -641,6 +657,8 @@ static irqreturn_t tsi721_imsg_msix(int irq, void *ptr) * @ptr: Pointer to interrupt-specific data (tsi721_device structure) * * Handles Tsi721 interrupts from SRIO MAC. + * + * Returns: %IRQ_HANDLED */ static irqreturn_t tsi721_srio_msix(int irq, void *ptr) { @@ -663,6 +681,8 @@ static irqreturn_t tsi721_srio_msix(int irq, void *ptr) * Handles Tsi721 interrupts from SR2PC Channel. * NOTE: At this moment services only one SR2PC channel associated with inbound * doorbells. + * + * Returns: %IRQ_HANDLED */ static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr) { @@ -689,6 +709,8 @@ static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr) * Registers MSI-X interrupt service routines for interrupts that are active * immediately after mport initialization. Messaging interrupt service routines * should be registered during corresponding open requests. + * + * Returns: %0 on success or -errno value on failure. */ static int tsi721_request_msix(struct tsi721_device *priv) { @@ -717,6 +739,8 @@ static int tsi721_request_msix(struct tsi721_device *priv) * * Configures MSI-X support for Tsi721. Supports only an exact number * of requested vectors. + * + * Returns: %0 on success or -errno value on failure. */ static int tsi721_enable_msix(struct tsi721_device *priv) { @@ -1334,7 +1358,7 @@ static void tsi721_close_sr2pc_mapping(struct tsi721_device *priv) * @priv: pointer to tsi721 private data * * Initializes inbound port write handler. - * Returns %0 on success or %-ENOMEM on failure. + * Returns: %0 on success or %-ENOMEM on failure. */ static int tsi721_port_write_init(struct tsi721_device *priv) { @@ -1412,7 +1436,8 @@ static void tsi721_doorbell_free(struct tsi721_device *priv) * * Initialize BDMA channel allocated for RapidIO maintenance read/write * request generation - * Returns %0 on success or %-ENOMEM on failure. + * + * Returns: %0 on success or %-ENOMEM on failure. */ static int tsi721_bdma_maint_init(struct tsi721_device *priv) { @@ -1662,6 +1687,8 @@ tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch, * @mbox: Outbound mailbox * @buffer: Message to add to outbound queue * @len: Length of message + * + * Returns: %0 on success or -errno value on failure. 
*/ static int tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, @@ -1869,6 +1896,8 @@ no_sts_update: * @dev_id: Device specific pointer to pass on event * @mbox: Mailbox to open * @entries: Number of entries in the outbound mailbox ring + * + * Returns: %0 on success or -errno value on failure. */ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) @@ -2156,6 +2185,8 @@ static void tsi721_imsg_handler(struct tsi721_device *priv, int ch) * @dev_id: Device specific pointer to pass on event * @mbox: Mailbox to open * @entries: Number of entries in the inbound mailbox ring + * + * Returns: %0 on success or -errno value on failure. */ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) @@ -2409,6 +2440,8 @@ static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox) * @mport: Master port implementing the Inbound Messaging Engine * @mbox: Inbound mailbox number * @buf: Buffer to add to inbound queue + * + * Returns: %0 on success or -errno value on failure. */ static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) { @@ -2439,7 +2472,7 @@ out: * @mport: Master port implementing the Inbound Messaging Engine * @mbox: Inbound mailbox number * - * Returns pointer to the message on success or NULL on failure. + * Returns: pointer to the message on success or %NULL on failure. */ static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox) { @@ -2507,6 +2540,8 @@ out: * @priv: pointer to tsi721 private data * * Configures Tsi721 messaging engine. + * + * Returns: %0 */ static int tsi721_messages_init(struct tsi721_device *priv) { @@ -2539,9 +2574,9 @@ static int tsi721_messages_init(struct tsi721_device *priv) /** * tsi721_query_mport - Fetch inbound message from the Tsi721 MSG Queue * @mport: Master port implementing the Inbound Messaging Engine - * @mbox: Inbound mailbox number + * @attr: mport device attributes * - * Returns pointer to the message on success or NULL on failure. + * Returns: %0 on success or -errno value on failure. */ static int tsi721_query_mport(struct rio_mport *mport, struct rio_mport_attr *attr) @@ -2653,6 +2688,8 @@ static void tsi721_mport_release(struct device *dev) * @priv: pointer to tsi721 private data * * Configures Tsi721 as RapidIO master port. + * + * Returns: %0 on success or -errno value on failure. */ static int tsi721_setup_mport(struct tsi721_device *priv) { diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c index d375c02059f3..f77f75172bdc 100644 --- a/drivers/rapidio/devices/tsi721_dma.c +++ b/drivers/rapidio/devices/tsi721_dma.c @@ -283,11 +283,13 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan) #ifdef CONFIG_PCI_MSI /** - * tsi721_omsg_msix - MSI-X interrupt handler for BDMA channels + * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels * @irq: Linux interrupt number * @ptr: Pointer to interrupt-specific data (BDMA channel structure) * * Handles BDMA channel interrupts signaled using MSI-X. + * + * Returns: %IRQ_HANDLED */ static irqreturn_t tsi721_bdma_msix(int irq, void *ptr) { diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index f3ec24691378..550145f82726 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -56,6 +56,16 @@ config REGULATOR_USERSPACE_CONSUMER If unsure, say no.
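The tsi721 hunks above convert free-form "Returns %0 ..." prose into kernel-doc's dedicated "Returns:" section, which scripts/kernel-doc renders as a separate return-value paragraph instead of folding it into the function description. A minimal sketch of the resulting convention; the function below is hypothetical, for illustration only, and assumes only <linux/io.h> and <linux/errno.h>:

/**
 * tsi_example_read - read one 32-bit register (hypothetical example)
 * @base: mapped register base
 * @off: byte offset of the register
 * @val: location the value is read into
 *
 * Performs a single aligned 32-bit MMIO read.
 *
 * Returns: %0 on success or %-EINVAL if @off is not 32-bit aligned.
 */
static int tsi_example_read(void __iomem *base, u32 off, u32 *val)
{
	if (off & 0x3)
		return -EINVAL;

	*val = readl(base + off);
	return 0;
}

The % prefix marks constants (%0, %-EINVAL, %NULL, %IRQ_HANDLED) so the documentation tooling formats them as literals, which is why the hunks above add it while restructuring.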
+config REGULATOR_NETLINK_EVENTS + bool "Enable support for receiving regulator events via netlink" + depends on NET + help + Enabling this option allows the kernel to broadcast regulator events using + the netlink mechanism. User-space applications can subscribe to these events + for real-time updates on various regulator events. + + If unsure, say no. + config REGULATOR_88PG86X tristate "Marvell 88PG86X voltage regulators" depends on I2C diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index b2b059b5ee56..46fb569e6be8 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o helpers.o devres.o irq_helpers.o +obj-$(CONFIG_REGULATOR_NETLINK_EVENTS) += event.o obj-$(CONFIG_OF) += of_regulator.o obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c index b465c0010665..4b54068d4f59 100644 --- a/drivers/regulator/arizona-ldo1.c +++ b/drivers/regulator/arizona-ldo1.c @@ -339,14 +339,12 @@ static int arizona_ldo1_probe(struct platform_device *pdev) return ret; } -static int arizona_ldo1_remove(struct platform_device *pdev) +static void arizona_ldo1_remove(struct platform_device *pdev) { struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev); if (ldo1->ena_gpiod) gpiod_put(ldo1->ena_gpiod); - - return 0; } static int madera_ldo1_probe(struct platform_device *pdev) @@ -377,7 +375,7 @@ static int madera_ldo1_probe(struct platform_device *pdev) static struct platform_driver arizona_ldo1_driver = { .probe = arizona_ldo1_probe, - .remove = arizona_ldo1_remove, + .remove_new = arizona_ldo1_remove, .driver = { .name = "arizona-ldo1", .probe_type = PROBE_FORCE_SYNCHRONOUS, @@ -386,7 +384,7 @@ static struct platform_driver arizona_ldo1_driver = { static struct platform_driver madera_ldo1_driver = { .probe = madera_ldo1_probe, - .remove = arizona_ldo1_remove, + .remove_new = arizona_ldo1_remove, .driver = { .name = "madera-ldo1", .probe_type = PROBE_FORCE_SYNCHRONOUS, diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c index d469481d8442..c7ceba56e7dc 100644 --- a/drivers/regulator/bd9571mwv-regulator.c +++ b/drivers/regulator/bd9571mwv-regulator.c @@ -260,10 +260,9 @@ static const struct dev_pm_ops bd9571mwv_pm = { SET_SYSTEM_SLEEP_PM_OPS(bd9571mwv_suspend, bd9571mwv_resume) }; -static int bd9571mwv_regulator_remove(struct platform_device *pdev) +static void bd9571mwv_regulator_remove(struct platform_device *pdev) { device_remove_file(&pdev->dev, &dev_attr_backup_mode); - return 0; } #define DEV_PM_OPS &bd9571mwv_pm #else @@ -357,7 +356,7 @@ static struct platform_driver bd9571mwv_regulator_driver = { .pm = DEV_PM_OPS, }, .probe = bd9571mwv_regulator_probe, - .remove = bd9571mwv_regulator_remove, + .remove_new = bd9571mwv_regulator_remove, .id_table = bd9571mwv_regulator_id_table, }; module_platform_driver(bd9571mwv_regulator_driver); diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 3137e40fcd3e..a968dabb48f5 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -19,6 +19,7 @@ #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/of.h> +#include <linux/reboot.h> #include <linux/regmap.h> #include <linux/regulator/of_regulator.h> #include <linux/regulator/consumer.h> @@ -32,6 +33,7 @@ #include "dummy.h" #include "internal.h" +#include "regnl.h" static 
DEFINE_WW_CLASS(regulator_ww_class); static DEFINE_MUTEX(regulator_nesting_mutex); @@ -2918,7 +2920,8 @@ static int _regulator_enable(struct regulator *regulator) /* Fallthrough on positive return values - already enabled */ } - rdev->use_count++; + if (regulator->enable_count == 1) + rdev->use_count++; return 0; @@ -2993,37 +2996,40 @@ static int _regulator_disable(struct regulator *regulator) lockdep_assert_held_once(&rdev->mutex.base); - if (WARN(rdev->use_count <= 0, + if (WARN(regulator->enable_count == 0, "unbalanced disables for %s\n", rdev_get_name(rdev))) return -EIO; - /* are we the last user and permitted to disable ? */ - if (rdev->use_count == 1 && - (rdev->constraints && !rdev->constraints->always_on)) { - - /* we are last user */ - if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) { - ret = _notifier_call_chain(rdev, - REGULATOR_EVENT_PRE_DISABLE, - NULL); - if (ret & NOTIFY_STOP_MASK) - return -EINVAL; - - ret = _regulator_do_disable(rdev); - if (ret < 0) { - rdev_err(rdev, "failed to disable: %pe\n", ERR_PTR(ret)); - _notifier_call_chain(rdev, - REGULATOR_EVENT_ABORT_DISABLE, + if (regulator->enable_count == 1) { + /* disabling last enable_count from this regulator */ + /* are we the last user and permitted to disable ? */ + if (rdev->use_count == 1 && + (rdev->constraints && !rdev->constraints->always_on)) { + + /* we are last user */ + if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) { + ret = _notifier_call_chain(rdev, + REGULATOR_EVENT_PRE_DISABLE, + NULL); + if (ret & NOTIFY_STOP_MASK) + return -EINVAL; + + ret = _regulator_do_disable(rdev); + if (ret < 0) { + rdev_err(rdev, "failed to disable: %pe\n", ERR_PTR(ret)); + _notifier_call_chain(rdev, + REGULATOR_EVENT_ABORT_DISABLE, + NULL); + return ret; + } + _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, NULL); - return ret; } - _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, - NULL); - } - rdev->use_count = 0; - } else if (rdev->use_count > 1) { - rdev->use_count--; + rdev->use_count = 0; + } else if (rdev->use_count > 1) { + rdev->use_count--; + } } if (ret == 0) @@ -4849,7 +4855,23 @@ static int _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data) { /* call rdev chain first */ - return blocking_notifier_call_chain(&rdev->notifier, event, data); + int ret = blocking_notifier_call_chain(&rdev->notifier, event, data); + + if (IS_REACHABLE(CONFIG_REGULATOR_NETLINK_EVENTS)) { + struct device *parent = rdev->dev.parent; + const char *rname = rdev_get_name(rdev); + char name[32]; + + /* Avoid duplicate debugfs directory names */ + if (parent && rname == rdev->desc->name) { + snprintf(name, sizeof(name), "%s-%s", dev_name(parent), + rname); + rname = name; + } + reg_generate_netlink_event(rname, event); + } + + return ret; } int _regulator_bulk_get(struct device *dev, int num_consumers, @@ -5062,6 +5084,41 @@ void regulator_bulk_free(int num_consumers, EXPORT_SYMBOL_GPL(regulator_bulk_free); /** + * regulator_handle_critical - Handle events for system-critical regulators. + * @rdev: The regulator device. + * @event: The event being handled. + * + * This function handles critical events such as under-voltage, over-current, + * and unknown errors for regulators deemed system-critical. On detecting such + * events, it triggers a hardware protection shutdown with a defined timeout. 
+ */ +static void regulator_handle_critical(struct regulator_dev *rdev, + unsigned long event) +{ + const char *reason = NULL; + + if (!rdev->constraints->system_critical) + return; + + switch (event) { + case REGULATOR_EVENT_UNDER_VOLTAGE: + reason = "System critical regulator: voltage drop detected"; + break; + case REGULATOR_EVENT_OVER_CURRENT: + reason = "System critical regulator: over-current detected"; + break; + case REGULATOR_EVENT_FAIL: + reason = "System critical regulator: unknown error"; + } + + if (!reason) + return; + + hw_protection_shutdown(reason, + rdev->constraints->uv_less_critical_window_ms); +} + +/** * regulator_notifier_call_chain - call regulator event notifier * @rdev: regulator source * @event: notifier block @@ -5073,6 +5130,8 @@ EXPORT_SYMBOL_GPL(regulator_bulk_free); int regulator_notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data) { + regulator_handle_critical(rdev, event); + _notifier_call_chain(rdev, event, data); return NOTIFY_DONE; @@ -6234,6 +6293,14 @@ unlock: return 0; } +static bool regulator_ignore_unused; +static int __init regulator_ignore_unused_setup(char *__unused) +{ + regulator_ignore_unused = true; + return 1; +} +__setup("regulator_ignore_unused", regulator_ignore_unused_setup); + static void regulator_init_complete_work_function(struct work_struct *work) { /* @@ -6246,6 +6313,15 @@ static void regulator_init_complete_work_function(struct work_struct *work) class_for_each_device(&regulator_class, NULL, NULL, regulator_register_resolve_supply); + /* + * For debugging purposes, it may be useful to prevent unused + * regulators from being disabled. + */ + if (regulator_ignore_unused) { + pr_warn("regulator: Not disabling unused regulators\n"); + return; + } + /* If we have a full configuration then disable any regulators * we have permission to change the status for and which are * not in use or always_on.
This is effectively the default diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c index 34c5e485d0af..1e2d54da1b9a 100644 --- a/drivers/regulator/db8500-prcmu.c +++ b/drivers/regulator/db8500-prcmu.c @@ -469,11 +469,9 @@ static int db8500_regulator_probe(struct platform_device *pdev) return 0; } -static int db8500_regulator_remove(struct platform_device *pdev) +static void db8500_regulator_remove(struct platform_device *pdev) { ux500_regulator_debug_exit(); - - return 0; } static struct platform_driver db8500_regulator_driver = { @@ -482,7 +480,7 @@ static struct platform_driver db8500_regulator_driver = { .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, .probe = db8500_regulator_probe, - .remove = db8500_regulator_remove, + .remove_new = db8500_regulator_remove, }; static int __init db8500_regulator_init(void) diff --git a/drivers/regulator/event.c b/drivers/regulator/event.c new file mode 100644 index 000000000000..ea3bd49544e8 --- /dev/null +++ b/drivers/regulator/event.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Regulator event over netlink + * + * Author: Naresh Solanki <Naresh.Solanki@9elements.com> + */ + +#include <regulator/regulator.h> +#include <net/netlink.h> +#include <net/genetlink.h> +#include <linux/atomic.h> + +#include "regnl.h" + +static atomic_t reg_event_seqnum = ATOMIC_INIT(0); + +static const struct genl_multicast_group reg_event_mcgrps[] = { + { .name = REG_GENL_MCAST_GROUP_NAME, }, +}; + +static struct genl_family reg_event_genl_family __ro_after_init = { + .module = THIS_MODULE, + .name = REG_GENL_FAMILY_NAME, + .version = REG_GENL_VERSION, + .maxattr = REG_GENL_ATTR_MAX, + .mcgrps = reg_event_mcgrps, + .n_mcgrps = ARRAY_SIZE(reg_event_mcgrps), +}; + +int reg_generate_netlink_event(const char *reg_name, u64 event) +{ + struct sk_buff *skb; + struct nlattr *attr; + struct reg_genl_event *edata; + void *msg_header; + int size; + + /* allocate memory */ + size = nla_total_size(sizeof(struct reg_genl_event)) + + nla_total_size(0); + + skb = genlmsg_new(size, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + /* add the genetlink message header */ + msg_header = genlmsg_put(skb, 0, atomic_inc_return(&reg_event_seqnum), + &reg_event_genl_family, 0, REG_GENL_CMD_EVENT); + if (!msg_header) { + nlmsg_free(skb); + return -ENOMEM; + } + + /* fill the data */ + attr = nla_reserve(skb, REG_GENL_ATTR_EVENT, sizeof(struct reg_genl_event)); + if (!attr) { + nlmsg_free(skb); + return -EINVAL; + } + + edata = nla_data(attr); + memset(edata, 0, sizeof(struct reg_genl_event)); + + strscpy(edata->reg_name, reg_name, sizeof(edata->reg_name)); + edata->event = event; + + /* send multicast genetlink message */ + genlmsg_end(skb, msg_header); + size = genlmsg_multicast(&reg_event_genl_family, skb, 0, 0, GFP_ATOMIC); + + return size; +} + +static int __init reg_event_genetlink_init(void) +{ + return genl_register_family(&reg_event_genl_family); +} + +static int __init reg_event_init(void) +{ + int error; + + /* create genetlink for regulator event */ + error = reg_event_genetlink_init(); + if (error) + pr_warn("Failed to create genetlink family for reg event\n"); + + return 0; +} + +fs_initcall(reg_event_init); diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 1b65e5e4e40f..03afc160fc72 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -131,6 +131,8 @@ static int of_get_regulation_constraints(struct device *dev, constraints->valid_ops_mask |= REGULATOR_CHANGE_STATUS; constraints->pull_down =
of_property_read_bool(np, "regulator-pull-down"); + constraints->system_critical = of_property_read_bool(np, + "system-critical-regulator"); if (of_property_read_bool(np, "regulator-allow-bypass")) constraints->valid_ops_mask |= REGULATOR_CHANGE_BYPASS; @@ -173,6 +175,13 @@ static int of_get_regulation_constraints(struct device *dev, if (!ret) constraints->enable_time = pval; + ret = of_property_read_u32(np, "regulator-uv-survival-time-ms", &pval); + if (!ret) + constraints->uv_less_critical_window_ms = pval; + else + constraints->uv_less_critical_window_ms = + REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS; + constraints->soft_start = of_property_read_bool(np, "regulator-soft-start"); ret = of_property_read_u32(np, "regulator-active-discharge", &pval); diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index e0dc033aae0f..60656a815b9e 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c @@ -1594,7 +1594,7 @@ static const struct of_device_id of_palmas_match_tbl[] = { static int palmas_regulators_probe(struct platform_device *pdev) { struct palmas *palmas = dev_get_drvdata(pdev->dev.parent); - struct palmas_pmic_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct palmas_pmic_platform_data *pdata; struct device_node *node = pdev->dev.of_node; struct palmas_pmic_driver_data *driver_data; struct regulator_config config = { }; diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index cf502eec0915..80e304711345 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. +// Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. #define pr_fmt(fmt) "%s: " fmt, __func__ @@ -68,10 +69,11 @@ enum rpmh_regulator_type { * @regulator_type: RPMh accelerator type used to manage this * regulator * @ops: Pointer to regulator ops callback structure - * @voltage_range: The single range of voltages supported by this - * PMIC regulator type + * @voltage_ranges: The possible ranges of voltages supported by this + * PMIC regulator type + * @n_linear_ranges: Number of entries in voltage_ranges * @n_voltages: The number of unique voltage set points defined - * by voltage_range + * by voltage_ranges * @hpm_min_load_uA: Minimum load current in microamps that requires * high power mode (HPM) operation. This is used * for LDO hardware type regulators only. 
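The qcom-rpmh hunks that follow replace the single voltage_range field with a voltage_ranges array plus an n_linear_ranges count, so one hardware type can describe a selector-to-voltage map made of several ramps with different step sizes; the four-range pmic5_pldo502ln entry added below is the motivating case, while for the existing one-range entries the conversion is mechanical and n_linear_ranges = 1 behaves exactly like the old field. A standalone sketch of the lookup the regulator core's linear-range helpers perform over such an array, using a simplified stand-in type rather than the in-tree struct linear_range:

#include <stdio.h>

/* stand-in for struct linear_range: min_uV at min_sel, fixed step up to max_sel */
struct lin_range {
	unsigned int min_uV;
	unsigned int min_sel;
	unsigned int max_sel;
	unsigned int step_uV;
};

/* mirrors the four REGULATOR_LINEAR_RANGE() entries of pmic5_pldo502ln below */
static const struct lin_range pldo502ln_ranges[] = {
	{ 1800000,  0,  2, 200000 },
	{ 2608000,  3, 28,  16000 },
	{ 3104000, 29, 30,  96000 },
	{ 3312000, 31, 31,      0 },
};

/* selector -> microvolts; returns 0 for a selector outside every range */
static unsigned int sel_to_uV(const struct lin_range *r, unsigned int n,
			      unsigned int sel)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (sel >= r[i].min_sel && sel <= r[i].max_sel)
			return r[i].min_uV + (sel - r[i].min_sel) * r[i].step_uV;
	return 0;
}

int main(void)
{
	/* selector 28 lands in the second range: 2608000 + 25 * 16000 = 3008000 uV */
	printf("%u uV\n", sel_to_uV(pldo502ln_ranges, 4, 28));
	return 0;
}

Note how n_voltages stays the total selector count (32 for the four ranges above), so drivers that expose only one ramp keep their old n_voltages value unchanged.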
@@ -85,7 +87,8 @@ enum rpmh_regulator_type { struct rpmh_vreg_hw_data { enum rpmh_regulator_type regulator_type; const struct regulator_ops *ops; - const struct linear_range voltage_range; + const struct linear_range *voltage_ranges; + int n_linear_ranges; int n_voltages; int hpm_min_load_uA; const int *pmic_mode_map; @@ -449,8 +452,8 @@ static int rpmh_regulator_init_vreg(struct rpmh_vreg *vreg, struct device *dev, vreg->mode = REGULATOR_MODE_INVALID; if (rpmh_data->hw_data->n_voltages) { - vreg->rdesc.linear_ranges = &rpmh_data->hw_data->voltage_range; - vreg->rdesc.n_linear_ranges = 1; + vreg->rdesc.linear_ranges = rpmh_data->hw_data->voltage_ranges; + vreg->rdesc.n_linear_ranges = rpmh_data->hw_data->n_linear_ranges; vreg->rdesc.n_voltages = rpmh_data->hw_data->n_voltages; } @@ -508,6 +511,14 @@ static const int pmic_mode_map_pmic5_ldo[REGULATOR_MODE_STANDBY + 1] = { [REGULATOR_MODE_FAST] = -EINVAL, }; +static const int pmic_mode_map_pmic5_ldo_hpm[REGULATOR_MODE_STANDBY + 1] = { + [REGULATOR_MODE_INVALID] = -EINVAL, + [REGULATOR_MODE_STANDBY] = -EINVAL, + [REGULATOR_MODE_IDLE] = -EINVAL, + [REGULATOR_MODE_NORMAL] = PMIC5_LDO_MODE_HPM, + [REGULATOR_MODE_FAST] = -EINVAL, +}; + static unsigned int rpmh_regulator_pmic4_ldo_of_map_mode(unsigned int rpmh_mode) { unsigned int mode; @@ -613,7 +624,10 @@ static unsigned int rpmh_regulator_pmic4_bob_of_map_mode(unsigned int rpmh_mode) static const struct rpmh_vreg_hw_data pmic4_pldo = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_drms_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(1664000, 0, 255, 8000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(1664000, 0, 255, 8000), + }, + .n_linear_ranges = 1, .n_voltages = 256, .hpm_min_load_uA = 10000, .pmic_mode_map = pmic_mode_map_pmic4_ldo, @@ -623,7 +637,10 @@ static const struct rpmh_vreg_hw_data pmic4_pldo = { static const struct rpmh_vreg_hw_data pmic4_pldo_lv = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_drms_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(1256000, 0, 127, 8000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(1256000, 0, 127, 8000), + }, + .n_linear_ranges = 1, .n_voltages = 128, .hpm_min_load_uA = 10000, .pmic_mode_map = pmic_mode_map_pmic4_ldo, @@ -633,7 +650,10 @@ static const struct rpmh_vreg_hw_data pmic4_pldo_lv = { static const struct rpmh_vreg_hw_data pmic4_nldo = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_drms_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000), + }, + .n_linear_ranges = 1, .n_voltages = 128, .hpm_min_load_uA = 30000, .pmic_mode_map = pmic_mode_map_pmic4_ldo, @@ -643,7 +663,10 @@ static const struct rpmh_vreg_hw_data pmic4_nldo = { static const struct rpmh_vreg_hw_data pmic4_hfsmps3 = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000), + }, + .n_linear_ranges = 1, .n_voltages = 216, .pmic_mode_map = pmic_mode_map_pmic4_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, @@ -652,7 +675,10 @@ static const struct rpmh_vreg_hw_data pmic4_hfsmps3 = { static const struct rpmh_vreg_hw_data pmic4_ftsmps426 = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 258, 4000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(320000, 0, 258, 
4000), + }, + .n_linear_ranges = 1, .n_voltages = 259, .pmic_mode_map = pmic_mode_map_pmic4_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, @@ -661,7 +687,10 @@ static const struct rpmh_vreg_hw_data pmic4_ftsmps426 = { static const struct rpmh_vreg_hw_data pmic4_bob = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_bypass_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(1824000, 0, 83, 32000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(1824000, 0, 83, 32000), + }, + .n_linear_ranges = 1, .n_voltages = 84, .pmic_mode_map = pmic_mode_map_pmic4_bob, .of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode, @@ -676,7 +705,10 @@ static const struct rpmh_vreg_hw_data pmic4_lvs = { static const struct rpmh_vreg_hw_data pmic5_pldo = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_drms_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(1504000, 0, 255, 8000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(1504000, 0, 255, 8000), + }, + .n_linear_ranges = 1, .n_voltages = 256, .hpm_min_load_uA = 10000, .pmic_mode_map = pmic_mode_map_pmic5_ldo, @@ -686,7 +718,10 @@ static const struct rpmh_vreg_hw_data pmic5_pldo = { static const struct rpmh_vreg_hw_data pmic5_pldo_lv = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_drms_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(1504000, 0, 62, 8000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(1504000, 0, 62, 8000), + }, + .n_linear_ranges = 1, .n_voltages = 63, .hpm_min_load_uA = 10000, .pmic_mode_map = pmic_mode_map_pmic5_ldo, @@ -696,17 +731,50 @@ static const struct rpmh_vreg_hw_data pmic5_pldo_lv = { static const struct rpmh_vreg_hw_data pmic5_pldo515_mv = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_drms_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(1800000, 0, 187, 8000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(1800000, 0, 187, 8000), + }, + .n_linear_ranges = 1, .n_voltages = 188, .hpm_min_load_uA = 10000, .pmic_mode_map = pmic_mode_map_pmic5_ldo, .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode, }; +static const struct rpmh_vreg_hw_data pmic5_pldo502 = { + .regulator_type = VRM, + .ops = &rpmh_regulator_vrm_ops, + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(1504000, 0, 255, 8000), + }, + .n_linear_ranges = 1, + .n_voltages = 256, + .pmic_mode_map = pmic_mode_map_pmic5_ldo_hpm, + .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode, +}; + +static const struct rpmh_vreg_hw_data pmic5_pldo502ln = { + .regulator_type = VRM, + .ops = &rpmh_regulator_vrm_ops, + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(1800000, 0, 2, 200000), + REGULATOR_LINEAR_RANGE(2608000, 3, 28, 16000), + REGULATOR_LINEAR_RANGE(3104000, 29, 30, 96000), + REGULATOR_LINEAR_RANGE(3312000, 31, 31, 0), + }, + .n_linear_ranges = 4, + .n_voltages = 32, + .pmic_mode_map = pmic_mode_map_pmic5_ldo_hpm, + .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode, +}; + static const struct rpmh_vreg_hw_data pmic5_nldo = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_drms_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 123, 8000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(320000, 0, 123, 8000), + }, + .n_linear_ranges = 1, .n_voltages = 124, .hpm_min_load_uA = 30000, .pmic_mode_map = pmic_mode_map_pmic5_ldo, @@ -716,17 +784,36 @@ static const struct rpmh_vreg_hw_data pmic5_nldo = { static const struct rpmh_vreg_hw_data pmic5_nldo515 = { .regulator_type = VRM, .ops = 
&rpmh_regulator_vrm_drms_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 210, 8000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(320000, 0, 210, 8000), + }, + .n_linear_ranges = 1, .n_voltages = 211, .hpm_min_load_uA = 30000, .pmic_mode_map = pmic_mode_map_pmic5_ldo, .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode, }; +static const struct rpmh_vreg_hw_data pmic5_nldo502 = { + .regulator_type = VRM, + .ops = &rpmh_regulator_vrm_drms_ops, + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(528000, 0, 127, 8000), + }, + .n_linear_ranges = 1, + .n_voltages = 128, + .hpm_min_load_uA = 30000, + .pmic_mode_map = pmic_mode_map_pmic5_ldo, + .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode, +}; + static const struct rpmh_vreg_hw_data pmic5_hfsmps510 = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000), + }, + .n_linear_ranges = 1, .n_voltages = 216, .pmic_mode_map = pmic_mode_map_pmic5_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, @@ -735,7 +822,10 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps510 = { static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000), + }, + .n_linear_ranges = 1, .n_voltages = 264, .pmic_mode_map = pmic_mode_map_pmic5_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, @@ -744,7 +834,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = { static const struct rpmh_vreg_hw_data pmic5_ftsmps520 = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000), + }, + .n_linear_ranges = 1, .n_voltages = 264, .pmic_mode_map = pmic_mode_map_pmic5_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, @@ -753,7 +846,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps520 = { static const struct rpmh_vreg_hw_data pmic5_ftsmps525_lv = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 267, 4000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(300000, 0, 267, 4000), + }, + .n_linear_ranges = 1, .n_voltages = 268, .pmic_mode_map = pmic_mode_map_pmic5_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, @@ -762,7 +858,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps525_lv = { static const struct rpmh_vreg_hw_data pmic5_ftsmps525_mv = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(600000, 0, 267, 8000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(600000, 0, 267, 8000), + }, + .n_linear_ranges = 1, .n_voltages = 268, .pmic_mode_map = pmic_mode_map_pmic5_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, @@ -771,7 +870,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps525_mv = { static const struct rpmh_vreg_hw_data pmic5_ftsmps527 = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000), + }, + .n_linear_ranges = 1, 
.n_voltages = 215, .pmic_mode_map = pmic_mode_map_pmic5_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, @@ -780,7 +882,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps527 = { static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 235, 16000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(320000, 0, 235, 16000), + }, + .n_linear_ranges = 1, .n_voltages = 236, .pmic_mode_map = pmic_mode_map_pmic5_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, @@ -789,7 +894,10 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = { static const struct rpmh_vreg_hw_data pmic5_hfsmps515_1 = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(900000, 0, 4, 16000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(900000, 0, 4, 16000), + }, + .n_linear_ranges = 1, .n_voltages = 5, .pmic_mode_map = pmic_mode_map_pmic5_smps, .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, @@ -798,7 +906,10 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps515_1 = { static const struct rpmh_vreg_hw_data pmic5_bob = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_bypass_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(3000000, 0, 31, 32000), + .voltage_ranges = (struct linear_range[]) { + REGULATOR_LINEAR_RANGE(3000000, 0, 31, 32000), + }, + .n_linear_ranges = 1, .n_voltages = 32, .pmic_mode_map = pmic_mode_map_pmic5_bob, .of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode, @@ -1147,6 +1258,16 @@ static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = { {} }; +static const struct rpmh_vreg_init_data pm8010_vreg_data[] = { + RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo502, "vdd-l1-l2"), + RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo502, "vdd-l1-l2"), + RPMH_VREG("ldo3", "ldo%s3", &pmic5_pldo502ln, "vdd-l3-l4"), + RPMH_VREG("ldo4", "ldo%s4", &pmic5_pldo502ln, "vdd-l3-l4"), + RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo502, "vdd-l5"), + RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo502ln, "vdd-l6"), + RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo502, "vdd-l7"), +}; + static const struct rpmh_vreg_init_data pm6150_vreg_data[] = { RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"), RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps510, "vdd-s2"), @@ -1463,6 +1584,10 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = { .data = pm8009_1_vreg_data, }, { + .compatible = "qcom,pm8010-rpmh-regulators", + .data = pm8010_vreg_data, + }, + { .compatible = "qcom,pm8150-rpmh-regulators", .data = pm8150_vreg_data, }, diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index f53ada076252..d1be9568025e 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -796,6 +796,7 @@ static const struct rpm_regulator_data rpm_mp5496_regulators[] = { { "s1", QCOM_SMD_RPM_SMPA, 1, &mp5496_smps, "s1" }, { "s2", QCOM_SMD_RPM_SMPA, 2, &mp5496_smps, "s2" }, { "l2", QCOM_SMD_RPM_LDOA, 2, &mp5496_ldoa2, "l2" }, + { "l5", QCOM_SMD_RPM_LDOA, 5, &mp5496_ldoa2, "l5" }, {} }; @@ -1012,6 +1013,39 @@ static const struct rpm_regulator_data rpm_pm8916_regulators[] = { {} }; +static const struct rpm_regulator_data rpm_pm8937_regulators[] = { + { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8994_hfsmps, "vdd_s1" }, + { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8994_hfsmps, "vdd_s2" }, + { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8994_hfsmps, "vdd_s3" }, + { "s4", 
QCOM_SMD_RPM_SMPA, 4, &pm8994_hfsmps, "vdd_s4" }, + /* S5 - S6 are managed by SPMI */ + + { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8953_ult_nldo, "vdd_l1_l19" }, + { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8953_ult_nldo, "vdd_l2_l23" }, + { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8953_ult_nldo, "vdd_l3" }, + { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" }, + { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" }, + { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" }, + { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" }, + { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" }, + { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" }, + { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"}, + { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" }, + { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" }, + { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" }, + { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" }, + { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" }, + { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" }, + { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" }, + { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" }, + { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8953_ult_nldo, "vdd_l1_l19" }, + { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8953_lnldo, "vdd_l20_l21" }, + { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8953_lnldo, "vdd_l20_l21" }, + { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" }, + { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8994_nldo, "vdd_l2_l23" }, + {} +}; + static const struct rpm_regulator_data rpm_pm8941_regulators[] = { { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8x41_hfsmps, "vdd_s1" }, { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8x41_hfsmps, "vdd_s2" }, @@ -1329,6 +1363,7 @@ static const struct of_device_id rpm_of_match[] = { { .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators }, { .compatible = "qcom,rpm-pm8909-regulators", .data = &rpm_pm8909_regulators }, { .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators }, + { .compatible = "qcom,rpm-pm8937-regulators", .data = &rpm_pm8937_regulators }, { .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators }, { .compatible = "qcom,rpm-pm8950-regulators", .data = &rpm_pm8950_regulators }, { .compatible = "qcom,rpm-pm8953-regulators", .data = &rpm_pm8953_regulators }, diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c index 94f9092b29ef..9a9fa20dcd95 100644 --- a/drivers/regulator/qcom_spmi-regulator.c +++ b/drivers/regulator/qcom_spmi-regulator.c @@ -2239,6 +2239,39 @@ static const struct spmi_regulator_data pm8916_regulators[] = { { } }; +static const struct spmi_regulator_data pm8937_regulators[] = { + { "s1", 0x1400, "vdd_s1", }, + { "s2", 0x1700, "vdd_s2", }, + { "s3", 0x1a00, "vdd_s3", }, + { "s4", 0x1d00, "vdd_s4", }, + { "s5", 0x2000, "vdd_s5", }, + { "s6", 0x2300, "vdd_s6", }, + { "l1", 0x4000, "vdd_l1_l19", }, + { "l2", 0x4100, "vdd_l2_l23", }, + { "l3", 0x4200, "vdd_l3", }, + { "l4", 0x4300, "vdd_l4_l5_l6_l7_l16", }, + { "l5", 0x4400, "vdd_l4_l5_l6_l7_l16", }, + { "l6", 0x4500, "vdd_l4_l5_l6_l7_l16", }, + { "l7", 0x4600, "vdd_l4_l5_l6_l7_l16", }, + { "l8", 0x4700, "vdd_l8_l11_l12_l17_l22", }, + { 
"l9", 0x4800, "vdd_l9_l10_l13_l14_l15_l18", }, + { "l10", 0x4900, "vdd_l9_l10_l13_l14_l15_l18", }, + { "l11", 0x4a00, "vdd_l8_l11_l12_l17_l22", }, + { "l12", 0x4b00, "vdd_l8_l11_l12_l17_l22", }, + { "l13", 0x4c00, "vdd_l9_l10_l13_l14_l15_l18", }, + { "l14", 0x4d00, "vdd_l9_l10_l13_l14_l15_l18", }, + { "l15", 0x4e00, "vdd_l9_l10_l13_l14_l15_l18", }, + { "l16", 0x4f00, "vdd_l4_l5_l6_l7_l16", }, + { "l17", 0x5000, "vdd_l8_l11_l12_l17_l22", }, + { "l18", 0x5100, "vdd_l9_l10_l13_l14_l15_l18", }, + { "l19", 0x5200, "vdd_l1_l19", }, + { "l20", 0x5300, "vdd_l20_l21", }, + { "l21", 0x5400, "vdd_l21_l21", }, + { "l22", 0x5500, "vdd_l8_l11_l12_l17_l22", }, + { "l23", 0x5600, "vdd_l2_l23", }, + { } +}; + static const struct spmi_regulator_data pm8941_regulators[] = { { "s1", 0x1400, "vdd_s1", }, { "s2", 0x1700, "vdd_s2", }, @@ -2453,6 +2486,7 @@ static const struct of_device_id qcom_spmi_regulator_match[] = { { .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators }, { .compatible = "qcom,pm8909-regulators", .data = &pm8909_regulators }, { .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators }, + { .compatible = "qcom,pm8937-regulators", .data = &pm8937_regulators }, { .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators }, { .compatible = "qcom,pm8950-regulators", .data = &pm8950_regulators }, { .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators }, diff --git a/drivers/regulator/regnl.h b/drivers/regulator/regnl.h new file mode 100644 index 000000000000..bcba16cc05cc --- /dev/null +++ b/drivers/regulator/regnl.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Regulator event over netlink + * + * Author: Naresh Solanki <Naresh.Solanki@9elements.com> + */ + +#ifndef __REGULATOR_EVENT_H +#define __REGULATOR_EVENT_H + +int reg_generate_netlink_event(const char *reg_name, u64 event); + +#endif diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c index 717144cbe0f9..40855105dd33 100644 --- a/drivers/regulator/stm32-vrefbuf.c +++ b/drivers/regulator/stm32-vrefbuf.c @@ -233,7 +233,7 @@ err_pm_stop: return ret; } -static int stm32_vrefbuf_remove(struct platform_device *pdev) +static void stm32_vrefbuf_remove(struct platform_device *pdev) { struct regulator_dev *rdev = platform_get_drvdata(pdev); struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); @@ -244,8 +244,6 @@ static int stm32_vrefbuf_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); - - return 0; }; static int __maybe_unused stm32_vrefbuf_runtime_suspend(struct device *dev) @@ -282,7 +280,7 @@ MODULE_DEVICE_TABLE(of, stm32_vrefbuf_of_match); static struct platform_driver stm32_vrefbuf_driver = { .probe = stm32_vrefbuf_probe, - .remove = stm32_vrefbuf_remove, + .remove_new = stm32_vrefbuf_remove, .driver = { .name = "stm32-vrefbuf", .probe_type = PROBE_PREFER_ASYNCHRONOUS, diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c index 79d1a3eb18d4..a498df7cb016 100644 --- a/drivers/regulator/stpmic1_regulator.c +++ b/drivers/regulator/stpmic1_regulator.c @@ -15,7 +15,7 @@ #include <dt-bindings/mfd/st,stpmic1.h> /** - * struct stpmic1 regulator description: this structure is used as driver data + * struct stpmic1_regulator_cfg - this structure is used as driver data * @desc: regulator framework description * @mask_reset_reg: mask reset register address * @mask_reset_mask: mask rank and mask reset register mask diff 
--git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c index 1d8304b88bd6..5f868042392f 100644 --- a/drivers/regulator/uniphier-regulator.c +++ b/drivers/regulator/uniphier-regulator.c @@ -115,7 +115,7 @@ out_rst_assert: return ret; } -static int uniphier_regulator_remove(struct platform_device *pdev) +static void uniphier_regulator_remove(struct platform_device *pdev) { struct uniphier_regulator_priv *priv = platform_get_drvdata(pdev); int i; @@ -124,8 +124,6 @@ static int uniphier_regulator_remove(struct platform_device *pdev) reset_control_assert(priv->rst[i]); clk_bulk_disable_unprepare(priv->data->nclks, priv->clk); - - return 0; } /* USB3 controller data */ @@ -209,7 +207,7 @@ MODULE_DEVICE_TABLE(of, uniphier_regulator_match); static struct platform_driver uniphier_regulator_driver = { .probe = uniphier_regulator_probe, - .remove = uniphier_regulator_remove, + .remove_new = uniphier_regulator_remove, .driver = { .name = "uniphier-regulator", .probe_type = PROBE_PREFER_ASYNCHRONOUS, diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c index 97f075ed68c9..53d1b9d6f69c 100644 --- a/drivers/regulator/userspace-consumer.c +++ b/drivers/regulator/userspace-consumer.c @@ -194,7 +194,7 @@ err_enable: return ret; } -static int regulator_userspace_consumer_remove(struct platform_device *pdev) +static void regulator_userspace_consumer_remove(struct platform_device *pdev) { struct userspace_consumer_data *data = platform_get_drvdata(pdev); @@ -202,8 +202,6 @@ static int regulator_userspace_consumer_remove(struct platform_device *pdev) if (data->enabled && !data->no_autoswitch) regulator_bulk_disable(data->num_supplies, data->supplies); - - return 0; } static const struct of_device_id regulator_userspace_consumer_of_match[] = { @@ -213,7 +211,7 @@ static const struct of_device_id regulator_userspace_consumer_of_match[] = { static struct platform_driver regulator_userspace_consumer_driver = { .probe = regulator_userspace_consumer_probe, - .remove = regulator_userspace_consumer_remove, + .remove_new = regulator_userspace_consumer_remove, .driver = { .name = "reg-userspace-consumer", .probe_type = PROBE_PREFER_ASYNCHRONOUS, diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c index d5a160efdae6..0a0ee186c6af 100644 --- a/drivers/regulator/virtual.c +++ b/drivers/regulator/virtual.c @@ -345,7 +345,7 @@ static int regulator_virtual_probe(struct platform_device *pdev) return 0; } -static int regulator_virtual_remove(struct platform_device *pdev) +static void regulator_virtual_remove(struct platform_device *pdev) { struct virtual_consumer_data *drvdata = platform_get_drvdata(pdev); @@ -353,13 +353,11 @@ static int regulator_virtual_remove(struct platform_device *pdev) if (drvdata->enabled) regulator_disable(drvdata->regulator); - - return 0; } static struct platform_driver regulator_virtual_consumer_driver = { .probe = regulator_virtual_probe, - .remove = regulator_virtual_remove, + .remove_new = regulator_virtual_remove, .driver = { .name = "reg-virt-consumer", .probe_type = PROBE_PREFER_ASYNCHRONOUS, diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c index 1445bafcab40..9939a5d2cbec 100644 --- a/drivers/regulator/wm8350-regulator.c +++ b/drivers/regulator/wm8350-regulator.c @@ -1158,14 +1158,12 @@ static int wm8350_regulator_probe(struct platform_device *pdev) return 0; } -static int wm8350_regulator_remove(struct platform_device *pdev) +static void 
wm8350_regulator_remove(struct platform_device *pdev) { struct regulator_dev *rdev = platform_get_drvdata(pdev); struct wm8350 *wm8350 = rdev_get_drvdata(rdev); wm8350_free_irq(wm8350, wm8350_reg[pdev->id].irq, rdev); - - return 0; } int wm8350_register_regulator(struct wm8350 *wm8350, int reg, @@ -1306,7 +1304,7 @@ EXPORT_SYMBOL_GPL(wm8350_register_led); static struct platform_driver wm8350_regulator_driver = { .probe = wm8350_regulator_probe, - .remove = wm8350_regulator_remove, + .remove_new = wm8350_regulator_remove, .driver = { .name = "wm8350-regulator", .probe_type = PROBE_PREFER_ASYNCHRONOUS, diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 833cfab7d877..7327e81352e9 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1106,12 +1106,6 @@ static void dasd_statistics_removeroot(void) return; } -int dasd_stats_generic_show(struct seq_file *m, void *v) -{ - seq_puts(m, "Statistics are not activated in this kernel\n"); - return 0; -} - static void dasd_profile_init(struct dasd_profile *profile, struct dentry *base_dentry) { diff --git a/drivers/s390/cio/vfio_ccw_chp.c b/drivers/s390/cio/vfio_ccw_chp.c index d3f3a611f95b..38c176cf6295 100644 --- a/drivers/s390/cio/vfio_ccw_chp.c +++ b/drivers/s390/cio/vfio_ccw_chp.c @@ -115,7 +115,7 @@ static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private, /* Notify the guest if more CRWs are on our queue */ if (!list_empty(&private->crw) && private->crw_trigger) - eventfd_signal(private->crw_trigger, 1); + eventfd_signal(private->crw_trigger); return ret; } diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 43601816ea4e..bfb35cfce1ef 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -112,7 +112,7 @@ void vfio_ccw_sch_io_todo(struct work_struct *work) private->state = VFIO_CCW_STATE_IDLE; if (private->io_trigger) - eventfd_signal(private->io_trigger, 1); + eventfd_signal(private->io_trigger); } void vfio_ccw_crw_todo(struct work_struct *work) @@ -122,7 +122,7 @@ void vfio_ccw_crw_todo(struct work_struct *work) private = container_of(work, struct vfio_ccw_private, crw_work); if (!list_empty(&private->crw) && private->crw_trigger) - eventfd_signal(private->crw_trigger, 1); + eventfd_signal(private->crw_trigger); } /* diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index cba4971618ff..ea532a8a4a0c 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -421,7 +421,7 @@ static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private, case VFIO_IRQ_SET_DATA_NONE: { if (*ctx) - eventfd_signal(*ctx, 1); + eventfd_signal(*ctx); return 0; } case VFIO_IRQ_SET_DATA_BOOL: @@ -432,7 +432,7 @@ static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private, return -EFAULT; if (trigger && *ctx) - eventfd_signal(*ctx, 1); + eventfd_signal(*ctx); return 0; } case VFIO_IRQ_SET_DATA_EVENTFD: @@ -612,7 +612,7 @@ static void vfio_ccw_mdev_request(struct vfio_device *vdev, unsigned int count) "Relaying device request to user (#%u)\n", count); - eventfd_signal(private->req_trigger, 1); + eventfd_signal(private->req_trigger); } else if (count == 0) { dev_notice(dev, "No device request channel registered, blocked until released by user\n"); diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 4db538a55192..542b5be73a6a 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -1794,7 +1794,7 @@ static void 
vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count) "Relaying device request to user (#%u)\n", count); - eventfd_signal(matrix_mdev->req_trigger, 1); + eventfd_signal(matrix_mdev->req_trigger); } else if (count == 0) { dev_notice(dev, "No device request registered, blocked until released by user\n"); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 70c9dd6b6a31..ddae0fde798e 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -1177,9 +1177,10 @@ config SPI_ZYNQ_QSPI config SPI_ZYNQMP_GQSPI tristate "Xilinx ZynqMP GQSPI controller" - depends on (SPI_MASTER && HAS_DMA) || COMPILE_TEST + depends on (SPI_MEM && HAS_DMA) || COMPILE_TEST help Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC. + This controller only supports SPI memory interface. config SPI_AMD tristate "AMD SPI controller" diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 3d1252566134..370c4d1572ed 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -272,7 +272,7 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op) if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i])) return i; - return -ENOTSUPP; + return -EOPNOTSUPP; } static bool atmel_qspi_supports_op(struct spi_mem *mem, diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c index c9f1d1e1dcf7..b7ada981464a 100644 --- a/drivers/spi/spi-ath79.c +++ b/drivers/spi/spi-ath79.c @@ -146,7 +146,7 @@ static int ath79_exec_mem_op(struct spi_mem *mem, /* Only use for fast-read op. */ if (op->cmd.opcode != 0x0b || op->data.dir != SPI_MEM_DATA_IN || op->addr.nbytes != 3 || op->dummy.nbytes != 1) - return -ENOTSUPP; + return -EOPNOTSUPP; /* disable GPIO mode */ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0); diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index b96e55f59d1a..9ace259d2d29 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -6,12 +6,14 @@ */ #include <linux/clk.h> +#include <linux/idr.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/of.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> +#include <linux/timer.h> #define SPI_ENGINE_VERSION_MAJOR(x) ((x >> 16) & 0xff) #define SPI_ENGINE_VERSION_MINOR(x) ((x >> 8) & 0xff) @@ -52,6 +54,7 @@ #define SPI_ENGINE_CMD_REG_CLK_DIV 0x0 #define SPI_ENGINE_CMD_REG_CONFIG 0x1 +#define SPI_ENGINE_CMD_REG_XFER_BITS 0x2 #define SPI_ENGINE_MISC_SYNC 0x0 #define SPI_ENGINE_MISC_SLEEP 0x1 @@ -78,29 +81,42 @@ struct spi_engine_program { uint16_t instructions[]; }; -struct spi_engine { - struct clk *clk; - struct clk *ref_clk; - - spinlock_t lock; - - void __iomem *base; - - struct spi_message *msg; +/** + * struct spi_engine_message_state - SPI engine per-message state + */ +struct spi_engine_message_state { + /** @p: Instructions for executing this message. */ struct spi_engine_program *p; + /** @cmd_length: Number of elements in cmd_buf array. */ unsigned cmd_length; + /** @cmd_buf: Array of commands not yet written to CMD FIFO. */ const uint16_t *cmd_buf; - + /** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */ struct spi_transfer *tx_xfer; + /** @tx_length: Size of tx_buf in bytes. */ unsigned int tx_length; + /** @tx_buf: Bytes not yet written to TX FIFO. */ const uint8_t *tx_buf; - + /** @rx_xfer: Next xfer with rx_buf not yet fully written to RX FIFO. */ struct spi_transfer *rx_xfer; + /** @rx_length: Size of rx_buf in bytes.
*/ unsigned int rx_length; + /** @rx_buf: Bytes not yet written to the RX FIFO. */ uint8_t *rx_buf; + /** @sync_id: ID to correlate SYNC interrupts with this message. */ + u8 sync_id; +}; + +struct spi_engine { + struct clk *clk; + struct clk *ref_clk; + + spinlock_t lock; - unsigned int sync_id; - unsigned int completed_id; + void __iomem *base; + struct ida sync_ida; + struct timer_list watchdog_timer; + struct spi_controller *controller; unsigned int int_enable; }; @@ -127,25 +143,17 @@ static unsigned int spi_engine_get_config(struct spi_device *spi) return config; } -static unsigned int spi_engine_get_clk_div(struct spi_engine *spi_engine, - struct spi_device *spi, struct spi_transfer *xfer) -{ - unsigned int clk_div; - - clk_div = DIV_ROUND_UP(clk_get_rate(spi_engine->ref_clk), - xfer->speed_hz * 2); - if (clk_div > 255) - clk_div = 255; - else if (clk_div > 0) - clk_div -= 1; - - return clk_div; -} - static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry, struct spi_transfer *xfer) { - unsigned int len = xfer->len; + unsigned int len; + + if (xfer->bits_per_word <= 8) + len = xfer->len; + else if (xfer->bits_per_word <= 16) + len = xfer->len / 2; + else + len = xfer->len / 4; while (len) { unsigned int n = min(len, 256U); @@ -163,22 +171,16 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry, } static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry, - struct spi_engine *spi_engine, unsigned int clk_div, - struct spi_transfer *xfer) + int delay_ns, u32 sclk_hz) { - unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk); unsigned int t; - int delay; - delay = spi_delay_to_ns(&xfer->delay, xfer); - if (delay < 0) + /* negative delay indicates error, e.g. from spi_delay_to_ns() */ + if (delay_ns <= 0) return; - delay /= 1000; - if (delay == 0) - return; - - t = DIV_ROUND_UP(delay * spi_clk, (clk_div + 1) * 2); + /* rounding down since executing the instruction adds a couple of ticks delay */ + t = DIV_ROUND_DOWN_ULL((u64)delay_ns * sclk_hz, NSEC_PER_SEC); while (t) { unsigned int n = min(t, 256U); @@ -195,53 +197,105 @@ static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry, if (assert) mask ^= BIT(spi_get_chipselect(spi, 0)); - spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(1, mask)); + spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask)); +} + +/* + * Performs precompile steps on the message. + * + * The SPI core does most of the message/transfer validation and filling in + * fields for us via __spi_validate(). This fixes up anything remaining not + * done there. + * + * NB: This is separate from spi_engine_compile_message() because the latter + * is called twice and would otherwise result in double-evaluation. 
+ */ +static void spi_engine_precompile_message(struct spi_message *msg) +{ + unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz; + struct spi_transfer *xfer; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz); + xfer->effective_speed_hz = max_hz / min(clk_div, 256U); + } } -static int spi_engine_compile_message(struct spi_engine *spi_engine, - struct spi_message *msg, bool dry, struct spi_engine_program *p) +static void spi_engine_compile_message(struct spi_message *msg, bool dry, + struct spi_engine_program *p) { struct spi_device *spi = msg->spi; + struct spi_controller *host = spi->controller; struct spi_transfer *xfer; int clk_div, new_clk_div; - bool cs_change = true; + bool keep_cs = false; + u8 bits_per_word = 0; - clk_div = -1; + clk_div = 1; spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG, spi_engine_get_config(spi))); + xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list); + spi_engine_gen_cs(p, dry, spi, !xfer->cs_off); + list_for_each_entry(xfer, &msg->transfers, transfer_list) { - new_clk_div = spi_engine_get_clk_div(spi_engine, spi, xfer); + new_clk_div = host->max_speed_hz / xfer->effective_speed_hz; if (new_clk_div != clk_div) { clk_div = new_clk_div; + /* actual divider used is register value + 1 */ spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, - clk_div)); + clk_div - 1)); } - if (cs_change) - spi_engine_gen_cs(p, dry, spi, true); + if (bits_per_word != xfer->bits_per_word) { + bits_per_word = xfer->bits_per_word; + spi_engine_program_add_cmd(p, dry, + SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS, + bits_per_word)); + } spi_engine_gen_xfer(p, dry, xfer); - spi_engine_gen_sleep(p, dry, spi_engine, clk_div, xfer); - - cs_change = xfer->cs_change; - if (list_is_last(&xfer->transfer_list, &msg->transfers)) - cs_change = !cs_change; - - if (cs_change) - spi_engine_gen_cs(p, dry, spi, false); + spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer), + xfer->effective_speed_hz); + + if (xfer->cs_change) { + if (list_is_last(&xfer->transfer_list, &msg->transfers)) { + keep_cs = true; + } else { + if (!xfer->cs_off) + spi_engine_gen_cs(p, dry, spi, false); + + spi_engine_gen_sleep(p, dry, spi_delay_to_ns( + &xfer->cs_change_delay, xfer), + xfer->effective_speed_hz); + + if (!list_next_entry(xfer, transfer_list)->cs_off) + spi_engine_gen_cs(p, dry, spi, true); + } + } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) && + xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) { + spi_engine_gen_cs(p, dry, spi, xfer->cs_off); + } } - return 0; + if (!keep_cs) + spi_engine_gen_cs(p, dry, spi, false); + + /* + * Restore clockdiv to default so that future gen_sleep commands don't + * have to be aware of the current register state. 
+ */ + if (clk_div != 1) + spi_engine_program_add_cmd(p, dry, + SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0)); } -static void spi_engine_xfer_next(struct spi_engine *spi_engine, +static void spi_engine_xfer_next(struct spi_message *msg, struct spi_transfer **_xfer) { - struct spi_message *msg = spi_engine->msg; struct spi_transfer *xfer = *_xfer; if (!xfer) { @@ -256,147 +310,192 @@ static void spi_engine_xfer_next(struct spi_engine *spi_engine, *_xfer = xfer; } -static void spi_engine_tx_next(struct spi_engine *spi_engine) +static void spi_engine_tx_next(struct spi_message *msg) { - struct spi_transfer *xfer = spi_engine->tx_xfer; + struct spi_engine_message_state *st = msg->state; + struct spi_transfer *xfer = st->tx_xfer; do { - spi_engine_xfer_next(spi_engine, &xfer); + spi_engine_xfer_next(msg, &xfer); } while (xfer && !xfer->tx_buf); - spi_engine->tx_xfer = xfer; + st->tx_xfer = xfer; if (xfer) { - spi_engine->tx_length = xfer->len; - spi_engine->tx_buf = xfer->tx_buf; + st->tx_length = xfer->len; + st->tx_buf = xfer->tx_buf; } else { - spi_engine->tx_buf = NULL; + st->tx_buf = NULL; } } -static void spi_engine_rx_next(struct spi_engine *spi_engine) +static void spi_engine_rx_next(struct spi_message *msg) { - struct spi_transfer *xfer = spi_engine->rx_xfer; + struct spi_engine_message_state *st = msg->state; + struct spi_transfer *xfer = st->rx_xfer; do { - spi_engine_xfer_next(spi_engine, &xfer); + spi_engine_xfer_next(msg, &xfer); } while (xfer && !xfer->rx_buf); - spi_engine->rx_xfer = xfer; + st->rx_xfer = xfer; if (xfer) { - spi_engine->rx_length = xfer->len; - spi_engine->rx_buf = xfer->rx_buf; + st->rx_length = xfer->len; + st->rx_buf = xfer->rx_buf; } else { - spi_engine->rx_buf = NULL; + st->rx_buf = NULL; } } -static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine) +static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine, + struct spi_message *msg) { void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO; + struct spi_engine_message_state *st = msg->state; unsigned int n, m, i; const uint16_t *buf; n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM); - while (n && spi_engine->cmd_length) { - m = min(n, spi_engine->cmd_length); - buf = spi_engine->cmd_buf; + while (n && st->cmd_length) { + m = min(n, st->cmd_length); + buf = st->cmd_buf; for (i = 0; i < m; i++) writel_relaxed(buf[i], addr); - spi_engine->cmd_buf += m; - spi_engine->cmd_length -= m; + st->cmd_buf += m; + st->cmd_length -= m; n -= m; } - return spi_engine->cmd_length != 0; + return st->cmd_length != 0; } -static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine) +static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine, + struct spi_message *msg) { void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO; + struct spi_engine_message_state *st = msg->state; unsigned int n, m, i; - const uint8_t *buf; n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM); - while (n && spi_engine->tx_length) { - m = min(n, spi_engine->tx_length); - buf = spi_engine->tx_buf; - for (i = 0; i < m; i++) - writel_relaxed(buf[i], addr); - spi_engine->tx_buf += m; - spi_engine->tx_length -= m; + while (n && st->tx_length) { + if (st->tx_xfer->bits_per_word <= 8) { + const u8 *buf = st->tx_buf; + + m = min(n, st->tx_length); + for (i = 0; i < m; i++) + writel_relaxed(buf[i], addr); + st->tx_buf += m; + st->tx_length -= m; + } else if (st->tx_xfer->bits_per_word <= 16) { + const u16 *buf = (const u16 *)st->tx_buf; + + m = min(n, 
st->tx_length / 2); + for (i = 0; i < m; i++) + writel_relaxed(buf[i], addr); + st->tx_buf += m * 2; + st->tx_length -= m * 2; + } else { + const u32 *buf = (const u32 *)st->tx_buf; + + m = min(n, st->tx_length / 4); + for (i = 0; i < m; i++) + writel_relaxed(buf[i], addr); + st->tx_buf += m * 4; + st->tx_length -= m * 4; + } n -= m; - if (spi_engine->tx_length == 0) - spi_engine_tx_next(spi_engine); + if (st->tx_length == 0) + spi_engine_tx_next(msg); } - return spi_engine->tx_length != 0; + return st->tx_length != 0; } -static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine) +static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine, + struct spi_message *msg) { void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO; + struct spi_engine_message_state *st = msg->state; unsigned int n, m, i; - uint8_t *buf; n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL); - while (n && spi_engine->rx_length) { - m = min(n, spi_engine->rx_length); - buf = spi_engine->rx_buf; - for (i = 0; i < m; i++) - buf[i] = readl_relaxed(addr); - spi_engine->rx_buf += m; - spi_engine->rx_length -= m; + while (n && st->rx_length) { + if (st->rx_xfer->bits_per_word <= 8) { + u8 *buf = st->rx_buf; + + m = min(n, st->rx_length); + for (i = 0; i < m; i++) + buf[i] = readl_relaxed(addr); + st->rx_buf += m; + st->rx_length -= m; + } else if (st->rx_xfer->bits_per_word <= 16) { + u16 *buf = (u16 *)st->rx_buf; + + m = min(n, st->rx_length / 2); + for (i = 0; i < m; i++) + buf[i] = readl_relaxed(addr); + st->rx_buf += m * 2; + st->rx_length -= m * 2; + } else { + u32 *buf = (u32 *)st->rx_buf; + + m = min(n, st->rx_length / 4); + for (i = 0; i < m; i++) + buf[i] = readl_relaxed(addr); + st->rx_buf += m * 4; + st->rx_length -= m * 4; + } n -= m; - if (spi_engine->rx_length == 0) - spi_engine_rx_next(spi_engine); + if (st->rx_length == 0) + spi_engine_rx_next(msg); } - return spi_engine->rx_length != 0; + return st->rx_length != 0; } static irqreturn_t spi_engine_irq(int irq, void *devid) { struct spi_controller *host = devid; + struct spi_message *msg = host->cur_msg; struct spi_engine *spi_engine = spi_controller_get_devdata(host); unsigned int disable_int = 0; unsigned int pending; + int completed_id = -1; pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING); if (pending & SPI_ENGINE_INT_SYNC) { writel_relaxed(SPI_ENGINE_INT_SYNC, spi_engine->base + SPI_ENGINE_REG_INT_PENDING); - spi_engine->completed_id = readl_relaxed( + completed_id = readl_relaxed( spi_engine->base + SPI_ENGINE_REG_SYNC_ID); } spin_lock(&spi_engine->lock); if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) { - if (!spi_engine_write_cmd_fifo(spi_engine)) + if (!spi_engine_write_cmd_fifo(spi_engine, msg)) disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY; } if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) { - if (!spi_engine_write_tx_fifo(spi_engine)) + if (!spi_engine_write_tx_fifo(spi_engine, msg)) disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY; } if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) { - if (!spi_engine_read_rx_fifo(spi_engine)) + if (!spi_engine_read_rx_fifo(spi_engine, msg)) disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL; } - if (pending & SPI_ENGINE_INT_SYNC) { - if (spi_engine->msg && - spi_engine->completed_id == spi_engine->sync_id) { - struct spi_message *msg = spi_engine->msg; - - kfree(spi_engine->p); - msg->status = 0; - msg->actual_length = msg->frame_length; - spi_engine->msg = NULL; - spi_finalize_current_message(host); + if (pending & 
SPI_ENGINE_INT_SYNC && msg) { + struct spi_engine_message_state *st = msg->state; + + if (completed_id == st->sync_id) { + if (timer_delete_sync(&spi_engine->watchdog_timer)) { + msg->status = 0; + msg->actual_length = msg->frame_length; + spi_finalize_current_message(host); + } disable_int |= SPI_ENGINE_INT_SYNC; } } @@ -412,43 +511,86 @@ static irqreturn_t spi_engine_irq(int irq, void *devid) return IRQ_HANDLED; } -static int spi_engine_transfer_one_message(struct spi_controller *host, - struct spi_message *msg) +static int spi_engine_prepare_message(struct spi_controller *host, + struct spi_message *msg) { struct spi_engine_program p_dry, *p; struct spi_engine *spi_engine = spi_controller_get_devdata(host); - unsigned int int_enable = 0; - unsigned long flags; + struct spi_engine_message_state *st; size_t size; + int ret; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + spi_engine_precompile_message(msg); p_dry.length = 0; - spi_engine_compile_message(spi_engine, msg, true, &p_dry); + spi_engine_compile_message(msg, true, &p_dry); size = sizeof(*p->instructions) * (p_dry.length + 1); p = kzalloc(sizeof(*p) + size, GFP_KERNEL); - if (!p) + if (!p) { + kfree(st); return -ENOMEM; - spi_engine_compile_message(spi_engine, msg, false, p); + } - spin_lock_irqsave(&spi_engine->lock, flags); - spi_engine->sync_id = (spi_engine->sync_id + 1) & 0xff; - spi_engine_program_add_cmd(p, false, - SPI_ENGINE_CMD_SYNC(spi_engine->sync_id)); + ret = ida_alloc_range(&spi_engine->sync_ida, 0, U8_MAX, GFP_KERNEL); + if (ret < 0) { + kfree(p); + kfree(st); + return ret; + } + + st->sync_id = ret; + + spi_engine_compile_message(msg, false, p); + + spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(st->sync_id)); + + st->p = p; + st->cmd_buf = p->instructions; + st->cmd_length = p->length; + msg->state = st; + + return 0; +} + +static int spi_engine_unprepare_message(struct spi_controller *host, + struct spi_message *msg) +{ + struct spi_engine *spi_engine = spi_controller_get_devdata(host); + struct spi_engine_message_state *st = msg->state; + + ida_free(&spi_engine->sync_ida, st->sync_id); + kfree(st->p); + kfree(st); - spi_engine->msg = msg; - spi_engine->p = p; + return 0; +} - spi_engine->cmd_buf = p->instructions; - spi_engine->cmd_length = p->length; - if (spi_engine_write_cmd_fifo(spi_engine)) +static int spi_engine_transfer_one_message(struct spi_controller *host, + struct spi_message *msg) +{ + struct spi_engine *spi_engine = spi_controller_get_devdata(host); + struct spi_engine_message_state *st = msg->state; + unsigned int int_enable = 0; + unsigned long flags; + + mod_timer(&spi_engine->watchdog_timer, jiffies + msecs_to_jiffies(5000)); + + spin_lock_irqsave(&spi_engine->lock, flags); + + if (spi_engine_write_cmd_fifo(spi_engine, msg)) int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY; - spi_engine_tx_next(spi_engine); - if (spi_engine_write_tx_fifo(spi_engine)) + spi_engine_tx_next(msg); + if (spi_engine_write_tx_fifo(spi_engine, msg)) int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY; - spi_engine_rx_next(spi_engine); - if (spi_engine->rx_length != 0) + spi_engine_rx_next(msg); + if (st->rx_length != 0) int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL; int_enable |= SPI_ENGINE_INT_SYNC; @@ -461,6 +603,29 @@ static int spi_engine_transfer_one_message(struct spi_controller *host, return 0; } +static void spi_engine_timeout(struct timer_list *timer) +{ + struct spi_engine *spi_engine = from_timer(spi_engine, timer, watchdog_timer); + struct spi_controller *host = 
spi_engine->controller; + + if (WARN_ON(!host->cur_msg)) + return; + + dev_err(&host->dev, + "Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n"); + host->cur_msg->status = -ETIMEDOUT; + spi_finalize_current_message(host); +} + +static void spi_engine_release_hw(void *p) +{ + struct spi_engine *spi_engine = p; + + writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING); + writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE); + writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET); +} + static int spi_engine_probe(struct platform_device *pdev) { struct spi_engine *spi_engine; @@ -473,35 +638,28 @@ static int spi_engine_probe(struct platform_device *pdev) if (irq < 0) return irq; - spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL); - if (!spi_engine) - return -ENOMEM; - - host = spi_alloc_host(&pdev->dev, 0); + host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine)); if (!host) return -ENOMEM; - spi_controller_set_devdata(host, spi_engine); + spi_engine = spi_controller_get_devdata(host); spin_lock_init(&spi_engine->lock); + ida_init(&spi_engine->sync_ida); + timer_setup(&spi_engine->watchdog_timer, spi_engine_timeout, TIMER_IRQSAFE); + spi_engine->controller = host; spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk"); - if (IS_ERR(spi_engine->clk)) { - ret = PTR_ERR(spi_engine->clk); - goto err_put_host; - } + if (IS_ERR(spi_engine->clk)) + return PTR_ERR(spi_engine->clk); spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk"); - if (IS_ERR(spi_engine->ref_clk)) { - ret = PTR_ERR(spi_engine->ref_clk); - goto err_put_host; - } + if (IS_ERR(spi_engine->ref_clk)) + return PTR_ERR(spi_engine->ref_clk); spi_engine->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(spi_engine->base)) { - ret = PTR_ERR(spi_engine->base); - goto err_put_host; - } + if (IS_ERR(spi_engine->base)) + return PTR_ERR(spi_engine->base); version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION); if (SPI_ENGINE_VERSION_MAJOR(version) != 1) { @@ -509,54 +667,42 @@ static int spi_engine_probe(struct platform_device *pdev) SPI_ENGINE_VERSION_MAJOR(version), SPI_ENGINE_VERSION_MINOR(version), SPI_ENGINE_VERSION_PATCH(version)); - ret = -ENODEV; - goto err_put_host; + return -ENODEV; } writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET); writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING); writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE); - ret = request_irq(irq, spi_engine_irq, 0, pdev->name, host); + ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw, + spi_engine); if (ret) - goto err_put_host; + return ret; + + ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name, + host); + if (ret) + return ret; host->dev.of_node = pdev->dev.of_node; host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE; - host->bits_per_word_mask = SPI_BPW_MASK(8); + host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2; host->transfer_one_message = spi_engine_transfer_one_message; + host->prepare_message = spi_engine_prepare_message; + host->unprepare_message = spi_engine_unprepare_message; host->num_chipselect = 8; - ret = spi_register_controller(host); + if (host->max_speed_hz == 0) + return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0"); + + ret = devm_spi_register_controller(&pdev->dev, host); if (ret) - goto err_free_irq; + return ret; platform_set_drvdata(pdev, host); return 0; 
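A minimal sketch (names illustrative, not from the patch) of the devres
ordering the reworked probe above relies on: devm callbacks run in reverse
registration order on unbind, so registering the reset action before
requesting the IRQ guarantees the IRQ is freed before the hardware is reset,
which is what allowed spi_engine_remove() below to be dropped.

	#include <linux/device.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>

	struct my_priv { void __iomem *base; };	/* stand-in for struct spi_engine */

	static void my_release_hw(void *data)
	{
		struct my_priv *priv = data;

		/* Runs last on unbind: the IRQ below is already freed. */
		writel(0x01, priv->base /* + reset register offset */);
	}

	static irqreturn_t my_irq(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int my_probe_tail(struct device *dev, struct my_priv *priv, int irq)
	{
		int ret;

		/* Registered first => executed last during devres teardown. */
		ret = devm_add_action_or_reset(dev, my_release_hw, priv);
		if (ret)
			return ret;

		/* Registered second => freed first, before my_release_hw() runs. */
		return devm_request_irq(dev, irq, my_irq, 0, dev_name(dev), priv);
	}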
-err_free_irq: - free_irq(irq, host); -err_put_host: - spi_controller_put(host); - return ret; -} - -static void spi_engine_remove(struct platform_device *pdev) -{ - struct spi_controller *host = spi_controller_get(platform_get_drvdata(pdev)); - struct spi_engine *spi_engine = spi_controller_get_devdata(host); - int irq = platform_get_irq(pdev, 0); - - spi_unregister_controller(host); - - free_irq(irq, host); - - spi_controller_put(host); - - writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING); - writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE); - writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET); } static const struct of_device_id spi_engine_match_table[] = { @@ -567,7 +713,6 @@ MODULE_DEVICE_TABLE(of, spi_engine_match_table); static struct platform_driver spi_engine_driver = { .probe = spi_engine_probe, - .remove_new = spi_engine_remove, .driver = { .name = "spi-engine", .of_match_table = spi_engine_match_table, diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index ef08fcac2f6d..d96222e6d7d2 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -1199,7 +1199,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem, if (!op->data.nbytes || !op->addr.nbytes || op->addr.nbytes > 4 || op->data.dir != SPI_MEM_DATA_IN) - return -ENOTSUPP; + return -EOPNOTSUPP; buf = op->data.buf.in; addr = op->addr.val; diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 3d7bf62da11c..f94e0d370d46 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1840,7 +1840,7 @@ static int cqspi_probe(struct platform_device *pdev) if (ddata->jh7110_clk_init) { ret = cqspi_jh7110_clk_init(pdev, cqspi); if (ret) - goto probe_clk_failed; + goto probe_reset_failed; } if (of_device_is_compatible(pdev->dev.of_node, @@ -1901,6 +1901,8 @@ static int cqspi_probe(struct platform_device *pdev) probe_setup_failed: cqspi_controller_enable(cqspi, 0); probe_reset_failed: + if (cqspi->is_jh7110) + cqspi_jh7110_disable_clk(pdev, cqspi); clk_disable_unprepare(cqspi->clk); probe_clk_failed: return ret; diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c index b7e04b03be58..8648b8eb080d 100644 --- a/drivers/spi/spi-cadence-xspi.c +++ b/drivers/spi/spi-cadence-xspi.c @@ -619,7 +619,6 @@ MODULE_DEVICE_TABLE(of, cdns_xspi_of_match); static struct platform_driver cdns_xspi_platform_driver = { .probe = cdns_xspi_probe, - .remove = NULL, .driver = { .name = CDNS_XSPI_NAME, .of_match_table = cdns_xspi_of_match, diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c index d239fc5a49cc..f13073e12593 100644 --- a/drivers/spi/spi-cs42l43.c +++ b/drivers/spi/spi-cs42l43.c @@ -213,7 +213,7 @@ static int cs42l43_spi_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv->ctlr)); + priv->ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*priv->ctlr)); if (!priv->ctlr) return -ENOMEM; diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index 46801189a651..cc74cbe03431 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -411,7 +411,6 @@ static const struct of_device_id dw_spi_mmio_of_match[] = { { .compatible = "renesas,rzn1-spi", .data = dw_spi_pssi_init}, { .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_hssi_init}, { .compatible = "intel,keembay-ssi", .data = dw_spi_intel_init}, - { .compatible = "intel,thunderbay-ssi", .data = dw_spi_intel_init}, { 
.compatible = "intel,mountevans-imc-ssi", .data = dw_spi_mountevans_imc_init, diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index b956a32a4162..15f84e68d4d2 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -145,10 +145,10 @@ static int get_spi_clk_cfg(unsigned int speed_hz, return ret; } -static void handle_se_timeout(struct spi_master *spi, - struct spi_message *msg) +static void handle_se_timeout(struct spi_controller *spi, + struct spi_message *msg) { - struct spi_geni_master *mas = spi_master_get_devdata(spi); + struct spi_geni_master *mas = spi_controller_get_devdata(spi); unsigned long time_left; struct geni_se *se = &mas->se; const struct spi_transfer *xfer; @@ -160,9 +160,9 @@ static void handle_se_timeout(struct spi_master *spi, xfer = mas->cur_xfer; mas->cur_xfer = NULL; - if (spi->slave) { + if (spi->target) { /* - * skip CMD Cancel sequnece since spi slave + * skip CMD Cancel sequence since spi target * doesn't support the CMD Cancel sequence */ spin_unlock_irq(&mas->lock); @@ -225,17 +225,17 @@ reset_if_dma: } } -static void handle_gpi_timeout(struct spi_master *spi, struct spi_message *msg) +static void handle_gpi_timeout(struct spi_controller *spi, struct spi_message *msg) { - struct spi_geni_master *mas = spi_master_get_devdata(spi); + struct spi_geni_master *mas = spi_controller_get_devdata(spi); dmaengine_terminate_sync(mas->tx); dmaengine_terminate_sync(mas->rx); } -static void spi_geni_handle_err(struct spi_master *spi, struct spi_message *msg) +static void spi_geni_handle_err(struct spi_controller *spi, struct spi_message *msg) { - struct spi_geni_master *mas = spi_master_get_devdata(spi); + struct spi_geni_master *mas = spi_controller_get_devdata(spi); switch (mas->cur_xfer_mode) { case GENI_SE_FIFO: @@ -286,8 +286,8 @@ static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas) static void spi_geni_set_cs(struct spi_device *slv, bool set_flag) { - struct spi_geni_master *mas = spi_master_get_devdata(slv->master); - struct spi_master *spi = dev_get_drvdata(mas->dev); + struct spi_geni_master *mas = spi_controller_get_devdata(slv->controller); + struct spi_controller *spi = dev_get_drvdata(mas->dev); struct geni_se *se = &mas->se; unsigned long time_left; @@ -395,9 +395,9 @@ static int geni_spi_set_clock_and_bw(struct spi_geni_master *mas, } static int setup_fifo_params(struct spi_device *spi_slv, - struct spi_master *spi) + struct spi_controller *spi) { - struct spi_geni_master *mas = spi_master_get_devdata(spi); + struct spi_geni_master *mas = spi_controller_get_devdata(spi); struct geni_se *se = &mas->se; u32 loopback_cfg = 0, cpol = 0, cpha = 0, demux_output_inv = 0; u32 demux_sel; @@ -434,7 +434,7 @@ static int setup_fifo_params(struct spi_device *spi_slv, static void spi_gsi_callback_result(void *cb, const struct dmaengine_result *result) { - struct spi_master *spi = cb; + struct spi_controller *spi = cb; spi->cur_msg->status = -EIO; if (result->result != DMA_TRANS_NOERROR) { @@ -454,7 +454,7 @@ spi_gsi_callback_result(void *cb, const struct dmaengine_result *result) } static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas, - struct spi_device *spi_slv, struct spi_master *spi) + struct spi_device *spi_slv, struct spi_controller *spi) { unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK; struct dma_slave_config config = {}; @@ -560,14 +560,14 @@ static u32 get_xfer_len_in_words(struct spi_transfer *xfer, static bool geni_can_dma(struct spi_controller *ctlr, struct 
spi_device *slv, struct spi_transfer *xfer) { - struct spi_geni_master *mas = spi_master_get_devdata(slv->master); + struct spi_geni_master *mas = spi_controller_get_devdata(slv->controller); u32 len, fifo_size; if (mas->cur_xfer_mode == GENI_GPI_DMA) return true; - /* Set SE DMA mode for SPI slave. */ - if (ctlr->slave) + /* Set SE DMA mode for SPI target. */ + if (ctlr->target) return true; len = get_xfer_len_in_words(xfer, mas); @@ -579,10 +579,10 @@ static bool geni_can_dma(struct spi_controller *ctlr, return false; } -static int spi_geni_prepare_message(struct spi_master *spi, - struct spi_message *spi_msg) +static int spi_geni_prepare_message(struct spi_controller *spi, + struct spi_message *spi_msg) { - struct spi_geni_master *mas = spi_master_get_devdata(spi); + struct spi_geni_master *mas = spi_controller_get_devdata(spi); int ret; switch (mas->cur_xfer_mode) { @@ -657,7 +657,7 @@ static int spi_geni_init(struct spi_geni_master *mas) proto = geni_se_read_proto(se); - if (spi->slave) { + if (spi->target) { if (proto != GENI_SE_SPI_SLAVE) { dev_err(mas->dev, "Invalid proto %d\n", proto); goto out_pm; @@ -715,7 +715,7 @@ static int spi_geni_init(struct spi_geni_master *mas) } /* We always control CS manually */ - if (!spi->slave) { + if (!spi->target) { spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG); spi_tx_cfg &= ~CS_TOGGLE; writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG); @@ -824,7 +824,7 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas) static int setup_se_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas, - u16 mode, struct spi_master *spi) + u16 mode, struct spi_controller *spi) { u32 m_cmd = 0; u32 len; @@ -913,11 +913,11 @@ static int setup_se_xfer(struct spi_transfer *xfer, return ret; } -static int spi_geni_transfer_one(struct spi_master *spi, - struct spi_device *slv, - struct spi_transfer *xfer) +static int spi_geni_transfer_one(struct spi_controller *spi, + struct spi_device *slv, + struct spi_transfer *xfer) { - struct spi_geni_master *mas = spi_master_get_devdata(spi); + struct spi_geni_master *mas = spi_controller_get_devdata(spi); int ret; if (spi_geni_is_abort_still_pending(mas)) @@ -939,8 +939,8 @@ static int spi_geni_transfer_one(struct spi_master *spi, static irqreturn_t geni_spi_isr(int irq, void *data) { - struct spi_master *spi = data; - struct spi_geni_master *mas = spi_master_get_devdata(spi); + struct spi_controller *spi = data; + struct spi_geni_master *mas = spi_controller_get_devdata(spi); struct geni_se *se = &mas->se; u32 m_irq; @@ -1042,7 +1042,7 @@ static irqreturn_t geni_spi_isr(int irq, void *data) static int spi_geni_probe(struct platform_device *pdev) { int ret, irq; - struct spi_master *spi; + struct spi_controller *spi; struct spi_geni_master *mas; void __iomem *base; struct clk *clk; @@ -1064,12 +1064,12 @@ static int spi_geni_probe(struct platform_device *pdev) if (IS_ERR(clk)) return PTR_ERR(clk); - spi = devm_spi_alloc_master(dev, sizeof(*mas)); + spi = devm_spi_alloc_host(dev, sizeof(*mas)); if (!spi) return -ENOMEM; platform_set_drvdata(pdev, spi); - mas = spi_master_get_devdata(spi); + mas = spi_controller_get_devdata(spi); mas->irq = irq; mas->dev = dev; mas->se.dev = dev; @@ -1113,7 +1113,7 @@ static int spi_geni_probe(struct platform_device *pdev) pm_runtime_enable(dev); if (device_property_read_bool(&pdev->dev, "spi-slave")) - spi->slave = true; + spi->target = true; ret = geni_icc_get(&mas->se, NULL); if (ret) @@ -1135,7 +1135,7 @@ static int spi_geni_probe(struct platform_device *pdev) * for dma (gsi) mode, 
the gsi will set cs based on params passed in * TRE */ - if (!spi->slave && mas->cur_xfer_mode == GENI_SE_FIFO) + if (!spi->target && mas->cur_xfer_mode == GENI_SE_FIFO) spi->set_cs = spi_geni_set_cs; /* @@ -1148,7 +1148,7 @@ static int spi_geni_probe(struct platform_device *pdev) if (ret) goto spi_geni_release_dma; - ret = spi_register_master(spi); + ret = spi_register_controller(spi); if (ret) goto spi_geni_probe_free_irq; @@ -1164,11 +1164,11 @@ spi_geni_probe_runtime_disable: static void spi_geni_remove(struct platform_device *pdev) { - struct spi_master *spi = platform_get_drvdata(pdev); - struct spi_geni_master *mas = spi_master_get_devdata(spi); + struct spi_controller *spi = platform_get_drvdata(pdev); + struct spi_geni_master *mas = spi_controller_get_devdata(spi); /* Unregister _before_ disabling pm_runtime() so we stop transfers */ - spi_unregister_master(spi); + spi_unregister_controller(spi); spi_geni_release_dma_chan(mas); @@ -1178,8 +1178,8 @@ static void spi_geni_remove(struct platform_device *pdev) static int __maybe_unused spi_geni_runtime_suspend(struct device *dev) { - struct spi_master *spi = dev_get_drvdata(dev); - struct spi_geni_master *mas = spi_master_get_devdata(spi); + struct spi_controller *spi = dev_get_drvdata(dev); + struct spi_geni_master *mas = spi_controller_get_devdata(spi); int ret; /* Drop the performance state vote */ @@ -1194,8 +1194,8 @@ static int __maybe_unused spi_geni_runtime_suspend(struct device *dev) static int __maybe_unused spi_geni_runtime_resume(struct device *dev) { - struct spi_master *spi = dev_get_drvdata(dev); - struct spi_geni_master *mas = spi_master_get_devdata(spi); + struct spi_controller *spi = dev_get_drvdata(dev); + struct spi_geni_master *mas = spi_controller_get_devdata(spi); int ret; ret = geni_icc_enable(&mas->se); @@ -1211,30 +1211,30 @@ static int __maybe_unused spi_geni_runtime_resume(struct device *dev) static int __maybe_unused spi_geni_suspend(struct device *dev) { - struct spi_master *spi = dev_get_drvdata(dev); + struct spi_controller *spi = dev_get_drvdata(dev); int ret; - ret = spi_master_suspend(spi); + ret = spi_controller_suspend(spi); if (ret) return ret; ret = pm_runtime_force_suspend(dev); if (ret) - spi_master_resume(spi); + spi_controller_resume(spi); return ret; } static int __maybe_unused spi_geni_resume(struct device *dev) { - struct spi_master *spi = dev_get_drvdata(dev); + struct spi_controller *spi = dev_get_drvdata(dev); int ret; ret = pm_runtime_force_resume(dev); if (ret) return ret; - ret = spi_master_resume(spi); + ret = spi_controller_resume(spi); if (ret) pm_runtime_force_suspend(dev); diff --git a/drivers/spi/spi-ingenic.c b/drivers/spi/spi-ingenic.c index cc366936d72b..003a6d21c4c3 100644 --- a/drivers/spi/spi-ingenic.c +++ b/drivers/spi/spi-ingenic.c @@ -346,14 +346,17 @@ static bool spi_ingenic_can_dma(struct spi_controller *ctlr, static int spi_ingenic_request_dma(struct spi_controller *ctlr, struct device *dev) { - ctlr->dma_tx = dma_request_slave_channel(dev, "tx"); - if (!ctlr->dma_tx) - return -ENODEV; + struct dma_chan *chan; - ctlr->dma_rx = dma_request_slave_channel(dev, "rx"); + chan = dma_request_chan(dev, "tx"); + if (IS_ERR(chan)) + return PTR_ERR(chan); + ctlr->dma_tx = chan; - if (!ctlr->dma_rx) - return -ENODEV; + chan = dma_request_chan(dev, "rx"); + if (IS_ERR(chan)) + return PTR_ERR(chan); + ctlr->dma_rx = chan; ctlr->can_dma = spi_ingenic_can_dma; diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c index 98ec4dc22b81..3654ae35d2db 100644 --- 
a/drivers/spi/spi-intel.c +++ b/drivers/spi/spi-intel.c @@ -711,8 +711,7 @@ static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop, { if (iop->mem_op.cmd.nbytes != op->cmd.nbytes || iop->mem_op.cmd.buswidth != op->cmd.buswidth || - iop->mem_op.cmd.dtr != op->cmd.dtr || - iop->mem_op.cmd.opcode != op->cmd.opcode) + iop->mem_op.cmd.dtr != op->cmd.dtr) return false; if (iop->mem_op.addr.nbytes != op->addr.nbytes || @@ -737,11 +736,12 @@ intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op) const struct intel_spi_mem_op *iop; for (iop = ispi->mem_ops; iop->mem_op.cmd.opcode; iop++) { - if (intel_spi_cmp_mem_op(iop, op)) - break; + if (iop->mem_op.cmd.opcode == op->cmd.opcode && + intel_spi_cmp_mem_op(iop, op)) + return iop; } - return iop->mem_op.cmd.opcode ? iop : NULL; + return NULL; } static bool intel_spi_supports_mem_op(struct spi_mem *mem, diff --git a/drivers/spi/spi-ljca.c b/drivers/spi/spi-ljca.c index c5a066c73817..1cc1422ddba0 100644 --- a/drivers/spi/spi-ljca.c +++ b/drivers/spi/spi-ljca.c @@ -223,7 +223,7 @@ static int ljca_spi_probe(struct auxiliary_device *auxdev, struct ljca_spi_dev *ljca_spi; int ret; - controller = devm_spi_alloc_master(&auxdev->dev, sizeof(*ljca_spi)); + controller = devm_spi_alloc_host(&auxdev->dev, sizeof(*ljca_spi)); if (!controller) return -ENOMEM; diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index edd7430d4c05..2dc8ceb85374 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -323,7 +323,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) return ret; if (!spi_mem_internal_supports_op(mem, op)) - return -ENOTSUPP; + return -EOPNOTSUPP; if (ctlr->mem_ops && ctlr->mem_ops->exec_op && !spi_get_csgpiod(mem->spi, 0)) { ret = spi_mem_access_start(mem); @@ -339,7 +339,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) * read path) and expect the core to use the regular SPI * interface in other cases. 
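*
* Note (commentary, not part of the file): checkpatch flags -ENOTSUPP as a
* non-standard errno that should not reach userspace, hence the switch to
* -EOPNOTSUPP here and in the controller drivers below. The check below must
* keep accepting the old -ENOTSUPP so controllers that have not been
* converted yet still fall back to the regular SPI transfer path.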
*/ - if (!ret || ret != -ENOTSUPP) + if (!ret || (ret != -ENOTSUPP && ret != -EOPNOTSUPP)) return ret; } @@ -559,7 +559,7 @@ spi_mem_dirmap_create(struct spi_mem *mem, if (ret) { desc->nodirmap = true; if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl)) - ret = -ENOTSUPP; + ret = -EOPNOTSUPP; else ret = 0; } diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c index 4a6c984b6bff..d5ac60c135c2 100644 --- a/drivers/spi/spi-mpc52xx.c +++ b/drivers/spi/spi-mpc52xx.c @@ -22,6 +22,7 @@ #include <linux/slab.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/platform_device.h> #include <asm/time.h> #include <asm/mpc52xx.h> diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c index 03db9f016a11..f3bb8bbc192f 100644 --- a/drivers/spi/spi-npcm-fiu.c +++ b/drivers/spi/spi-npcm-fiu.c @@ -556,7 +556,7 @@ static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) op->data.nbytes); if (fiu->spix_mode || op->addr.nbytes > 4) - return -ENOTSUPP; + return -EOPNOTSUPP; if (fiu->clkrate != chip->clkrate) { ret = clk_set_rate(fiu->clk, chip->clkrate); diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index d1b6110b38fc..de63cf0557ce 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -338,14 +338,8 @@ struct vendor_data { * @clk: outgoing clock "SPICLK" for the SPI bus * @host: SPI framework hookup * @host_info: controller-specific data from machine setup - * @pump_transfers: Tasklet used in Interrupt Transfer mode - * @cur_msg: Pointer to current spi_message being processed * @cur_transfer: Pointer to current spi_transfer * @cur_chip: pointer to current clients chip(assigned from controller_state) - * @next_msg_cs_active: the next message in the queue has been examined - * and it was found that it uses the same chip select as the previous - * message, so we left it active after the previous transfer, and it's - * active already. * @tx: current position in TX buffer to be read * @tx_end: end position in TX buffer to be read * @rx: current position in RX buffer to be written @@ -362,7 +356,6 @@ struct vendor_data { * @dummypage: a dummy page used for driving data on the bus with DMA * @dma_running: indicates whether DMA is in operation * @cur_cs: current chip select index - * @cur_gpiod: current chip select GPIO descriptor */ struct pl022 { struct amba_device *adev; @@ -372,12 +365,8 @@ struct pl022 { struct clk *clk; struct spi_controller *host; struct pl022_ssp_controller *host_info; - /* Message per-transfer pump */ - struct tasklet_struct pump_transfers; - struct spi_message *cur_msg; struct spi_transfer *cur_transfer; struct chip_data *cur_chip; - bool next_msg_cs_active; void *tx; void *tx_end; void *rx; @@ -397,7 +386,6 @@ struct pl022 { bool dma_running; #endif int cur_cs; - struct gpio_desc *cur_gpiod; }; /** @@ -431,99 +419,29 @@ struct chip_data { /** * internal_cs_control - Control chip select signals via SSP_CSR. * @pl022: SSP driver private data structure - * @command: select/delect the chip + * @enable: select/deselect the chip * * Used on controller with internal chip select control via SSP_CSR register * (vendor extension). Each of the 5 LSB in the register controls one chip * select signal. 
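*
* Illustrative example (not part of the file): with pl022->cur_cs == 2,
* enabling the chip select clears BIT(2) in SSP_CSR (the line is active
* low) and disabling it sets BIT(2) again, exactly what
* internal_cs_control() below does with tmp &= ~BIT(...) / tmp |= BIT(...).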
*/ -static void internal_cs_control(struct pl022 *pl022, u32 command) +static void internal_cs_control(struct pl022 *pl022, bool enable) { u32 tmp; tmp = readw(SSP_CSR(pl022->virtbase)); - if (command == SSP_CHIP_SELECT) + if (enable) tmp &= ~BIT(pl022->cur_cs); else tmp |= BIT(pl022->cur_cs); writew(tmp, SSP_CSR(pl022->virtbase)); } -static void pl022_cs_control(struct pl022 *pl022, u32 command) +static void pl022_cs_control(struct spi_device *spi, bool enable) { + struct pl022 *pl022 = spi_controller_get_devdata(spi->controller); if (pl022->vendor->internal_cs_ctrl) - internal_cs_control(pl022, command); - else if (pl022->cur_gpiod) - /* - * This needs to be inverted since with GPIOLIB in - * control, the inversion will be handled by - * GPIOLIB's active low handling. The "command" - * passed into this function will be SSP_CHIP_SELECT - * which is enum:ed to 0, so we need the inverse - * (1) to activate chip select. - */ - gpiod_set_value(pl022->cur_gpiod, !command); -} - -/** - * giveback - current spi_message is over, schedule next message and call - * callback of this message. Assumes that caller already - * set message->status; dma and pio irqs are blocked - * @pl022: SSP driver private data structure - */ -static void giveback(struct pl022 *pl022) -{ - struct spi_transfer *last_transfer; - pl022->next_msg_cs_active = false; - - last_transfer = list_last_entry(&pl022->cur_msg->transfers, - struct spi_transfer, transfer_list); - - /* Delay if requested before any change in chip select */ - /* - * FIXME: This runs in interrupt context. - * Is this really smart? - */ - spi_transfer_delay_exec(last_transfer); - - if (!last_transfer->cs_change) { - struct spi_message *next_msg; - - /* - * cs_change was not set. We can keep the chip select - * enabled if there is message in the queue and it is - * for the same spi device. - * - * We cannot postpone this until pump_messages, because - * after calling msg->complete (below) the driver that - * sent the current message could be unloaded, which - * could invalidate the cs_control() callback... - */ - /* get a pointer to the next message, if any */ - next_msg = spi_get_next_queued_message(pl022->host); - - /* - * see if the next and current messages point - * to the same spi device. 
- */ - if (next_msg && next_msg->spi != pl022->cur_msg->spi) - next_msg = NULL; - if (!next_msg || pl022->cur_msg->state == STATE_ERROR) - pl022_cs_control(pl022, SSP_CHIP_DESELECT); - else - pl022->next_msg_cs_active = true; - - } - - pl022->cur_msg = NULL; - pl022->cur_transfer = NULL; - pl022->cur_chip = NULL; - - /* disable the SPI/SSP operation */ - writew((readw(SSP_CR1(pl022->virtbase)) & - (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); - - spi_finalize_current_message(pl022->host); + internal_cs_control(pl022, enable); } /** @@ -757,30 +675,6 @@ static void readwriter(struct pl022 *pl022) */ } -/** - * next_transfer - Move to the Next transfer in the current spi message - * @pl022: SSP driver private data structure - * - * This function moves though the linked list of spi transfers in the - * current spi message and returns with the state of current spi - * message i.e whether its last transfer is done(STATE_DONE) or - * Next transfer is ready(STATE_RUNNING) - */ -static void *next_transfer(struct pl022 *pl022) -{ - struct spi_message *msg = pl022->cur_msg; - struct spi_transfer *trans = pl022->cur_transfer; - - /* Move to next transfer */ - if (trans->transfer_list.next != &msg->transfers) { - pl022->cur_transfer = - list_entry(trans->transfer_list.next, - struct spi_transfer, transfer_list); - return STATE_RUNNING; - } - return STATE_DONE; -} - /* * This DMA functionality is only compiled in if we have * access to the generic DMA devices/DMA engine. @@ -800,7 +694,6 @@ static void unmap_free_dma_scatter(struct pl022 *pl022) static void dma_callback(void *data) { struct pl022 *pl022 = data; - struct spi_message *msg = pl022->cur_msg; BUG_ON(!pl022->sgt_rx.sgl); @@ -845,13 +738,7 @@ static void dma_callback(void *data) unmap_free_dma_scatter(pl022); - /* Update total bytes transferred */ - msg->actual_length += pl022->cur_transfer->len; - /* Move to next transfer */ - msg->state = next_transfer(pl022); - if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change) - pl022_cs_control(pl022, SSP_CHIP_DESELECT); - tasklet_schedule(&pl022->pump_transfers); + spi_finalize_current_transfer(pl022->host); } static void setup_dma_scatter(struct pl022 *pl022, @@ -1189,6 +1076,9 @@ err_no_rxchan: static void terminate_dma(struct pl022 *pl022) { + if (!pl022->dma_running) + return; + struct dma_chan *rxchan = pl022->dma_rx_channel; struct dma_chan *txchan = pl022->dma_tx_channel; @@ -1200,8 +1090,7 @@ static void terminate_dma(struct pl022 *pl022) static void pl022_dma_remove(struct pl022 *pl022) { - if (pl022->dma_running) - terminate_dma(pl022); + terminate_dma(pl022); if (pl022->dma_tx_channel) dma_release_channel(pl022->dma_tx_channel); if (pl022->dma_rx_channel) @@ -1225,6 +1114,10 @@ static inline int pl022_dma_probe(struct pl022 *pl022) return 0; } +static inline void terminate_dma(struct pl022 *pl022) +{ +} + static inline void pl022_dma_remove(struct pl022 *pl022) { } @@ -1246,16 +1139,7 @@ static inline void pl022_dma_remove(struct pl022 *pl022) static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) { struct pl022 *pl022 = dev_id; - struct spi_message *msg = pl022->cur_msg; u16 irq_status = 0; - - if (unlikely(!msg)) { - dev_err(&pl022->adev->dev, - "bad message state in interrupt handler"); - /* Never fail */ - return IRQ_HANDLED; - } - /* Read the Interrupt Status Register */ irq_status = readw(SSP_MIS(pl022->virtbase)); @@ -1287,10 +1171,8 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); 
writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); - msg->state = STATE_ERROR; - - /* Schedule message queue handler */ - tasklet_schedule(&pl022->pump_transfers); + pl022->cur_transfer->error |= SPI_TRANS_FAIL_IO; + spi_finalize_current_transfer(pl022->host); return IRQ_HANDLED; } @@ -1318,13 +1200,7 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) "number of bytes on a 16bit bus?)\n", (u32) (pl022->rx - pl022->rx_end)); } - /* Update total bytes transferred */ - msg->actual_length += pl022->cur_transfer->len; - /* Move to next transfer */ - msg->state = next_transfer(pl022); - if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change) - pl022_cs_control(pl022, SSP_CHIP_DESELECT); - tasklet_schedule(&pl022->pump_transfers); + spi_finalize_current_transfer(pl022->host); return IRQ_HANDLED; } @@ -1361,98 +1237,20 @@ static int set_up_next_transfer(struct pl022 *pl022, return 0; } -/** - * pump_transfers - Tasklet function which schedules next transfer - * when running in interrupt or DMA transfer mode. - * @data: SSP driver private data structure - * - */ -static void pump_transfers(unsigned long data) +static int do_interrupt_dma_transfer(struct pl022 *pl022) { - struct pl022 *pl022 = (struct pl022 *) data; - struct spi_message *message = NULL; - struct spi_transfer *transfer = NULL; - struct spi_transfer *previous = NULL; - - /* Get current state information */ - message = pl022->cur_msg; - transfer = pl022->cur_transfer; - - /* Handle for abort */ - if (message->state == STATE_ERROR) { - message->status = -EIO; - giveback(pl022); - return; - } - - /* Handle end of message */ - if (message->state == STATE_DONE) { - message->status = 0; - giveback(pl022); - return; - } - - /* Delay if requested at end of transfer before CS change */ - if (message->state == STATE_RUNNING) { - previous = list_entry(transfer->transfer_list.prev, - struct spi_transfer, - transfer_list); - /* - * FIXME: This runs in interrupt context. - * Is this really smart? - */ - spi_transfer_delay_exec(previous); - - /* Reselect chip select only if cs_change was requested */ - if (previous->cs_change) - pl022_cs_control(pl022, SSP_CHIP_SELECT); - } else { - /* STATE_START */ - message->state = STATE_RUNNING; - } - - if (set_up_next_transfer(pl022, transfer)) { - message->state = STATE_ERROR; - message->status = -EIO; - giveback(pl022); - return; - } - /* Flush the FIFOs and let's go! 
*/ - flush(pl022); - - if (pl022->cur_chip->enable_dma) { - if (configure_dma(pl022)) { - dev_dbg(&pl022->adev->dev, - "configuration of DMA failed, fall back to interrupt mode\n"); - goto err_config_dma; - } - return; - } - -err_config_dma: - /* enable all interrupts except RX */ - writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase)); -} + int ret; -static void do_interrupt_dma_transfer(struct pl022 *pl022) -{ /* * Default is to enable all interrupts except RX - * this will be enabled once TX is complete */ u32 irqflags = (u32)(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM); - /* Enable target chip, if not already active */ - if (!pl022->next_msg_cs_active) - pl022_cs_control(pl022, SSP_CHIP_SELECT); + ret = set_up_next_transfer(pl022, pl022->cur_transfer); + if (ret) + return ret; - if (set_up_next_transfer(pl022, pl022->cur_transfer)) { - /* Error path */ - pl022->cur_msg->state = STATE_ERROR; - pl022->cur_msg->status = -EIO; - giveback(pl022); - return; - } /* If we're using DMA, set up DMA here */ if (pl022->cur_chip->enable_dma) { /* Configure DMA transfer */ @@ -1469,6 +1267,7 @@ err_config_dma: writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), SSP_CR1(pl022->virtbase)); writew(irqflags, SSP_IMSC(pl022->virtbase)); + return 1; } static void print_current_status(struct pl022 *pl022) @@ -1495,111 +1294,65 @@ static void print_current_status(struct pl022 *pl022) } -static void do_polling_transfer(struct pl022 *pl022) +static int do_polling_transfer(struct pl022 *pl022) { - struct spi_message *message = NULL; - struct spi_transfer *transfer = NULL; - struct spi_transfer *previous = NULL; + int ret; unsigned long time, timeout; - message = pl022->cur_msg; - - while (message->state != STATE_DONE) { - /* Handle for abort */ - if (message->state == STATE_ERROR) - break; - transfer = pl022->cur_transfer; - - /* Delay if requested at end of transfer */ - if (message->state == STATE_RUNNING) { - previous = - list_entry(transfer->transfer_list.prev, - struct spi_transfer, transfer_list); - spi_transfer_delay_exec(previous); - if (previous->cs_change) - pl022_cs_control(pl022, SSP_CHIP_SELECT); - } else { - /* STATE_START */ - message->state = STATE_RUNNING; - if (!pl022->next_msg_cs_active) - pl022_cs_control(pl022, SSP_CHIP_SELECT); - } - - /* Configuration Changing Per Transfer */ - if (set_up_next_transfer(pl022, transfer)) { - /* Error path */ - message->state = STATE_ERROR; - break; - } - /* Flush FIFOs and enable SSP */ - flush(pl022); - writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), - SSP_CR1(pl022->virtbase)); - - dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); - - timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT); - while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) { - time = jiffies; - readwriter(pl022); - if (time_after(time, timeout)) { - dev_warn(&pl022->adev->dev, - "%s: timeout!\n", __func__); - message->state = STATE_TIMEOUT; - print_current_status(pl022); - goto out; - } - cpu_relax(); + /* Configuration Changing Per Transfer */ + ret = set_up_next_transfer(pl022, pl022->cur_transfer); + if (ret) + return ret; + /* Flush FIFOs and enable SSP */ + flush(pl022); + writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), + SSP_CR1(pl022->virtbase)); + + dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); + + timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT); + while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) { + time = jiffies; + readwriter(pl022); + if 
(time_after(time, timeout)) { + dev_warn(&pl022->adev->dev, + "%s: timeout!\n", __func__); + print_current_status(pl022); + return -ETIMEDOUT; } - - /* Update total byte transferred */ - message->actual_length += pl022->cur_transfer->len; - /* Move to next transfer */ - message->state = next_transfer(pl022); - if (message->state != STATE_DONE - && pl022->cur_transfer->cs_change) - pl022_cs_control(pl022, SSP_CHIP_DESELECT); + cpu_relax(); } -out: - /* Handle end of message */ - if (message->state == STATE_DONE) - message->status = 0; - else if (message->state == STATE_TIMEOUT) - message->status = -EAGAIN; - else - message->status = -EIO; - giveback(pl022); - return; + return 0; } -static int pl022_transfer_one_message(struct spi_controller *host, - struct spi_message *msg) +static int pl022_transfer_one(struct spi_controller *host, struct spi_device *spi, + struct spi_transfer *transfer) { struct pl022 *pl022 = spi_controller_get_devdata(host); - /* Initial message state */ - pl022->cur_msg = msg; - msg->state = STATE_START; - - pl022->cur_transfer = list_entry(msg->transfers.next, - struct spi_transfer, transfer_list); + pl022->cur_transfer = transfer; /* Setup the SPI using the per chip configuration */ - pl022->cur_chip = spi_get_ctldata(msg->spi); - pl022->cur_cs = spi_get_chipselect(msg->spi, 0); - /* This is always available but may be set to -ENOENT */ - pl022->cur_gpiod = spi_get_csgpiod(msg->spi, 0); + pl022->cur_chip = spi_get_ctldata(spi); + pl022->cur_cs = spi_get_chipselect(spi, 0); restore_state(pl022); flush(pl022); if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) - do_polling_transfer(pl022); + return do_polling_transfer(pl022); else - do_interrupt_dma_transfer(pl022); + return do_interrupt_dma_transfer(pl022); +} - return 0; +static void pl022_handle_err(struct spi_controller *ctlr, struct spi_message *message) +{ + struct pl022 *pl022 = spi_controller_get_devdata(ctlr); + + terminate_dma(pl022); + writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); + writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); } static int pl022_unprepare_transfer_hardware(struct spi_controller *host) @@ -2138,7 +1891,9 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) host->cleanup = pl022_cleanup; host->setup = pl022_setup; host->auto_runtime_pm = true; - host->transfer_one_message = pl022_transfer_one_message; + host->transfer_one = pl022_transfer_one; + host->set_cs = pl022_cs_control; + host->handle_err = pl022_handle_err; host->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware; host->rt = platform_info->rt; host->dev.of_node = dev->of_node; @@ -2175,10 +1930,6 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) goto err_no_clk; } - /* Initialize transfer pump */ - tasklet_init(&pl022->pump_transfers, pump_transfers, - (unsigned long)pl022); - /* Disable SSP */ writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); @@ -2261,7 +2012,6 @@ pl022_remove(struct amba_device *adev) pl022_dma_remove(pl022); amba_release_regions(adev); - tasklet_disable(&pl022->pump_transfers); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index fb452bc78372..cfc3b1ddbd22 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -29,12 +29,15 @@ #include <asm/unaligned.h> +#define SH_MSIOF_FLAG_FIXED_DTDL_200 BIT(0) + struct sh_msiof_chipdata { u32 bits_per_word_mask; u16 tx_fifo_size; u16 rx_fifo_size; u16 ctlr_flags; u16 min_div_pow; + 
u32 flags; }; struct sh_msiof_spi_priv { @@ -1072,6 +1075,16 @@ static const struct sh_msiof_chipdata rcar_gen3_data = { .min_div_pow = 1, }; +static const struct sh_msiof_chipdata rcar_r8a7795_data = { + .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | + SPI_BPW_MASK(24) | SPI_BPW_MASK(32), + .tx_fifo_size = 64, + .rx_fifo_size = 64, + .ctlr_flags = SPI_CONTROLLER_MUST_TX, + .min_div_pow = 1, + .flags = SH_MSIOF_FLAG_FIXED_DTDL_200, +}; + static const struct of_device_id sh_msiof_match[] __maybe_unused = { { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data }, { .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data }, @@ -1082,6 +1095,7 @@ static const struct of_device_id sh_msiof_match[] __maybe_unused = { { .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data }, { .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data }, { .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data }, + { .compatible = "renesas,msiof-r8a7795", .data = &rcar_r8a7795_data }, { .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data }, { .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data }, { .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data }, @@ -1279,6 +1293,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) return -ENXIO; } + if (chipdata->flags & SH_MSIOF_FLAG_FIXED_DTDL_200) + info->dtdl = 200; + if (info->mode == MSIOF_SPI_TARGET) ctlr = spi_alloc_target(&pdev->dev, sizeof(struct sh_msiof_spi_priv)); diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c index bf01feedbf93..262c11d977ea 100644 --- a/drivers/spi/spi-sprd-adi.c +++ b/drivers/spi/spi-sprd-adi.c @@ -138,8 +138,7 @@ struct sprd_adi_data { u32 slave_offset; u32 slave_addr_size; int (*read_check)(u32 val, u32 reg); - int (*restart)(struct notifier_block *this, - unsigned long mode, void *cmd); + int (*restart)(struct sys_off_data *data); void (*wdg_rst)(void *p); }; @@ -150,7 +149,6 @@ struct sprd_adi { struct hwspinlock *hwlock; unsigned long slave_vbase; unsigned long slave_pbase; - struct notifier_block restart_handler; const struct sprd_adi_data *data; }; @@ -370,11 +368,9 @@ static void sprd_adi_set_wdt_rst_mode(void *p) #endif } -static int sprd_adi_restart(struct notifier_block *this, unsigned long mode, - void *cmd, struct sprd_adi_wdg *wdg) +static int sprd_adi_restart(struct sprd_adi *sadi, unsigned long mode, + const char *cmd, struct sprd_adi_wdg *wdg) { - struct sprd_adi *sadi = container_of(this, struct sprd_adi, - restart_handler); u32 val, reboot_mode = 0; if (!cmd) @@ -448,8 +444,7 @@ static int sprd_adi_restart(struct notifier_block *this, unsigned long mode, return NOTIFY_DONE; } -static int sprd_adi_restart_sc9860(struct notifier_block *this, - unsigned long mode, void *cmd) +static int sprd_adi_restart_sc9860(struct sys_off_data *data) { struct sprd_adi_wdg wdg = { .base = PMIC_WDG_BASE, @@ -458,7 +453,7 @@ static int sprd_adi_restart_sc9860(struct notifier_block *this, .wdg_clk = PMIC_CLK_EN, }; - return sprd_adi_restart(this, mode, cmd, &wdg); + return sprd_adi_restart(data->cb_data, data->mode, data->cmd, &wdg); } static void sprd_adi_hw_init(struct sprd_adi *sadi) @@ -533,7 +528,7 @@ static int sprd_adi_probe(struct platform_device *pdev) pdev->id = of_alias_get_id(np, "spi"); num_chipselect = of_get_child_count(np); - ctlr = spi_alloc_master(&pdev->dev, sizeof(struct sprd_adi)); + ctlr = spi_alloc_host(&pdev->dev, sizeof(struct sprd_adi)); if (!ctlr) return -ENOMEM; @@ -590,9 +585,9 @@ static int 
sprd_adi_probe(struct platform_device *pdev) } if (sadi->data->restart) { - sadi->restart_handler.notifier_call = sadi->data->restart; - sadi->restart_handler.priority = 128; - ret = register_restart_handler(&sadi->restart_handler); + ret = devm_register_restart_handler(&pdev->dev, + sadi->data->restart, + sadi); if (ret) { dev_err(&pdev->dev, "can not register restart handler\n"); goto put_ctlr; @@ -606,14 +601,6 @@ put_ctlr: return ret; } -static void sprd_adi_remove(struct platform_device *pdev) -{ - struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev); - struct sprd_adi *sadi = spi_controller_get_devdata(ctlr); - - unregister_restart_handler(&sadi->restart_handler); -} - static struct sprd_adi_data sc9860_data = { .slave_offset = ADI_10BIT_SLAVE_OFFSET, .slave_addr_size = ADI_10BIT_SLAVE_ADDR_SIZE, @@ -657,7 +644,6 @@ static struct platform_driver sprd_adi_driver = { .of_match_table = sprd_adi_of_match, }, .probe = sprd_adi_probe, - .remove_new = sprd_adi_remove, }; module_platform_driver(sprd_adi_driver); diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c index 95377cf748c0..831ebae10fe0 100644 --- a/drivers/spi/spi-sprd.c +++ b/drivers/spi/spi-sprd.c @@ -578,7 +578,7 @@ static void sprd_spi_dma_release(struct sprd_spi *ss) static int sprd_spi_dma_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t) { - struct sprd_spi *ss = spi_master_get_devdata(sdev->master); + struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller); u32 trans_len = ss->trans_len; int ret, write_size = 0; @@ -923,7 +923,7 @@ static int sprd_spi_probe(struct platform_device *pdev) int ret; pdev->id = of_alias_get_id(pdev->dev.of_node, "spi"); - sctlr = spi_alloc_master(&pdev->dev, sizeof(*ss)); + sctlr = spi_alloc_host(&pdev->dev, sizeof(*ss)); if (!sctlr) return -ENOMEM; diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c index 7fcff9c539e2..e064025e2fd6 100644 --- a/drivers/spi/spi-st-ssc4.c +++ b/drivers/spi/spi-st-ssc4.c @@ -6,7 +6,7 @@ * Patrice Chotard <patrice.chotard@st.com> * Lee Jones <lee.jones@linaro.org> * - * SPI master mode controller driver, used in STMicroelectronics devices. + * SPI host mode controller driver, used in STMicroelectronics devices. 
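+ *
+ * Note: this file is part of the same tree-wide spi_master ->
+ * spi_controller renaming visible in the geni-qcom, sprd and stm32 hunks
+ * here: spi_alloc_master() becomes spi_alloc_host(),
+ * spi_master_get_devdata() becomes spi_controller_get_devdata(),
+ * spi_register_master() becomes spi_register_controller(), and "slave"
+ * terminology becomes "target".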
*/ #include <linux/clk.h> @@ -115,10 +115,10 @@ static void ssc_read_rx_fifo(struct spi_st *spi_st) spi_st->words_remaining -= count; } -static int spi_st_transfer_one(struct spi_master *master, +static int spi_st_transfer_one(struct spi_controller *host, struct spi_device *spi, struct spi_transfer *t) { - struct spi_st *spi_st = spi_master_get_devdata(master); + struct spi_st *spi_st = spi_controller_get_devdata(host); uint32_t ctl = 0; /* Setup transfer */ @@ -165,7 +165,7 @@ static int spi_st_transfer_one(struct spi_master *master, if (ctl) writel_relaxed(ctl, spi_st->base + SSC_CTL); - spi_finalize_current_transfer(spi->master); + spi_finalize_current_transfer(spi->controller); return t->len; } @@ -174,7 +174,7 @@ static int spi_st_transfer_one(struct spi_master *master, #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH) static int spi_st_setup(struct spi_device *spi) { - struct spi_st *spi_st = spi_master_get_devdata(spi->master); + struct spi_st *spi_st = spi_controller_get_devdata(spi->controller); u32 spi_st_clk, sscbrg, var; u32 hz = spi->max_speed_hz; @@ -274,35 +274,35 @@ static irqreturn_t spi_st_irq(int irq, void *dev_id) static int spi_st_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - struct spi_master *master; + struct spi_controller *host; struct spi_st *spi_st; int irq, ret = 0; u32 var; - master = spi_alloc_master(&pdev->dev, sizeof(*spi_st)); - if (!master) + host = spi_alloc_host(&pdev->dev, sizeof(*spi_st)); + if (!host) return -ENOMEM; - master->dev.of_node = np; - master->mode_bits = MODEBITS; - master->setup = spi_st_setup; - master->transfer_one = spi_st_transfer_one; - master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); - master->auto_runtime_pm = true; - master->bus_num = pdev->id; - master->use_gpio_descriptors = true; - spi_st = spi_master_get_devdata(master); + host->dev.of_node = np; + host->mode_bits = MODEBITS; + host->setup = spi_st_setup; + host->transfer_one = spi_st_transfer_one; + host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); + host->auto_runtime_pm = true; + host->bus_num = pdev->id; + host->use_gpio_descriptors = true; + spi_st = spi_controller_get_devdata(host); spi_st->clk = devm_clk_get(&pdev->dev, "ssc"); if (IS_ERR(spi_st->clk)) { dev_err(&pdev->dev, "Unable to request clock\n"); ret = PTR_ERR(spi_st->clk); - goto put_master; + goto put_host; } ret = clk_prepare_enable(spi_st->clk); if (ret) - goto put_master; + goto put_host; init_completion(&spi_st->done); @@ -324,7 +324,7 @@ static int spi_st_probe(struct platform_device *pdev) var &= ~SSC_CTL_SR; writel_relaxed(var, spi_st->base + SSC_CTL); - /* Set SSC into slave mode before reconfiguring PIO pins */ + /* Set SSC into target mode before reconfiguring PIO pins */ var = readl_relaxed(spi_st->base + SSC_CTL); var &= ~SSC_CTL_MS; writel_relaxed(var, spi_st->base + SSC_CTL); @@ -347,11 +347,11 @@ static int spi_st_probe(struct platform_device *pdev) pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - platform_set_drvdata(pdev, master); + platform_set_drvdata(pdev, host); - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, host); if (ret) { - dev_err(&pdev->dev, "Failed to register master\n"); + dev_err(&pdev->dev, "Failed to register host\n"); goto rpm_disable; } @@ -361,15 +361,15 @@ rpm_disable: pm_runtime_disable(&pdev->dev); clk_disable: clk_disable_unprepare(spi_st->clk); -put_master: - spi_master_put(master); +put_host: + 
spi_controller_put(host); return ret; } static void spi_st_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); - struct spi_st *spi_st = spi_master_get_devdata(master); + struct spi_controller *host = platform_get_drvdata(pdev); + struct spi_st *spi_st = spi_controller_get_devdata(host); pm_runtime_disable(&pdev->dev); @@ -381,8 +381,8 @@ static void spi_st_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int spi_st_runtime_suspend(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct spi_st *spi_st = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct spi_st *spi_st = spi_controller_get_devdata(host); writel_relaxed(0, spi_st->base + SSC_IEN); pinctrl_pm_select_sleep_state(dev); @@ -394,8 +394,8 @@ static int spi_st_runtime_suspend(struct device *dev) static int spi_st_runtime_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct spi_st *spi_st = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct spi_st *spi_st = spi_controller_get_devdata(host); int ret; ret = clk_prepare_enable(spi_st->clk); @@ -408,10 +408,10 @@ static int spi_st_runtime_resume(struct device *dev) #ifdef CONFIG_PM_SLEEP static int spi_st_suspend(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); + struct spi_controller *host = dev_get_drvdata(dev); int ret; - ret = spi_master_suspend(master); + ret = spi_controller_suspend(host); if (ret) return ret; @@ -420,10 +420,10 @@ static int spi_st_suspend(struct device *dev) static int spi_st_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); + struct spi_controller *host = dev_get_drvdata(dev); int ret; - ret = spi_master_resume(master); + ret = spi_controller_resume(host); if (ret) return ret; diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index def74ae9b5f6..385832030459 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -357,7 +357,7 @@ static int stm32_qspi_get_mode(u8 buswidth) static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op) { - struct stm32_qspi *qspi = spi_controller_get_devdata(spi->master); + struct stm32_qspi *qspi = spi_controller_get_devdata(spi->controller); struct stm32_qspi_flash *flash = &qspi->flash[spi_get_chipselect(spi, 0)]; u32 ccr, cr; int timeout, err = 0, err_poll_status = 0; @@ -448,7 +448,7 @@ static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op * unsigned long polling_rate_us, unsigned long timeout_ms) { - struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master); + struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->controller); int ret; if (!spi_mem_supports_op(mem, op)) @@ -476,7 +476,7 @@ static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op * static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { - struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master); + struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->controller); int ret; ret = pm_runtime_resume_and_get(qspi->dev); @@ -500,7 +500,7 @@ static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) static int stm32_qspi_dirmap_create(struct spi_mem_dirmap_desc *desc) { - struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master); + struct stm32_qspi *qspi = 
spi_controller_get_devdata(desc->mem->spi->controller); if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT) return -EOPNOTSUPP; @@ -518,7 +518,7 @@ static int stm32_qspi_dirmap_create(struct spi_mem_dirmap_desc *desc) static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, void *buf) { - struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master); + struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->controller); struct spi_mem_op op; u32 addr_max; int ret; @@ -640,7 +640,7 @@ end_of_transfer: static int stm32_qspi_setup(struct spi_device *spi) { - struct spi_controller *ctrl = spi->master; + struct spi_controller *ctrl = spi->controller; struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl); struct stm32_qspi_flash *flash; u32 presc, mode; @@ -775,7 +775,7 @@ static int stm32_qspi_probe(struct platform_device *pdev) struct resource *res; int ret, irq; - ctrl = devm_spi_alloc_master(dev, sizeof(*qspi)); + ctrl = devm_spi_alloc_host(dev, sizeof(*qspi)); if (!ctrl) return -ENOMEM; @@ -861,7 +861,7 @@ static int stm32_qspi_probe(struct platform_device *pdev) pm_runtime_enable(dev); pm_runtime_get_noresume(dev); - ret = spi_register_master(ctrl); + ret = spi_register_controller(ctrl); if (ret) goto err_pm_runtime_free; @@ -892,7 +892,7 @@ static void stm32_qspi_remove(struct platform_device *pdev) struct stm32_qspi *qspi = platform_get_drvdata(pdev); pm_runtime_get_sync(qspi->dev); - spi_unregister_master(qspi->ctrl); + spi_unregister_controller(qspi->ctrl); /* disable qspi */ writel_relaxed(0, qspi->io_base + QSPI_CR); stm32_qspi_dma_free(qspi); diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index e6e3e4ea29f9..e61302ef3c21 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -22,58 +22,65 @@ #define DRIVER_NAME "spi_stm32" -/* STM32F4 SPI registers */ -#define STM32F4_SPI_CR1 0x00 -#define STM32F4_SPI_CR2 0x04 -#define STM32F4_SPI_SR 0x08 -#define STM32F4_SPI_DR 0x0C -#define STM32F4_SPI_I2SCFGR 0x1C - -/* STM32F4_SPI_CR1 bit fields */ -#define STM32F4_SPI_CR1_CPHA BIT(0) -#define STM32F4_SPI_CR1_CPOL BIT(1) -#define STM32F4_SPI_CR1_MSTR BIT(2) -#define STM32F4_SPI_CR1_BR_SHIFT 3 -#define STM32F4_SPI_CR1_BR GENMASK(5, 3) -#define STM32F4_SPI_CR1_SPE BIT(6) -#define STM32F4_SPI_CR1_LSBFRST BIT(7) -#define STM32F4_SPI_CR1_SSI BIT(8) -#define STM32F4_SPI_CR1_SSM BIT(9) -#define STM32F4_SPI_CR1_RXONLY BIT(10) +/* STM32F4/7 SPI registers */ +#define STM32FX_SPI_CR1 0x00 +#define STM32FX_SPI_CR2 0x04 +#define STM32FX_SPI_SR 0x08 +#define STM32FX_SPI_DR 0x0C +#define STM32FX_SPI_I2SCFGR 0x1C + +/* STM32FX_SPI_CR1 bit fields */ +#define STM32FX_SPI_CR1_CPHA BIT(0) +#define STM32FX_SPI_CR1_CPOL BIT(1) +#define STM32FX_SPI_CR1_MSTR BIT(2) +#define STM32FX_SPI_CR1_BR_SHIFT 3 +#define STM32FX_SPI_CR1_BR GENMASK(5, 3) +#define STM32FX_SPI_CR1_SPE BIT(6) +#define STM32FX_SPI_CR1_LSBFRST BIT(7) +#define STM32FX_SPI_CR1_SSI BIT(8) +#define STM32FX_SPI_CR1_SSM BIT(9) +#define STM32FX_SPI_CR1_RXONLY BIT(10) #define STM32F4_SPI_CR1_DFF BIT(11) -#define STM32F4_SPI_CR1_CRCNEXT BIT(12) -#define STM32F4_SPI_CR1_CRCEN BIT(13) -#define STM32F4_SPI_CR1_BIDIOE BIT(14) -#define STM32F4_SPI_CR1_BIDIMODE BIT(15) -#define STM32F4_SPI_CR1_BR_MIN 0 -#define STM32F4_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3) - -/* STM32F4_SPI_CR2 bit fields */ -#define STM32F4_SPI_CR2_RXDMAEN BIT(0) -#define STM32F4_SPI_CR2_TXDMAEN BIT(1) -#define STM32F4_SPI_CR2_SSOE BIT(2) -#define STM32F4_SPI_CR2_FRF BIT(4) -#define 
STM32F4_SPI_CR2_ERRIE BIT(5) -#define STM32F4_SPI_CR2_RXNEIE BIT(6) -#define STM32F4_SPI_CR2_TXEIE BIT(7) - -/* STM32F4_SPI_SR bit fields */ -#define STM32F4_SPI_SR_RXNE BIT(0) -#define STM32F4_SPI_SR_TXE BIT(1) -#define STM32F4_SPI_SR_CHSIDE BIT(2) -#define STM32F4_SPI_SR_UDR BIT(3) -#define STM32F4_SPI_SR_CRCERR BIT(4) -#define STM32F4_SPI_SR_MODF BIT(5) -#define STM32F4_SPI_SR_OVR BIT(6) -#define STM32F4_SPI_SR_BSY BIT(7) -#define STM32F4_SPI_SR_FRE BIT(8) - -/* STM32F4_SPI_I2SCFGR bit fields */ -#define STM32F4_SPI_I2SCFGR_I2SMOD BIT(11) +#define STM32F7_SPI_CR1_CRCL BIT(11) +#define STM32FX_SPI_CR1_CRCNEXT BIT(12) +#define STM32FX_SPI_CR1_CRCEN BIT(13) +#define STM32FX_SPI_CR1_BIDIOE BIT(14) +#define STM32FX_SPI_CR1_BIDIMODE BIT(15) +#define STM32FX_SPI_CR1_BR_MIN 0 +#define STM32FX_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3) + +/* STM32FX_SPI_CR2 bit fields */ +#define STM32FX_SPI_CR2_RXDMAEN BIT(0) +#define STM32FX_SPI_CR2_TXDMAEN BIT(1) +#define STM32FX_SPI_CR2_SSOE BIT(2) +#define STM32FX_SPI_CR2_FRF BIT(4) +#define STM32FX_SPI_CR2_ERRIE BIT(5) +#define STM32FX_SPI_CR2_RXNEIE BIT(6) +#define STM32FX_SPI_CR2_TXEIE BIT(7) +#define STM32F7_SPI_CR2_DS GENMASK(11, 8) +#define STM32F7_SPI_CR2_FRXTH BIT(12) +#define STM32F7_SPI_CR2_LDMA_RX BIT(13) +#define STM32F7_SPI_CR2_LDMA_TX BIT(14) + +/* STM32FX_SPI_SR bit fields */ +#define STM32FX_SPI_SR_RXNE BIT(0) +#define STM32FX_SPI_SR_TXE BIT(1) +#define STM32FX_SPI_SR_CHSIDE BIT(2) +#define STM32FX_SPI_SR_UDR BIT(3) +#define STM32FX_SPI_SR_CRCERR BIT(4) +#define STM32FX_SPI_SR_MODF BIT(5) +#define STM32FX_SPI_SR_OVR BIT(6) +#define STM32FX_SPI_SR_BSY BIT(7) +#define STM32FX_SPI_SR_FRE BIT(8) +#define STM32F7_SPI_SR_FRLVL GENMASK(10, 9) +#define STM32F7_SPI_SR_FTLVL GENMASK(12, 11) + +/* STM32FX_SPI_I2SCFGR bit fields */ +#define STM32FX_SPI_I2SCFGR_I2SMOD BIT(11) /* STM32F4 SPI Baud Rate min/max divisor */ -#define STM32F4_SPI_BR_DIV_MIN (2 << STM32F4_SPI_CR1_BR_MIN) -#define STM32F4_SPI_BR_DIV_MAX (2 << STM32F4_SPI_CR1_BR_MAX) +#define STM32FX_SPI_BR_DIV_MIN (2 << STM32FX_SPI_CR1_BR_MIN) +#define STM32FX_SPI_BR_DIV_MAX (2 << STM32FX_SPI_CR1_BR_MAX) /* STM32H7 SPI registers */ #define STM32H7_SPI_CR1 0x00 @@ -147,6 +154,20 @@ /* STM32H7_SPI_I2SCFGR bit fields */ #define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0) +/* STM32MP25 SPI registers bit fields */ +#define STM32MP25_SPI_HWCFGR1 0x3F0 + +/* STM32MP25_SPI_CR2 bit fields */ +#define STM32MP25_SPI_TSIZE_MAX_LIMITED GENMASK(9, 0) + +/* STM32MP25_SPI_HWCFGR1 */ +#define STM32MP25_SPI_HWCFGR1_FULLCFG GENMASK(27, 24) +#define STM32MP25_SPI_HWCFGR1_FULLCFG_LIMITED 0x0 +#define STM32MP25_SPI_HWCFGR1_FULLCFG_FULL 0x1 +#define STM32MP25_SPI_HWCFGR1_DSCFG GENMASK(19, 16) +#define STM32MP25_SPI_HWCFGR1_DSCFG_16_B 0x0 +#define STM32MP25_SPI_HWCFGR1_DSCFG_32_B 0x1 + /* STM32H7 SPI Master Baud Rate min/max divisor */ #define STM32H7_SPI_MBR_DIV_MIN (2 << STM32H7_SPI_CFG1_MBR_MIN) #define STM32H7_SPI_MBR_DIV_MAX (2 << STM32H7_SPI_CFG1_MBR_MAX) @@ -173,7 +194,7 @@ #define SPI_DMA_MIN_BYTES 16 /* STM32 SPI driver helpers */ -#define STM32_SPI_MASTER_MODE(stm32_spi) (!(stm32_spi)->device_mode) +#define STM32_SPI_HOST_MODE(stm32_spi) (!(stm32_spi)->device_mode) #define STM32_SPI_DEVICE_MODE(stm32_spi) ((stm32_spi)->device_mode) /** @@ -200,6 +221,7 @@ struct stm32_spi_reg { * @br: baud rate register and bitfields * @rx: SPI RX data register * @tx: SPI TX data register + * @fullcfg: SPI full or limited feature set register */ struct stm32_spi_regspec { const struct stm32_spi_reg en; @@ -212,6 +234,7 @@ struct 
stm32_spi_regspec { const struct stm32_spi_reg br; const struct stm32_spi_reg rx; const struct stm32_spi_reg tx; + const struct stm32_spi_reg fullcfg; }; struct stm32_spi; @@ -222,13 +245,15 @@ struct stm32_spi; * @get_fifo_size: routine to get fifo size * @get_bpw_mask: routine to get bits per word mask * @disable: routine to disable controller - * @config: routine to configure controller as SPI Master + * @config: routine to configure controller as SPI Host * @set_bpw: routine to configure registers to for bits per word * @set_mode: routine to configure registers to desired mode * @set_data_idleness: optional routine to configure registers to desired idle * time between frames (if driver has this functionality) * @set_number_of_data: optional routine to configure registers to desired * number of data (if driver has this functionality) + * @write_tx: routine to write to transmit register/FIFO + * @read_rx: routine to read from receive register/FIFO * @transfer_one_dma_start: routine to start transfer a single spi_transfer * using DMA * @dma_rx_cb: routine to call after DMA RX channel operation is complete @@ -241,6 +266,7 @@ struct stm32_spi; * @has_fifo: boolean to know if fifo is used for driver * @has_device_mode: is this compatible capable to switch on device mode * @flags: compatible specific SPI controller flags used at registration time + * @prevent_dma_burst: boolean to indicate to prevent DMA burst */ struct stm32_spi_cfg { const struct stm32_spi_regspec *regs; @@ -252,6 +278,8 @@ struct stm32_spi_cfg { int (*set_mode)(struct stm32_spi *spi, unsigned int comm_type); void (*set_data_idleness)(struct stm32_spi *spi, u32 length); int (*set_number_of_data)(struct stm32_spi *spi, u32 length); + void (*write_tx)(struct stm32_spi *spi); + void (*read_rx)(struct stm32_spi *spi); void (*transfer_one_dma_start)(struct stm32_spi *spi); void (*dma_rx_cb)(void *data); void (*dma_tx_cb)(void *data); @@ -263,6 +291,7 @@ struct stm32_spi_cfg { bool has_fifo; bool has_device_mode; u16 flags; + bool prevent_dma_burst; }; /** @@ -276,7 +305,9 @@ struct stm32_spi_cfg { * @lock: prevent I/O concurrent access * @irq: SPI controller interrupt line * @fifo_size: size of the embedded fifo in bytes - * @cur_midi: master inter-data idleness in ns + * @t_size_max: maximum number of data of one transfer + * @feature_set: SPI full or limited feature set + * @cur_midi: host inter-data idleness in ns * @cur_speed: speed configured in Hz * @cur_half_period: time of a half bit in us * @cur_bpw: number of bits in a single SPI data frame @@ -303,6 +334,10 @@ struct stm32_spi { spinlock_t lock; /* prevent I/O concurrent access */ int irq; unsigned int fifo_size; + unsigned int t_size_max; + unsigned int feature_set; +#define STM32_SPI_FEATURE_LIMITED STM32MP25_SPI_HWCFGR1_FULLCFG_LIMITED /* 0x0 */ +#define STM32_SPI_FEATURE_FULL STM32MP25_SPI_HWCFGR1_FULLCFG_FULL /* 0x1 */ unsigned int cur_midi; unsigned int cur_speed; @@ -324,20 +359,20 @@ struct stm32_spi { bool device_mode; }; -static const struct stm32_spi_regspec stm32f4_spi_regspec = { - .en = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE }, +static const struct stm32_spi_regspec stm32fx_spi_regspec = { + .en = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_SPE }, - .dma_rx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_RXDMAEN }, - .dma_tx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN }, + .dma_rx_en = { STM32FX_SPI_CR2, STM32FX_SPI_CR2_RXDMAEN }, + .dma_tx_en = { STM32FX_SPI_CR2, STM32FX_SPI_CR2_TXDMAEN }, - .cpol = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPOL }, - .cpha = { 
STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPHA }, - .lsb_first = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_LSBFRST }, + .cpol = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_CPOL }, + .cpha = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_CPHA }, + .lsb_first = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_LSBFRST }, .cs_high = {}, - .br = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_BR, STM32F4_SPI_CR1_BR_SHIFT }, + .br = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_BR, STM32FX_SPI_CR1_BR_SHIFT }, - .rx = { STM32F4_SPI_DR }, - .tx = { STM32F4_SPI_DR }, + .rx = { STM32FX_SPI_DR }, + .tx = { STM32FX_SPI_DR }, }; static const struct stm32_spi_regspec stm32h7_spi_regspec = { @@ -360,6 +395,28 @@ static const struct stm32_spi_regspec stm32h7_spi_regspec = { .tx = { STM32H7_SPI_TXDR }, }; +static const struct stm32_spi_regspec stm32mp25_spi_regspec = { + /* SPI data transfer is enabled but spi_ker_ck is idle. + * CFG1 and CFG2 registers are write protected when SPE is enabled. + */ + .en = { STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE }, + + .dma_rx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_RXDMAEN }, + .dma_tx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN }, + + .cpol = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPOL }, + .cpha = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPHA }, + .lsb_first = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_LSBFRST }, + .cs_high = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_SSIOP }, + .br = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_MBR, + STM32H7_SPI_CFG1_MBR_SHIFT }, + + .rx = { STM32H7_SPI_RXDR }, + .tx = { STM32H7_SPI_TXDR }, + + .fullcfg = { STM32MP25_SPI_HWCFGR1, STM32MP25_SPI_HWCFGR1_FULLCFG }, +}; + static inline void stm32_spi_set_bits(struct stm32_spi *spi, u32 offset, u32 bits) { @@ -410,6 +467,16 @@ static int stm32f4_spi_get_bpw_mask(struct stm32_spi *spi) } /** + * stm32f7_spi_get_bpw_mask - Return bits per word mask + * @spi: pointer to the spi controller data structure + */ +static int stm32f7_spi_get_bpw_mask(struct stm32_spi *spi) +{ + dev_dbg(spi->dev, "16-bit maximum data frame\n"); + return SPI_BPW_RANGE_MASK(4, 16); +} + +/** * stm32h7_spi_get_bpw_mask - Return bits per word mask * @spi: pointer to the spi controller data structure */ @@ -437,6 +504,28 @@ static int stm32h7_spi_get_bpw_mask(struct stm32_spi *spi) } /** + * stm32mp25_spi_get_bpw_mask - Return bits per word mask + * @spi: pointer to the spi controller data structure + */ +static int stm32mp25_spi_get_bpw_mask(struct stm32_spi *spi) +{ + u32 dscfg, max_bpw; + + if (spi->feature_set == STM32_SPI_FEATURE_LIMITED) { + dev_dbg(spi->dev, "8-bit or 16-bit data frame supported\n"); + return SPI_BPW_MASK(8) | SPI_BPW_MASK(16); + } + + dscfg = FIELD_GET(STM32MP25_SPI_HWCFGR1_DSCFG, + readl_relaxed(spi->base + STM32MP25_SPI_HWCFGR1)); + max_bpw = 16; + if (dscfg == STM32MP25_SPI_HWCFGR1_DSCFG_32_B) + max_bpw = 32; + dev_dbg(spi->dev, "%d-bit maximum data frame\n", max_bpw); + return SPI_BPW_RANGE_MASK(4, max_bpw); +} + +/** * stm32_spi_prepare_mbr - Determine baud rate divisor value * @spi: pointer to the spi controller data structure * @speed_hz: requested speed @@ -502,19 +591,48 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len) */ static void stm32f4_spi_write_tx(struct stm32_spi *spi) { - if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) & - STM32F4_SPI_SR_TXE)) { + if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32FX_SPI_SR) & + STM32FX_SPI_SR_TXE)) { u32 offs = spi->cur_xferlen - spi->tx_len; if (spi->cur_bpw == 16) { const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs); - writew_relaxed(*tx_buf16, spi->base + STM32F4_SPI_DR); + 
writew_relaxed(*tx_buf16, spi->base + STM32FX_SPI_DR); spi->tx_len -= sizeof(u16); } else { const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs); - writeb_relaxed(*tx_buf8, spi->base + STM32F4_SPI_DR); + writeb_relaxed(*tx_buf8, spi->base + STM32FX_SPI_DR); + spi->tx_len -= sizeof(u8); + } + } + + dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len); +} + +/** + * stm32f7_spi_write_tx - Write bytes to Transmit Data Register + * @spi: pointer to the spi controller data structure + * + * Reads from tx_buf depend on the remaining byte count, to avoid reading + * beyond the end of tx_buf. + */ +static void stm32f7_spi_write_tx(struct stm32_spi *spi) +{ + if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32FX_SPI_SR) & + STM32FX_SPI_SR_TXE)) { + u32 offs = spi->cur_xferlen - spi->tx_len; + + if (spi->tx_len >= sizeof(u16)) { + const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs); + + writew_relaxed(*tx_buf16, spi->base + STM32FX_SPI_DR); + spi->tx_len -= sizeof(u16); + } else { + const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs); + + writeb_relaxed(*tx_buf8, spi->base + STM32FX_SPI_DR); spi->tx_len -= sizeof(u8); } } @@ -566,19 +684,19 @@ static void stm32h7_spi_write_txfifo(struct stm32_spi *spi) */ static void stm32f4_spi_read_rx(struct stm32_spi *spi) { - if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) & - STM32F4_SPI_SR_RXNE)) { + if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32FX_SPI_SR) & + STM32FX_SPI_SR_RXNE)) { u32 offs = spi->cur_xferlen - spi->rx_len; if (spi->cur_bpw == 16) { u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs); - *rx_buf16 = readw_relaxed(spi->base + STM32F4_SPI_DR); + *rx_buf16 = readw_relaxed(spi->base + STM32FX_SPI_DR); spi->rx_len -= sizeof(u16); } else { u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs); - *rx_buf8 = readb_relaxed(spi->base + STM32F4_SPI_DR); + *rx_buf8 = readb_relaxed(spi->base + STM32FX_SPI_DR); spi->rx_len -= sizeof(u8); } } @@ -587,6 +705,46 @@ static void stm32f4_spi_read_rx(struct stm32_spi *spi) } /** + * stm32f7_spi_read_rx - Read bytes from Receive Data Register + * @spi: pointer to the spi controller data structure + * + * Writes into rx_buf depend on the remaining byte count, to avoid writing + * beyond the end of rx_buf.
+ */ +static void stm32f7_spi_read_rx(struct stm32_spi *spi) +{ + u32 sr = readl_relaxed(spi->base + STM32FX_SPI_SR); + u32 frlvl = FIELD_GET(STM32F7_SPI_SR_FRLVL, sr); + + while ((spi->rx_len > 0) && (frlvl > 0)) { + u32 offs = spi->cur_xferlen - spi->rx_len; + + if ((spi->rx_len >= sizeof(u16)) && (frlvl >= 2)) { + u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs); + + *rx_buf16 = readw_relaxed(spi->base + STM32FX_SPI_DR); + spi->rx_len -= sizeof(u16); + } else { + u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs); + + *rx_buf8 = readb_relaxed(spi->base + STM32FX_SPI_DR); + spi->rx_len -= sizeof(u8); + } + + sr = readl_relaxed(spi->base + STM32FX_SPI_SR); + frlvl = FIELD_GET(STM32F7_SPI_SR_FRLVL, sr); + } + + if (spi->rx_len >= sizeof(u16)) + stm32_spi_clr_bits(spi, STM32FX_SPI_CR2, STM32F7_SPI_CR2_FRXTH); + else + stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32F7_SPI_CR2_FRXTH); + + dev_dbg(spi->dev, "%s: %d bytes left (sr=%08x)\n", + __func__, spi->rx_len, sr); +} + +/** * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register * @spi: pointer to the spi controller data structure * @@ -645,10 +803,10 @@ static void stm32_spi_enable(struct stm32_spi *spi) } /** - * stm32f4_spi_disable - Disable SPI controller + * stm32fx_spi_disable - Disable SPI controller * @spi: pointer to the spi controller data structure */ -static void stm32f4_spi_disable(struct stm32_spi *spi) +static void stm32fx_spi_disable(struct stm32_spi *spi) { unsigned long flags; u32 sr; @@ -657,20 +815,20 @@ static void stm32f4_spi_disable(struct stm32_spi *spi) spin_lock_irqsave(&spi->lock, flags); - if (!(readl_relaxed(spi->base + STM32F4_SPI_CR1) & - STM32F4_SPI_CR1_SPE)) { + if (!(readl_relaxed(spi->base + STM32FX_SPI_CR1) & + STM32FX_SPI_CR1_SPE)) { spin_unlock_irqrestore(&spi->lock, flags); return; } /* Disable interrupts */ - stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXEIE | - STM32F4_SPI_CR2_RXNEIE | - STM32F4_SPI_CR2_ERRIE); + stm32_spi_clr_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_TXEIE | + STM32FX_SPI_CR2_RXNEIE | + STM32FX_SPI_CR2_ERRIE); /* Wait until BSY = 0 */ - if (readl_relaxed_poll_timeout_atomic(spi->base + STM32F4_SPI_SR, - sr, !(sr & STM32F4_SPI_SR_BSY), + if (readl_relaxed_poll_timeout_atomic(spi->base + STM32FX_SPI_SR, + sr, !(sr & STM32FX_SPI_SR_BSY), 10, 100000) < 0) { dev_warn(spi->dev, "disabling condition timeout\n"); } @@ -680,14 +838,14 @@ static void stm32f4_spi_disable(struct stm32_spi *spi) if (spi->cur_usedma && spi->dma_rx) dmaengine_terminate_async(spi->dma_rx); - stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE); + stm32_spi_clr_bits(spi, STM32FX_SPI_CR1, STM32FX_SPI_CR1_SPE); - stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN | - STM32F4_SPI_CR2_RXDMAEN); + stm32_spi_clr_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_TXDMAEN | + STM32FX_SPI_CR2_RXDMAEN); /* Sequence to clear OVR flag */ - readl_relaxed(spi->base + STM32F4_SPI_DR); - readl_relaxed(spi->base + STM32F4_SPI_SR); + readl_relaxed(spi->base + STM32FX_SPI_DR); + readl_relaxed(spi->base + STM32FX_SPI_SR); spin_unlock_irqrestore(&spi->lock, flags); } @@ -763,11 +921,11 @@ static bool stm32_spi_can_dma(struct spi_controller *ctrl, } /** - * stm32f4_spi_irq_event - Interrupt handler for SPI controller events + * stm32fx_spi_irq_event - Interrupt handler for SPI controller events * @irq: interrupt line * @dev_id: SPI controller ctrl interface */ -static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id) +static irqreturn_t stm32fx_spi_irq_event(int irq, void *dev_id) { struct spi_controller 
*ctrl = dev_id; struct stm32_spi *spi = spi_controller_get_devdata(ctrl); @@ -776,26 +934,26 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id) spin_lock(&spi->lock); - sr = readl_relaxed(spi->base + STM32F4_SPI_SR); + sr = readl_relaxed(spi->base + STM32FX_SPI_SR); /* * BSY flag is not handled in interrupt but it is normal behavior when * this flag is set. */ - sr &= ~STM32F4_SPI_SR_BSY; + sr &= ~STM32FX_SPI_SR_BSY; if (!spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX)) { /* OVR flag shouldn't be handled for TX only mode */ - sr &= ~(STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE); - mask |= STM32F4_SPI_SR_TXE; + sr &= ~(STM32FX_SPI_SR_OVR | STM32FX_SPI_SR_RXNE); + mask |= STM32FX_SPI_SR_TXE; } if (!spi->cur_usedma && (spi->cur_comm == SPI_FULL_DUPLEX || spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_3WIRE_RX)) { /* TXE flag is set and is handled when RXNE flag occurs */ - sr &= ~STM32F4_SPI_SR_TXE; - mask |= STM32F4_SPI_SR_RXNE | STM32F4_SPI_SR_OVR; + sr &= ~STM32FX_SPI_SR_TXE; + mask |= STM32FX_SPI_SR_RXNE | STM32FX_SPI_SR_OVR; } if (!(sr & mask)) { @@ -804,12 +962,12 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id) return IRQ_NONE; } - if (sr & STM32F4_SPI_SR_OVR) { + if (sr & STM32FX_SPI_SR_OVR) { dev_warn(spi->dev, "Overrun: received value discarded\n"); /* Sequence to clear OVR flag */ - readl_relaxed(spi->base + STM32F4_SPI_DR); - readl_relaxed(spi->base + STM32F4_SPI_SR); + readl_relaxed(spi->base + STM32FX_SPI_DR); + readl_relaxed(spi->base + STM32FX_SPI_SR); /* * If overrun is detected, it means that something went wrong, @@ -820,28 +978,28 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id) goto end_irq; } - if (sr & STM32F4_SPI_SR_TXE) { + if (sr & STM32FX_SPI_SR_TXE) { if (spi->tx_buf) - stm32f4_spi_write_tx(spi); + spi->cfg->write_tx(spi); if (spi->tx_len == 0) end = true; } - if (sr & STM32F4_SPI_SR_RXNE) { - stm32f4_spi_read_rx(spi); + if (sr & STM32FX_SPI_SR_RXNE) { + spi->cfg->read_rx(spi); if (spi->rx_len == 0) end = true; else if (spi->tx_buf)/* Load data for discontinuous mode */ - stm32f4_spi_write_tx(spi); + spi->cfg->write_tx(spi); } end_irq: if (end) { /* Immediately disable interrupts to do not generate new one */ - stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, - STM32F4_SPI_CR2_TXEIE | - STM32F4_SPI_CR2_RXNEIE | - STM32F4_SPI_CR2_ERRIE); + stm32_spi_clr_bits(spi, STM32FX_SPI_CR2, + STM32FX_SPI_CR2_TXEIE | + STM32FX_SPI_CR2_RXNEIE | + STM32FX_SPI_CR2_ERRIE); spin_unlock(&spi->lock); return IRQ_WAKE_THREAD; } @@ -851,17 +1009,17 @@ end_irq: } /** - * stm32f4_spi_irq_thread - Thread of interrupt handler for SPI controller + * stm32fx_spi_irq_thread - Thread of interrupt handler for SPI controller * @irq: interrupt line * @dev_id: SPI controller interface */ -static irqreturn_t stm32f4_spi_irq_thread(int irq, void *dev_id) +static irqreturn_t stm32fx_spi_irq_thread(int irq, void *dev_id) { struct spi_controller *ctrl = dev_id; struct stm32_spi *spi = spi_controller_get_devdata(ctrl); spi_finalize_current_transfer(ctrl); - stm32f4_spi_disable(spi); + stm32fx_spi_disable(spi); return IRQ_HANDLED; } @@ -974,7 +1132,7 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl, unsigned long flags; u32 clrb = 0, setb = 0; - /* SPI slave device may need time between data frames */ + /* SPI target device may need time between data frames */ spi->cur_midi = 0; if (np && !of_property_read_u32(np, "st,spi-midi-ns", &spi->cur_midi)) dev_dbg(spi->dev, "%dns inter-data idleness\n", 
spi->cur_midi); @@ -1013,7 +1171,7 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl, int ret; ret = spi_split_transfers_maxwords(ctrl, msg, - STM32H7_SPI_TSIZE_MAX, + spi->t_size_max, GFP_KERNEL | GFP_DMA); if (ret) return ret; @@ -1034,18 +1192,18 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl, } /** - * stm32f4_spi_dma_tx_cb - dma callback + * stm32fx_spi_dma_tx_cb - dma callback * @data: pointer to the spi controller data structure * * DMA callback is called when the transfer is complete for DMA TX channel. */ -static void stm32f4_spi_dma_tx_cb(void *data) +static void stm32fx_spi_dma_tx_cb(void *data) { struct stm32_spi *spi = data; if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) { spi_finalize_current_transfer(spi->ctrl); - stm32f4_spi_disable(spi); + stm32fx_spi_disable(spi); } } @@ -1067,15 +1225,19 @@ static void stm32_spi_dma_rx_cb(void *data) * stm32_spi_dma_config - configure dma slave channel depending on current * transfer bits_per_word. * @spi: pointer to the spi controller data structure + * @dma_chan: pointer to the DMA channel * @dma_conf: pointer to the dma_slave_config structure * @dir: direction of the dma transfer */ static void stm32_spi_dma_config(struct stm32_spi *spi, + struct dma_chan *dma_chan, struct dma_slave_config *dma_conf, enum dma_transfer_direction dir) { enum dma_slave_buswidth buswidth; - u32 maxburst; + struct dma_slave_caps caps; + u32 maxburst = 1; + int ret; if (spi->cur_bpw <= 8) buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; @@ -1084,15 +1246,14 @@ static void stm32_spi_dma_config(struct stm32_spi *spi, else buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; - if (spi->cfg->has_fifo) { - /* Valid for DMA Half or Full Fifo threshold */ - if (spi->cur_fthlv == 2) - maxburst = 1; - else - maxburst = spi->cur_fthlv; - } else { - maxburst = 1; - } + /* Valid for DMA Half or Full Fifo threshold */ + if (!spi->cfg->prevent_dma_burst && spi->cfg->has_fifo && spi->cur_fthlv != 2) + maxburst = spi->cur_fthlv; + + /* Get the DMA channel caps, and adjust maxburst if possible */ + ret = dma_get_slave_caps(dma_chan, &caps); + if (!ret) + maxburst = min(maxburst, caps.max_burst); memset(dma_conf, 0, sizeof(struct dma_slave_config)); dma_conf->direction = dir; @@ -1114,21 +1275,21 @@ static void stm32_spi_dma_config(struct stm32_spi *spi, } /** - * stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using + * stm32fx_spi_transfer_one_irq - transfer a single spi_transfer using * interrupts * @spi: pointer to the spi controller data structure * * It must returns 0 if the transfer is finished or 1 if the transfer is still * in progress. */ -static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi) +static int stm32fx_spi_transfer_one_irq(struct stm32_spi *spi) { unsigned long flags; u32 cr2 = 0; /* Enable the interrupts relative to the current communication mode */ if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) { - cr2 |= STM32F4_SPI_CR2_TXEIE; + cr2 |= STM32FX_SPI_CR2_TXEIE; } else if (spi->cur_comm == SPI_FULL_DUPLEX || spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_3WIRE_RX) { @@ -1136,20 +1297,20 @@ static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi) * since the received data are never read. Therefore set OVR * interrupt only when rx buffer is available. 
*/ - cr2 |= STM32F4_SPI_CR2_RXNEIE | STM32F4_SPI_CR2_ERRIE; + cr2 |= STM32FX_SPI_CR2_RXNEIE | STM32FX_SPI_CR2_ERRIE; } else { return -EINVAL; } spin_lock_irqsave(&spi->lock, flags); - stm32_spi_set_bits(spi, STM32F4_SPI_CR2, cr2); + stm32_spi_set_bits(spi, STM32FX_SPI_CR2, cr2); stm32_spi_enable(spi); /* starting data transfer when buffer is loaded */ if (spi->tx_buf) - stm32f4_spi_write_tx(spi); + spi->cfg->write_tx(spi); spin_unlock_irqrestore(&spi->lock, flags); @@ -1189,7 +1350,7 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi) if (spi->tx_buf) stm32h7_spi_write_txfifo(spi); - if (STM32_SPI_MASTER_MODE(spi)) + if (STM32_SPI_HOST_MODE(spi)) stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART); writel_relaxed(ier, spi->base + STM32H7_SPI_IER); @@ -1200,11 +1361,11 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi) } /** - * stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start + * stm32fx_spi_transfer_one_dma_start - Set SPI driver registers to start * transfer using DMA * @spi: pointer to the spi controller data structure */ -static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi) +static void stm32fx_spi_transfer_one_dma_start(struct stm32_spi *spi) { /* In DMA mode end of transfer is handled by DMA TX or RX callback. */ if (spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_3WIRE_RX || @@ -1214,13 +1375,29 @@ static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi) * since the received data are never read. Therefore set OVR * interrupt only when rx buffer is available. */ - stm32_spi_set_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_ERRIE); + stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_ERRIE); } stm32_spi_enable(spi); } /** + * stm32f7_spi_transfer_one_dma_start - Set SPI driver registers to start + * transfer using DMA + * @spi: pointer to the spi controller data structure + */ +static void stm32f7_spi_transfer_one_dma_start(struct stm32_spi *spi) +{ + /* Configure DMA request trigger threshold according to DMA width */ + if (spi->cur_bpw <= 8) + stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32F7_SPI_CR2_FRXTH); + else + stm32_spi_clr_bits(spi, STM32FX_SPI_CR2, STM32F7_SPI_CR2_FRXTH); + + stm32fx_spi_transfer_one_dma_start(spi); +} + +/** * stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start * transfer using DMA * @spi: pointer to the spi controller data structure @@ -1237,7 +1414,7 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi) stm32_spi_enable(spi); - if (STM32_SPI_MASTER_MODE(spi)) + if (STM32_SPI_HOST_MODE(spi)) stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART); } @@ -1260,7 +1437,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, rx_dma_desc = NULL; if (spi->rx_buf && spi->dma_rx) { - stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM); + stm32_spi_dma_config(spi, spi->dma_rx, &rx_dma_conf, DMA_DEV_TO_MEM); dmaengine_slave_config(spi->dma_rx, &rx_dma_conf); /* Enable Rx DMA request */ @@ -1276,7 +1453,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, tx_dma_desc = NULL; if (spi->tx_buf && spi->dma_tx) { - stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV); + stm32_spi_dma_config(spi, spi->dma_tx, &tx_dma_conf, DMA_MEM_TO_DEV); dmaengine_slave_config(spi->dma_tx, &tx_dma_conf); tx_dma_desc = dmaengine_prep_slave_sg( @@ -1353,9 +1530,34 @@ dma_desc_error: static void stm32f4_spi_set_bpw(struct stm32_spi *spi) { if (spi->cur_bpw == 16) - stm32_spi_set_bits(spi, STM32F4_SPI_CR1, 
STM32F4_SPI_CR1_DFF); + stm32_spi_set_bits(spi, STM32FX_SPI_CR1, STM32F4_SPI_CR1_DFF); + else + stm32_spi_clr_bits(spi, STM32FX_SPI_CR1, STM32F4_SPI_CR1_DFF); +} + +/** + * stm32f7_spi_set_bpw - Configure bits per word + * @spi: pointer to the spi controller data structure + */ +static void stm32f7_spi_set_bpw(struct stm32_spi *spi) +{ + u32 bpw; + u32 cr2_clrb = 0, cr2_setb = 0; + + bpw = spi->cur_bpw - 1; + + cr2_clrb |= STM32F7_SPI_CR2_DS; + cr2_setb |= FIELD_PREP(STM32F7_SPI_CR2_DS, bpw); + + if (spi->rx_len >= sizeof(u16)) + cr2_clrb |= STM32F7_SPI_CR2_FRXTH; else - stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF); + cr2_setb |= STM32F7_SPI_CR2_FRXTH; + + writel_relaxed( + (readl_relaxed(spi->base + STM32FX_SPI_CR2) & + ~cr2_clrb) | cr2_setb, + spi->base + STM32FX_SPI_CR2); } /** @@ -1385,7 +1587,7 @@ static void stm32h7_spi_set_bpw(struct stm32_spi *spi) } /** - * stm32_spi_set_mbr - Configure baud rate divisor in master mode + * stm32_spi_set_mbr - Configure baud rate divisor in host mode * @spi: pointer to the spi controller data structure * @mbrdiv: baud rate divisor value */ @@ -1433,26 +1635,26 @@ static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev, } /** - * stm32f4_spi_set_mode - configure communication mode + * stm32fx_spi_set_mode - configure communication mode * @spi: pointer to the spi controller data structure * @comm_type: type of communication to configure */ -static int stm32f4_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type) +static int stm32fx_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type) { if (comm_type == SPI_3WIRE_TX || comm_type == SPI_SIMPLEX_TX) { - stm32_spi_set_bits(spi, STM32F4_SPI_CR1, - STM32F4_SPI_CR1_BIDIMODE | - STM32F4_SPI_CR1_BIDIOE); + stm32_spi_set_bits(spi, STM32FX_SPI_CR1, + STM32FX_SPI_CR1_BIDIMODE | + STM32FX_SPI_CR1_BIDIOE); } else if (comm_type == SPI_FULL_DUPLEX || comm_type == SPI_SIMPLEX_RX) { - stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, - STM32F4_SPI_CR1_BIDIMODE | - STM32F4_SPI_CR1_BIDIOE); + stm32_spi_clr_bits(spi, STM32FX_SPI_CR1, + STM32FX_SPI_CR1_BIDIMODE | + STM32FX_SPI_CR1_BIDIOE); } else if (comm_type == SPI_3WIRE_RX) { - stm32_spi_set_bits(spi, STM32F4_SPI_CR1, - STM32F4_SPI_CR1_BIDIMODE); - stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, - STM32F4_SPI_CR1_BIDIOE); + stm32_spi_set_bits(spi, STM32FX_SPI_CR1, + STM32FX_SPI_CR1_BIDIMODE); + stm32_spi_clr_bits(spi, STM32FX_SPI_CR1, + STM32FX_SPI_CR1_BIDIOE); } else { return -EINVAL; } @@ -1497,7 +1699,7 @@ static int stm32h7_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type) /** * stm32h7_spi_data_idleness - configure minimum time delay inserted between two - * consecutive data frames in master mode + * consecutive data frames in host mode * @spi: pointer to the spi controller data structure * @len: transfer len */ @@ -1531,7 +1733,7 @@ static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len) */ static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words) { - if (nb_words <= STM32H7_SPI_TSIZE_MAX) { + if (nb_words <= spi->t_size_max) { writel_relaxed(FIELD_PREP(STM32H7_SPI_CR2_TSIZE, nb_words), spi->base + STM32H7_SPI_CR2); } else { @@ -1566,7 +1768,7 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, spi->cfg->set_bpw(spi); /* Update spi->cur_speed with real clock speed */ - if (STM32_SPI_MASTER_MODE(spi)) { + if (STM32_SPI_HOST_MODE(spi)) { mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz, spi->cfg->baud_rate_div_min, spi->cfg->baud_rate_div_max); @@ -1586,7 +1788,7 @@ static 
int stm32_spi_transfer_one_setup(struct stm32_spi *spi, spi->cur_comm = comm_type; - if (STM32_SPI_MASTER_MODE(spi) && spi->cfg->set_data_idleness) + if (STM32_SPI_HOST_MODE(spi) && spi->cfg->set_data_idleness) spi->cfg->set_data_idleness(spi, transfer->len); if (spi->cur_bpw <= 8) @@ -1607,7 +1809,7 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, dev_dbg(spi->dev, "data frame of %d-bit, data packet of %d data frames\n", spi->cur_bpw, spi->cur_fthlv); - if (STM32_SPI_MASTER_MODE(spi)) + if (STM32_SPI_HOST_MODE(spi)) dev_dbg(spi->dev, "speed set to %dHz\n", spi->cur_speed); dev_dbg(spi->dev, "transfer of %d bytes (%d data frames)\n", spi->cur_xferlen, nb_words); @@ -1672,30 +1874,30 @@ static int stm32_spi_unprepare_msg(struct spi_controller *ctrl, } /** - * stm32f4_spi_config - Configure SPI controller as SPI master + * stm32fx_spi_config - Configure SPI controller as SPI host * @spi: pointer to the spi controller data structure */ -static int stm32f4_spi_config(struct stm32_spi *spi) +static int stm32fx_spi_config(struct stm32_spi *spi) { unsigned long flags; spin_lock_irqsave(&spi->lock, flags); /* Ensure I2SMOD bit is kept cleared */ - stm32_spi_clr_bits(spi, STM32F4_SPI_I2SCFGR, - STM32F4_SPI_I2SCFGR_I2SMOD); + stm32_spi_clr_bits(spi, STM32FX_SPI_I2SCFGR, + STM32FX_SPI_I2SCFGR_I2SMOD); /* * - SS input value high * - transmitter half duplex direction - * - Set the master mode (default Motorola mode) - * - Consider 1 master/n slaves configuration and + * - Set the host mode (default Motorola mode) + * - Consider 1 host/n targets configuration and * SS input value is determined by the SSI bit */ - stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SSI | - STM32F4_SPI_CR1_BIDIOE | - STM32F4_SPI_CR1_MSTR | - STM32F4_SPI_CR1_SSM); + stm32_spi_set_bits(spi, STM32FX_SPI_CR1, STM32FX_SPI_CR1_SSI | + STM32FX_SPI_CR1_BIDIOE | + STM32FX_SPI_CR1_MSTR | + STM32FX_SPI_CR1_SSM); spin_unlock_irqrestore(&spi->lock, flags); @@ -1729,8 +1931,8 @@ static int stm32h7_spi_config(struct stm32_spi *spi) cr1 |= STM32H7_SPI_CR1_HDDIR | STM32H7_SPI_CR1_MASRX | STM32H7_SPI_CR1_SSI; /* - * - Set the master mode (default Motorola mode) - * - Consider 1 master/n devices configuration and + * - Set the host mode (default Motorola mode) + * - Consider 1 host/n devices configuration and * SS input value is determined by the SSI bit * - keep control of all associated GPIOs */ @@ -1746,25 +1948,48 @@ static int stm32h7_spi_config(struct stm32_spi *spi) } static const struct stm32_spi_cfg stm32f4_spi_cfg = { - .regs = &stm32f4_spi_regspec, + .regs = &stm32fx_spi_regspec, .get_bpw_mask = stm32f4_spi_get_bpw_mask, - .disable = stm32f4_spi_disable, - .config = stm32f4_spi_config, + .disable = stm32fx_spi_disable, + .config = stm32fx_spi_config, .set_bpw = stm32f4_spi_set_bpw, - .set_mode = stm32f4_spi_set_mode, - .transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start, - .dma_tx_cb = stm32f4_spi_dma_tx_cb, + .set_mode = stm32fx_spi_set_mode, + .write_tx = stm32f4_spi_write_tx, + .read_rx = stm32f4_spi_read_rx, + .transfer_one_dma_start = stm32fx_spi_transfer_one_dma_start, + .dma_tx_cb = stm32fx_spi_dma_tx_cb, .dma_rx_cb = stm32_spi_dma_rx_cb, - .transfer_one_irq = stm32f4_spi_transfer_one_irq, - .irq_handler_event = stm32f4_spi_irq_event, - .irq_handler_thread = stm32f4_spi_irq_thread, - .baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN, - .baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX, + .transfer_one_irq = stm32fx_spi_transfer_one_irq, + .irq_handler_event = stm32fx_spi_irq_event, + .irq_handler_thread 
= stm32fx_spi_irq_thread, + .baud_rate_div_min = STM32FX_SPI_BR_DIV_MIN, + .baud_rate_div_max = STM32FX_SPI_BR_DIV_MAX, .has_fifo = false, .has_device_mode = false, .flags = SPI_CONTROLLER_MUST_TX, }; +static const struct stm32_spi_cfg stm32f7_spi_cfg = { + .regs = &stm32fx_spi_regspec, + .get_bpw_mask = stm32f7_spi_get_bpw_mask, + .disable = stm32fx_spi_disable, + .config = stm32fx_spi_config, + .set_bpw = stm32f7_spi_set_bpw, + .set_mode = stm32fx_spi_set_mode, + .write_tx = stm32f7_spi_write_tx, + .read_rx = stm32f7_spi_read_rx, + .transfer_one_dma_start = stm32f7_spi_transfer_one_dma_start, + .dma_tx_cb = stm32fx_spi_dma_tx_cb, + .dma_rx_cb = stm32_spi_dma_rx_cb, + .transfer_one_irq = stm32fx_spi_transfer_one_irq, + .irq_handler_event = stm32fx_spi_irq_event, + .irq_handler_thread = stm32fx_spi_irq_thread, + .baud_rate_div_min = STM32FX_SPI_BR_DIV_MIN, + .baud_rate_div_max = STM32FX_SPI_BR_DIV_MAX, + .has_fifo = false, + .flags = SPI_CONTROLLER_MUST_TX, +}; + static const struct stm32_spi_cfg stm32h7_spi_cfg = { .regs = &stm32h7_spi_regspec, .get_fifo_size = stm32h7_spi_get_fifo_size, @@ -1775,6 +2000,8 @@ static const struct stm32_spi_cfg stm32h7_spi_cfg = { .set_mode = stm32h7_spi_set_mode, .set_data_idleness = stm32h7_spi_data_idleness, .set_number_of_data = stm32h7_spi_number_of_data, + .write_tx = stm32h7_spi_write_txfifo, + .read_rx = stm32h7_spi_read_rxfifo, .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start, .dma_rx_cb = stm32_spi_dma_rx_cb, /* @@ -1789,9 +2016,40 @@ static const struct stm32_spi_cfg stm32h7_spi_cfg = { .has_device_mode = true, }; +/* + * STM32MP2 is compatible with the STM32H7 except: + * - enforce the DMA maxburst value to 1 + * - spi8 has a limited feature set (TSIZE_MAX = 1024, BPW of 8 or 16) + */ +static const struct stm32_spi_cfg stm32mp25_spi_cfg = { + .regs = &stm32mp25_spi_regspec, + .get_fifo_size = stm32h7_spi_get_fifo_size, + .get_bpw_mask = stm32mp25_spi_get_bpw_mask, + .disable = stm32h7_spi_disable, + .config = stm32h7_spi_config, + .set_bpw = stm32h7_spi_set_bpw, + .set_mode = stm32h7_spi_set_mode, + .set_data_idleness = stm32h7_spi_data_idleness, + .set_number_of_data = stm32h7_spi_number_of_data, + .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start, + .dma_rx_cb = stm32_spi_dma_rx_cb, + /* + * dma_tx_cb is not necessary since, in the TX case, DMA is followed by + * SPI access, hence handling is performed within the SPI interrupt + */ + .transfer_one_irq = stm32h7_spi_transfer_one_irq, + .irq_handler_thread = stm32h7_spi_irq_thread, + .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN, + .baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX, + .has_fifo = true, + .prevent_dma_burst = true, +}; + static const struct of_device_id stm32_spi_of_match[] = { + { .compatible = "st,stm32mp25-spi", .data = (void *)&stm32mp25_spi_cfg }, { .compatible = "st,stm32h7-spi", .data = (void *)&stm32h7_spi_cfg }, { .compatible = "st,stm32f4-spi", .data = (void *)&stm32f4_spi_cfg }, + { .compatible = "st,stm32f7-spi", .data = (void *)&stm32f7_spi_cfg }, {}, }; MODULE_DEVICE_TABLE(of, stm32_spi_of_match); @@ -1820,9 +2078,9 @@ static int stm32_spi_probe(struct platform_device *pdev) } if (device_mode) - ctrl = devm_spi_alloc_slave(&pdev->dev, sizeof(struct stm32_spi)); + ctrl = devm_spi_alloc_target(&pdev->dev, sizeof(struct stm32_spi)); else - ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi)); if (!ctrl) { dev_err(&pdev->dev, "spi controller allocation failed\n"); return
-ENOMEM; @@ -1892,6 +2150,22 @@ static int stm32_spi_probe(struct platform_device *pdev) if (spi->cfg->has_fifo) spi->fifo_size = spi->cfg->get_fifo_size(spi); + spi->feature_set = STM32_SPI_FEATURE_FULL; + if (spi->cfg->regs->fullcfg.reg) { + spi->feature_set = + FIELD_GET(STM32MP25_SPI_HWCFGR1_FULLCFG, + readl_relaxed(spi->base + spi->cfg->regs->fullcfg.reg)); + + dev_dbg(spi->dev, "%s feature set\n", + spi->feature_set == STM32_SPI_FEATURE_FULL ? "full" : "limited"); + } + + /* Only for STM32H7 and after */ + spi->t_size_max = spi->feature_set == STM32_SPI_FEATURE_FULL ? + STM32H7_SPI_TSIZE_MAX : + STM32MP25_SPI_TSIZE_MAX_LIMITED; + dev_dbg(spi->dev, "one message max size %d\n", spi->t_size_max); + ret = spi->cfg->config(spi); if (ret) { dev_err(&pdev->dev, "controller configuration failed: %d\n", @@ -1913,7 +2187,7 @@ static int stm32_spi_probe(struct platform_device *pdev) ctrl->unprepare_message = stm32_spi_unprepare_msg; ctrl->flags = spi->cfg->flags; if (STM32_SPI_DEVICE_MODE(spi)) - ctrl->slave_abort = stm32h7_spi_device_abort; + ctrl->target_abort = stm32h7_spi_device_abort; spi->dma_tx = dma_request_chan(spi->dev, "tx"); if (IS_ERR(spi->dma_tx)) { @@ -1960,7 +2234,7 @@ static int stm32_spi_probe(struct platform_device *pdev) pm_runtime_put_autosuspend(&pdev->dev); dev_info(&pdev->dev, "driver initialized (%s mode)\n", - STM32_SPI_MASTER_MODE(spi) ? "master" : "device"); + STM32_SPI_HOST_MODE(spi) ? "host" : "device"); return 0; diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c index b8947265d329..11d8bd27b3e9 100644 --- a/drivers/spi/spi-sun4i.c +++ b/drivers/spi/spi-sun4i.c @@ -75,7 +75,7 @@ #define SUN4I_FIFO_STA_TF_CNT_BITS 16 struct sun4i_spi { - struct spi_master *master; + struct spi_controller *host; void __iomem *base_addr; struct clk *hclk; struct clk *mclk; @@ -161,7 +161,7 @@ static inline void sun4i_spi_fill_fifo(struct sun4i_spi *sspi, int len) static void sun4i_spi_set_cs(struct spi_device *spi, bool enable) { - struct sun4i_spi *sspi = spi_master_get_devdata(spi->master); + struct sun4i_spi *sspi = spi_controller_get_devdata(spi->controller); u32 reg; reg = sun4i_spi_read(sspi, SUN4I_CTL_REG); @@ -201,11 +201,11 @@ static size_t sun4i_spi_max_transfer_size(struct spi_device *spi) return SUN4I_MAX_XFER_SIZE - 1; } -static int sun4i_spi_transfer_one(struct spi_master *master, +static int sun4i_spi_transfer_one(struct spi_controller *host, struct spi_device *spi, struct spi_transfer *tfr) { - struct sun4i_spi *sspi = spi_master_get_devdata(master); + struct sun4i_spi *sspi = spi_controller_get_devdata(host); unsigned int mclk_rate, div, timeout; unsigned int start, end, tx_time; unsigned int tx_len = 0; @@ -331,7 +331,7 @@ static int sun4i_spi_transfer_one(struct spi_master *master, msecs_to_jiffies(tx_time)); end = jiffies; if (!timeout) { - dev_warn(&master->dev, + dev_warn(&host->dev, "%s: timeout transferring %u bytes@%iHz for %i(%i)ms", dev_name(&spi->dev), tfr->len, tfr->speed_hz, jiffies_to_msecs(end - start), tx_time); @@ -386,8 +386,8 @@ static irqreturn_t sun4i_spi_handler(int irq, void *dev_id) static int sun4i_spi_runtime_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct sun4i_spi *sspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct sun4i_spi *sspi = spi_controller_get_devdata(host); int ret; ret = clk_prepare_enable(sspi->hclk); @@ -415,8 +415,8 @@ out: static int sun4i_spi_runtime_suspend(struct device *dev) { - struct spi_master *master = 
dev_get_drvdata(dev); - struct sun4i_spi *sspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct sun4i_spi *sspi = spi_controller_get_devdata(host); clk_disable_unprepare(sspi->mclk); clk_disable_unprepare(sspi->hclk); @@ -426,62 +426,62 @@ static int sun4i_spi_runtime_suspend(struct device *dev) static int sun4i_spi_probe(struct platform_device *pdev) { - struct spi_master *master; + struct spi_controller *host; struct sun4i_spi *sspi; int ret = 0, irq; - master = spi_alloc_master(&pdev->dev, sizeof(struct sun4i_spi)); - if (!master) { - dev_err(&pdev->dev, "Unable to allocate SPI Master\n"); + host = spi_alloc_host(&pdev->dev, sizeof(struct sun4i_spi)); + if (!host) { + dev_err(&pdev->dev, "Unable to allocate SPI Host\n"); return -ENOMEM; } - platform_set_drvdata(pdev, master); - sspi = spi_master_get_devdata(master); + platform_set_drvdata(pdev, host); + sspi = spi_controller_get_devdata(host); sspi->base_addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(sspi->base_addr)) { ret = PTR_ERR(sspi->base_addr); - goto err_free_master; + goto err_free_host; } irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = -ENXIO; - goto err_free_master; + goto err_free_host; } ret = devm_request_irq(&pdev->dev, irq, sun4i_spi_handler, 0, "sun4i-spi", sspi); if (ret) { dev_err(&pdev->dev, "Cannot request IRQ\n"); - goto err_free_master; + goto err_free_host; } - sspi->master = master; - master->max_speed_hz = 100 * 1000 * 1000; - master->min_speed_hz = 3 * 1000; - master->set_cs = sun4i_spi_set_cs; - master->transfer_one = sun4i_spi_transfer_one; - master->num_chipselect = 4; - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; - master->bits_per_word_mask = SPI_BPW_MASK(8); - master->dev.of_node = pdev->dev.of_node; - master->auto_runtime_pm = true; - master->max_transfer_size = sun4i_spi_max_transfer_size; + sspi->host = host; + host->max_speed_hz = 100 * 1000 * 1000; + host->min_speed_hz = 3 * 1000; + host->set_cs = sun4i_spi_set_cs; + host->transfer_one = sun4i_spi_transfer_one; + host->num_chipselect = 4; + host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; + host->bits_per_word_mask = SPI_BPW_MASK(8); + host->dev.of_node = pdev->dev.of_node; + host->auto_runtime_pm = true; + host->max_transfer_size = sun4i_spi_max_transfer_size; sspi->hclk = devm_clk_get(&pdev->dev, "ahb"); if (IS_ERR(sspi->hclk)) { dev_err(&pdev->dev, "Unable to acquire AHB clock\n"); ret = PTR_ERR(sspi->hclk); - goto err_free_master; + goto err_free_host; } sspi->mclk = devm_clk_get(&pdev->dev, "mod"); if (IS_ERR(sspi->mclk)) { dev_err(&pdev->dev, "Unable to acquire module clock\n"); ret = PTR_ERR(sspi->mclk); - goto err_free_master; + goto err_free_host; } init_completion(&sspi->done); @@ -493,16 +493,16 @@ static int sun4i_spi_probe(struct platform_device *pdev) ret = sun4i_spi_runtime_resume(&pdev->dev); if (ret) { dev_err(&pdev->dev, "Couldn't resume the device\n"); - goto err_free_master; + goto err_free_host; } pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); pm_runtime_idle(&pdev->dev); - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, host); if (ret) { - dev_err(&pdev->dev, "cannot register SPI master\n"); + dev_err(&pdev->dev, "cannot register SPI host\n"); goto err_pm_disable; } @@ -511,8 +511,8 @@ static int sun4i_spi_probe(struct platform_device *pdev) err_pm_disable: pm_runtime_disable(&pdev->dev); sun4i_spi_runtime_suspend(&pdev->dev); -err_free_master: - 
spi_master_put(master); +err_free_host: + spi_controller_put(host); return ret; } diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index fddc63309773..cd018ea1abf1 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -97,7 +97,7 @@ struct sun6i_spi_cfg { }; struct sun6i_spi { - struct spi_master *master; + struct spi_controller *host; void __iomem *base_addr; dma_addr_t dma_addr_rx; dma_addr_t dma_addr_tx; @@ -181,7 +181,7 @@ static inline void sun6i_spi_fill_fifo(struct sun6i_spi *sspi) static void sun6i_spi_set_cs(struct spi_device *spi, bool enable) { - struct sun6i_spi *sspi = spi_master_get_devdata(spi->master); + struct sun6i_spi *sspi = spi_controller_get_devdata(spi->controller); u32 reg; reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG); @@ -212,7 +212,7 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi, struct spi_transfer *tfr) { struct dma_async_tx_descriptor *rxdesc, *txdesc; - struct spi_master *master = sspi->master; + struct spi_controller *host = sspi->host; rxdesc = NULL; if (tfr->rx_buf) { @@ -223,9 +223,9 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi, .src_maxburst = 8, }; - dmaengine_slave_config(master->dma_rx, &rxconf); + dmaengine_slave_config(host->dma_rx, &rxconf); - rxdesc = dmaengine_prep_slave_sg(master->dma_rx, + rxdesc = dmaengine_prep_slave_sg(host->dma_rx, tfr->rx_sg.sgl, tfr->rx_sg.nents, DMA_DEV_TO_MEM, @@ -245,38 +245,38 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi, .dst_maxburst = 8, }; - dmaengine_slave_config(master->dma_tx, &txconf); + dmaengine_slave_config(host->dma_tx, &txconf); - txdesc = dmaengine_prep_slave_sg(master->dma_tx, + txdesc = dmaengine_prep_slave_sg(host->dma_tx, tfr->tx_sg.sgl, tfr->tx_sg.nents, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); if (!txdesc) { if (rxdesc) - dmaengine_terminate_sync(master->dma_rx); + dmaengine_terminate_sync(host->dma_rx); return -EINVAL; } } if (tfr->rx_buf) { dmaengine_submit(rxdesc); - dma_async_issue_pending(master->dma_rx); + dma_async_issue_pending(host->dma_rx); } if (tfr->tx_buf) { dmaengine_submit(txdesc); - dma_async_issue_pending(master->dma_tx); + dma_async_issue_pending(host->dma_tx); } return 0; } -static int sun6i_spi_transfer_one(struct spi_master *master, +static int sun6i_spi_transfer_one(struct spi_controller *host, struct spi_device *spi, struct spi_transfer *tfr) { - struct sun6i_spi *sspi = spi_master_get_devdata(master); + struct sun6i_spi *sspi = spi_controller_get_devdata(host); unsigned int div, div_cdr1, div_cdr2, timeout; unsigned int start, end, tx_time; unsigned int trig_level; @@ -293,7 +293,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, sspi->tx_buf = tfr->tx_buf; sspi->rx_buf = tfr->rx_buf; sspi->len = tfr->len; - use_dma = master->can_dma ? master->can_dma(master, spi, tfr) : false; + use_dma = host->can_dma ? 
host->can_dma(host, spi, tfr) : false; /* Clear pending interrupts */ sun6i_spi_write(sspi, SUN6I_INT_STA_REG, ~0); @@ -463,7 +463,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, } else { ret = sun6i_spi_prepare_dma(sspi, tfr); if (ret) { - dev_warn(&master->dev, + dev_warn(&host->dev, "%s: prepare DMA failed, ret=%d", dev_name(&spi->dev), ret); return ret; @@ -486,7 +486,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG); sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH); - tx_time = spi_controller_xfer_timeout(master, tfr); + tx_time = spi_controller_xfer_timeout(host, tfr); start = jiffies; timeout = wait_for_completion_timeout(&sspi->done, msecs_to_jiffies(tx_time)); @@ -502,13 +502,13 @@ static int sun6i_spi_transfer_one(struct spi_master *master, timeout = wait_for_completion_timeout(&sspi->dma_rx_done, timeout); if (!timeout) - dev_warn(&master->dev, "RX DMA timeout\n"); + dev_warn(&host->dev, "RX DMA timeout\n"); } } end = jiffies; if (!timeout) { - dev_warn(&master->dev, + dev_warn(&host->dev, "%s: timeout transferring %u bytes@%iHz for %i(%i)ms", dev_name(&spi->dev), tfr->len, tfr->speed_hz, jiffies_to_msecs(end - start), tx_time); @@ -518,8 +518,8 @@ static int sun6i_spi_transfer_one(struct spi_master *master, sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0); if (ret && use_dma) { - dmaengine_terminate_sync(master->dma_rx); - dmaengine_terminate_sync(master->dma_tx); + dmaengine_terminate_sync(host->dma_rx); + dmaengine_terminate_sync(host->dma_tx); } return ret; @@ -564,8 +564,8 @@ static irqreturn_t sun6i_spi_handler(int irq, void *dev_id) static int sun6i_spi_runtime_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct sun6i_spi *sspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct sun6i_spi *sspi = spi_controller_get_devdata(host); int ret; ret = clk_prepare_enable(sspi->hclk); @@ -601,8 +601,8 @@ out: static int sun6i_spi_runtime_suspend(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct sun6i_spi *sspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct sun6i_spi *sspi = spi_controller_get_devdata(host); reset_control_assert(sspi->rstc); clk_disable_unprepare(sspi->mclk); @@ -611,11 +611,11 @@ static int sun6i_spi_runtime_suspend(struct device *dev) return 0; } -static bool sun6i_spi_can_dma(struct spi_master *master, +static bool sun6i_spi_can_dma(struct spi_controller *host, struct spi_device *spi, struct spi_transfer *xfer) { - struct sun6i_spi *sspi = spi_master_get_devdata(master); + struct sun6i_spi *sspi = spi_controller_get_devdata(host); /* * If the number of spi words to transfer is less or equal than @@ -627,67 +627,67 @@ static bool sun6i_spi_can_dma(struct spi_master *master, static int sun6i_spi_probe(struct platform_device *pdev) { - struct spi_master *master; + struct spi_controller *host; struct sun6i_spi *sspi; struct resource *mem; int ret = 0, irq; - master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi)); - if (!master) { - dev_err(&pdev->dev, "Unable to allocate SPI Master\n"); + host = spi_alloc_host(&pdev->dev, sizeof(struct sun6i_spi)); + if (!host) { + dev_err(&pdev->dev, "Unable to allocate SPI Host\n"); return -ENOMEM; } - platform_set_drvdata(pdev, master); - sspi = spi_master_get_devdata(master); + platform_set_drvdata(pdev, host); + sspi = spi_controller_get_devdata(host); 
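The sun4i and sun6i conversions follow the same mechanical pattern as the other drivers in this series: spi_alloc_master()/spi_master_get_devdata() become spi_alloc_host()/spi_controller_get_devdata(), and the error path keeps an explicit spi_controller_put() because the allocation itself is not device-managed. A condensed sketch of the resulting probe shape (the foo_* names are hypothetical):

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct foo_spi { void __iomem *base; };

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct foo_spi *fs;
	int ret;

	host = spi_alloc_host(&pdev->dev, sizeof(*fs));	/* was spi_alloc_master() */
	if (!host)
		return -ENOMEM;

	fs = spi_controller_get_devdata(host);	/* was spi_master_get_devdata() */
	fs->base = NULL;			/* driver state would be set up here */
	platform_set_drvdata(pdev, host);

	ret = devm_spi_register_controller(&pdev->dev, host);
	if (ret)
		spi_controller_put(host);	/* drop the allocation reference */
	return ret;
}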
sspi->base_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem); if (IS_ERR(sspi->base_addr)) { ret = PTR_ERR(sspi->base_addr); - goto err_free_master; + goto err_free_host; } irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = -ENXIO; - goto err_free_master; + goto err_free_host; } ret = devm_request_irq(&pdev->dev, irq, sun6i_spi_handler, 0, "sun6i-spi", sspi); if (ret) { dev_err(&pdev->dev, "Cannot request IRQ\n"); - goto err_free_master; + goto err_free_host; } - sspi->master = master; + sspi->host = host; sspi->cfg = of_device_get_match_data(&pdev->dev); - master->max_speed_hz = 100 * 1000 * 1000; - master->min_speed_hz = 3 * 1000; - master->use_gpio_descriptors = true; - master->set_cs = sun6i_spi_set_cs; - master->transfer_one = sun6i_spi_transfer_one; - master->num_chipselect = 4; - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST | - sspi->cfg->mode_bits; - master->bits_per_word_mask = SPI_BPW_MASK(8); - master->dev.of_node = pdev->dev.of_node; - master->auto_runtime_pm = true; - master->max_transfer_size = sun6i_spi_max_transfer_size; + host->max_speed_hz = 100 * 1000 * 1000; + host->min_speed_hz = 3 * 1000; + host->use_gpio_descriptors = true; + host->set_cs = sun6i_spi_set_cs; + host->transfer_one = sun6i_spi_transfer_one; + host->num_chipselect = 4; + host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST | + sspi->cfg->mode_bits; + host->bits_per_word_mask = SPI_BPW_MASK(8); + host->dev.of_node = pdev->dev.of_node; + host->auto_runtime_pm = true; + host->max_transfer_size = sun6i_spi_max_transfer_size; sspi->hclk = devm_clk_get(&pdev->dev, "ahb"); if (IS_ERR(sspi->hclk)) { dev_err(&pdev->dev, "Unable to acquire AHB clock\n"); ret = PTR_ERR(sspi->hclk); - goto err_free_master; + goto err_free_host; } sspi->mclk = devm_clk_get(&pdev->dev, "mod"); if (IS_ERR(sspi->mclk)) { dev_err(&pdev->dev, "Unable to acquire module clock\n"); ret = PTR_ERR(sspi->mclk); - goto err_free_master; + goto err_free_host; } init_completion(&sspi->done); @@ -697,34 +697,34 @@ static int sun6i_spi_probe(struct platform_device *pdev) if (IS_ERR(sspi->rstc)) { dev_err(&pdev->dev, "Couldn't get reset controller\n"); ret = PTR_ERR(sspi->rstc); - goto err_free_master; + goto err_free_host; } - master->dma_tx = dma_request_chan(&pdev->dev, "tx"); - if (IS_ERR(master->dma_tx)) { + host->dma_tx = dma_request_chan(&pdev->dev, "tx"); + if (IS_ERR(host->dma_tx)) { /* Check tx to see if we need defer probing driver */ - if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) { + if (PTR_ERR(host->dma_tx) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; - goto err_free_master; + goto err_free_host; } dev_warn(&pdev->dev, "Failed to request TX DMA channel\n"); - master->dma_tx = NULL; + host->dma_tx = NULL; } - master->dma_rx = dma_request_chan(&pdev->dev, "rx"); - if (IS_ERR(master->dma_rx)) { - if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) { + host->dma_rx = dma_request_chan(&pdev->dev, "rx"); + if (IS_ERR(host->dma_rx)) { + if (PTR_ERR(host->dma_rx) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto err_free_dma_tx; } dev_warn(&pdev->dev, "Failed to request RX DMA channel\n"); - master->dma_rx = NULL; + host->dma_rx = NULL; } - if (master->dma_tx && master->dma_rx) { + if (host->dma_tx && host->dma_rx) { sspi->dma_addr_tx = mem->start + SUN6I_TXDATA_REG; sspi->dma_addr_rx = mem->start + SUN6I_RXDATA_REG; - master->can_dma = sun6i_spi_can_dma; + host->can_dma = sun6i_spi_can_dma; } /* @@ -742,9 +742,9 @@ static int sun6i_spi_probe(struct platform_device *pdev) 
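The two dma_request_chan() hunks just above encode a policy worth spelling out: DMA is optional for this controller, so only -EPROBE_DEFER aborts the probe, the channel pointer is NULLed on any other failure, and ->can_dma is installed only when both directions came up. A condensed sketch of that policy, with foo_spi_can_dma() as a hypothetical callback:

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static bool foo_spi_can_dma(struct spi_controller *host,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* placeholder heuristic: DMA only pays off past the FIFO depth */
	return xfer->len > 64;
}

static int foo_spi_request_dma(struct platform_device *pdev,
			       struct spi_controller *host)
{
	host->dma_tx = dma_request_chan(&pdev->dev, "tx");
	if (IS_ERR(host->dma_tx)) {
		if (PTR_ERR(host->dma_tx) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* provider not bound yet */
		dev_warn(&pdev->dev, "no TX DMA channel, using PIO\n");
		host->dma_tx = NULL;
	}

	host->dma_rx = dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR(host->dma_rx)) {
		if (PTR_ERR(host->dma_rx) == -EPROBE_DEFER) {
			if (host->dma_tx)
				dma_release_channel(host->dma_tx);
			return -EPROBE_DEFER;
		}
		dev_warn(&pdev->dev, "no RX DMA channel, using PIO\n");
		host->dma_rx = NULL;
	}

	/* the core only consults ->can_dma when it is non-NULL, so the
	 * PIO-only fallback needs no further handling */
	if (host->dma_tx && host->dma_rx)
		host->can_dma = foo_spi_can_dma;

	return 0;
}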
pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, host); if (ret) { - dev_err(&pdev->dev, "cannot register SPI master\n"); + dev_err(&pdev->dev, "cannot register SPI host\n"); goto err_pm_disable; } @@ -754,26 +754,26 @@ err_pm_disable: pm_runtime_disable(&pdev->dev); sun6i_spi_runtime_suspend(&pdev->dev); err_free_dma_rx: - if (master->dma_rx) - dma_release_channel(master->dma_rx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); err_free_dma_tx: - if (master->dma_tx) - dma_release_channel(master->dma_tx); -err_free_master: - spi_master_put(master); + if (host->dma_tx) + dma_release_channel(host->dma_tx); +err_free_host: + spi_controller_put(host); return ret; } static void sun6i_spi_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); + struct spi_controller *host = platform_get_drvdata(pdev); pm_runtime_force_suspend(&pdev->dev); - if (master->dma_tx) - dma_release_channel(master->dma_tx); - if (master->dma_rx) - dma_release_channel(master->dma_rx); + if (host->dma_tx) + dma_release_channel(host->dma_tx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); } static const struct sun6i_spi_cfg sun6i_a31_spi_cfg = { diff --git a/drivers/spi/spi-sunplus-sp7021.c b/drivers/spi/spi-sunplus-sp7021.c index eb8f835a4771..4e481380c259 100644 --- a/drivers/spi/spi-sunplus-sp7021.c +++ b/drivers/spi/spi-sunplus-sp7021.c @@ -70,8 +70,8 @@ #define SP7021_FIFO_DATA_LEN (16) enum { - SP7021_MASTER_MODE = 0, - SP7021_SLAVE_MODE = 1, + SP7021_HOST_MODE = 0, + SP7021_TARGET_MODE = 1, }; struct sp7021_spi_ctlr { @@ -88,7 +88,7 @@ struct sp7021_spi_ctlr { // data xfer lock struct mutex buf_lock; struct completion isr_done; - struct completion slave_isr; + struct completion target_isr; unsigned int rx_cur_len; unsigned int tx_cur_len; unsigned int data_unit; @@ -96,7 +96,7 @@ struct sp7021_spi_ctlr { u8 *rx_buf; }; -static irqreturn_t sp7021_spi_slave_irq(int irq, void *dev) +static irqreturn_t sp7021_spi_target_irq(int irq, void *dev) { struct sp7021_spi_ctlr *pspim = dev; unsigned int data_status; @@ -104,25 +104,25 @@ static irqreturn_t sp7021_spi_slave_irq(int irq, void *dev) data_status = readl(pspim->s_base + SP7021_DATA_RDY_REG); data_status |= SP7021_SLAVE_CLR_INT; writel(data_status , pspim->s_base + SP7021_DATA_RDY_REG); - complete(&pspim->slave_isr); + complete(&pspim->target_isr); return IRQ_HANDLED; } -static int sp7021_spi_slave_abort(struct spi_controller *ctlr) +static int sp7021_spi_target_abort(struct spi_controller *ctlr) { - struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr); + struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr); - complete(&pspim->slave_isr); + complete(&pspim->target_isr); complete(&pspim->isr_done); return 0; } -static int sp7021_spi_slave_tx(struct spi_device *spi, struct spi_transfer *xfer) +static int sp7021_spi_target_tx(struct spi_device *spi, struct spi_transfer *xfer) { struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(spi->controller); u32 value; - reinit_completion(&pspim->slave_isr); + reinit_completion(&pspim->target_isr); value = SP7021_SLAVE_DMA_EN | SP7021_SLAVE_DMA_RW | FIELD_PREP(SP7021_SLAVE_DMA_CMD, 3); writel(value, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG); writel(xfer->len, pspim->s_base + SP7021_SLAVE_DMA_LENGTH_REG); @@ -137,7 +137,7 @@ static int sp7021_spi_slave_tx(struct spi_device *spi, struct spi_transfer *xfer return 0; } -static int 
sp7021_spi_slave_rx(struct spi_device *spi, struct spi_transfer *xfer) +static int sp7021_spi_target_rx(struct spi_device *spi, struct spi_transfer *xfer) { struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(spi->controller); u32 value; @@ -155,7 +155,7 @@ static int sp7021_spi_slave_rx(struct spi_device *spi, struct spi_transfer *xfer return 0; } -static void sp7021_spi_master_rb(struct sp7021_spi_ctlr *pspim, unsigned int len) +static void sp7021_spi_host_rb(struct sp7021_spi_ctlr *pspim, unsigned int len) { int i; @@ -166,7 +166,7 @@ static void sp7021_spi_master_rb(struct sp7021_spi_ctlr *pspim, unsigned int len } } -static void sp7021_spi_master_wb(struct sp7021_spi_ctlr *pspim, unsigned int len) +static void sp7021_spi_host_wb(struct sp7021_spi_ctlr *pspim, unsigned int len) { int i; @@ -177,7 +177,7 @@ static void sp7021_spi_master_wb(struct sp7021_spi_ctlr *pspim, unsigned int len } } -static irqreturn_t sp7021_spi_master_irq(int irq, void *dev) +static irqreturn_t sp7021_spi_host_irq(int irq, void *dev) { struct sp7021_spi_ctlr *pspim = dev; unsigned int tx_cnt, total_len; @@ -206,9 +206,9 @@ static irqreturn_t sp7021_spi_master_irq(int irq, void *dev) fd_status, rx_cnt, tx_cnt, tx_len); if (rx_cnt > 0) - sp7021_spi_master_rb(pspim, rx_cnt); + sp7021_spi_host_rb(pspim, rx_cnt); if (tx_cnt > 0) - sp7021_spi_master_wb(pspim, tx_cnt); + sp7021_spi_host_wb(pspim, tx_cnt); fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG); tx_len = FIELD_GET(SP7021_TX_LEN_MASK, fd_status); @@ -224,7 +224,7 @@ static irqreturn_t sp7021_spi_master_irq(int irq, void *dev) rx_cnt = FIELD_GET(SP7021_RX_CNT_MASK, fd_status); if (rx_cnt > 0) - sp7021_spi_master_rb(pspim, rx_cnt); + sp7021_spi_host_rb(pspim, rx_cnt); } value = readl(pspim->m_base + SP7021_INT_BUSY_REG); value |= SP7021_CLR_MASTER_INT; @@ -240,7 +240,7 @@ static irqreturn_t sp7021_spi_master_irq(int irq, void *dev) static void sp7021_prep_transfer(struct spi_controller *ctlr, struct spi_device *spi) { - struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr); + struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr); pspim->tx_cur_len = 0; pspim->rx_cur_len = 0; @@ -251,7 +251,7 @@ static void sp7021_prep_transfer(struct spi_controller *ctlr, struct spi_device static int sp7021_spi_controller_prepare_message(struct spi_controller *ctlr, struct spi_message *msg) { - struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr); + struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr); struct spi_device *s = msg->spi; u32 valus, rs = 0; @@ -283,7 +283,7 @@ static int sp7021_spi_controller_prepare_message(struct spi_controller *ctlr, static void sp7021_spi_setup_clk(struct spi_controller *ctlr, struct spi_transfer *xfer) { - struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr); + struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr); u32 clk_rate, clk_sel, div; clk_rate = clk_get_rate(pspim->spi_clk); @@ -295,10 +295,10 @@ static void sp7021_spi_setup_clk(struct spi_controller *ctlr, struct spi_transfe writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG); } -static int sp7021_spi_master_transfer_one(struct spi_controller *ctlr, struct spi_device *spi, +static int sp7021_spi_host_transfer_one(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer) { - struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr); + struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr); unsigned long timeout = msecs_to_jiffies(1000); unsigned int xfer_cnt, xfer_len, 
last_len; unsigned int i, len_temp; @@ -323,7 +323,7 @@ static int sp7021_spi_master_transfer_one(struct spi_controller *ctlr, struct sp if (pspim->tx_cur_len < xfer_len) { len_temp = min(pspim->data_unit, xfer_len); - sp7021_spi_master_wb(pspim, len_temp); + sp7021_spi_host_wb(pspim, len_temp); } reg_temp = readl(pspim->m_base + SP7021_SPI_CONFIG_REG); reg_temp &= ~SP7021_CLEAN_RW_BYTE; @@ -359,10 +359,10 @@ static int sp7021_spi_master_transfer_one(struct spi_controller *ctlr, struct sp return 0; } -static int sp7021_spi_slave_transfer_one(struct spi_controller *ctlr, struct spi_device *spi, +static int sp7021_spi_target_transfer_one(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer) { - struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr); + struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr); struct device *dev = pspim->dev; int ret; @@ -371,14 +371,14 @@ static int sp7021_spi_slave_transfer_one(struct spi_controller *ctlr, struct spi xfer->len, DMA_TO_DEVICE); if (dma_mapping_error(dev, xfer->tx_dma)) return -ENOMEM; - ret = sp7021_spi_slave_tx(spi, xfer); + ret = sp7021_spi_target_tx(spi, xfer); dma_unmap_single(dev, xfer->tx_dma, xfer->len, DMA_TO_DEVICE); } else if (xfer->rx_buf && !xfer->tx_buf) { xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, xfer->len, DMA_FROM_DEVICE); if (dma_mapping_error(dev, xfer->rx_dma)) return -ENOMEM; - ret = sp7021_spi_slave_rx(spi, xfer); + ret = sp7021_spi_target_rx(spi, xfer); dma_unmap_single(dev, xfer->rx_dma, xfer->len, DMA_FROM_DEVICE); } else { dev_dbg(&ctlr->dev, "%s() wrong command\n", __func__); @@ -409,14 +409,14 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev) pdev->id = of_alias_get_id(pdev->dev.of_node, "sp_spi"); if (device_property_read_bool(dev, "spi-slave")) - mode = SP7021_SLAVE_MODE; + mode = SP7021_TARGET_MODE; else - mode = SP7021_MASTER_MODE; + mode = SP7021_HOST_MODE; - if (mode == SP7021_SLAVE_MODE) - ctlr = devm_spi_alloc_slave(dev, sizeof(*pspim)); + if (mode == SP7021_TARGET_MODE) + ctlr = devm_spi_alloc_target(dev, sizeof(*pspim)); else - ctlr = devm_spi_alloc_master(dev, sizeof(*pspim)); + ctlr = devm_spi_alloc_host(dev, sizeof(*pspim)); if (!ctlr) return -ENOMEM; device_set_node(&ctlr->dev, dev_fwnode(dev)); @@ -424,9 +424,9 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev) ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; ctlr->auto_runtime_pm = true; ctlr->prepare_message = sp7021_spi_controller_prepare_message; - if (mode == SP7021_SLAVE_MODE) { - ctlr->transfer_one = sp7021_spi_slave_transfer_one; - ctlr->slave_abort = sp7021_spi_slave_abort; + if (mode == SP7021_TARGET_MODE) { + ctlr->transfer_one = sp7021_spi_target_transfer_one; + ctlr->target_abort = sp7021_spi_target_abort; ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX; } else { ctlr->bits_per_word_mask = SPI_BPW_MASK(8); @@ -434,7 +434,7 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev) ctlr->max_speed_hz = 25000000; ctlr->use_gpio_descriptors = true; ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; - ctlr->transfer_one = sp7021_spi_master_transfer_one; + ctlr->transfer_one = sp7021_spi_host_transfer_one; } platform_set_drvdata(pdev, ctlr); pspim = spi_controller_get_devdata(ctlr); @@ -443,7 +443,7 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev) pspim->dev = dev; mutex_init(&pspim->buf_lock); init_completion(&pspim->isr_done); - init_completion(&pspim->slave_isr); + 
init_completion(&pspim->target_isr); pspim->m_base = devm_platform_ioremap_resource_byname(pdev, "master"); if (IS_ERR(pspim->m_base)) @@ -485,12 +485,12 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev) if (ret) return ret; - ret = devm_request_irq(dev, pspim->m_irq, sp7021_spi_master_irq, + ret = devm_request_irq(dev, pspim->m_irq, sp7021_spi_host_irq, IRQF_TRIGGER_RISING, pdev->name, pspim); if (ret) return ret; - ret = devm_request_irq(dev, pspim->s_irq, sp7021_spi_slave_irq, + ret = devm_request_irq(dev, pspim->s_irq, sp7021_spi_target_irq, IRQF_TRIGGER_RISING, pdev->name, pspim); if (ret) return ret; @@ -499,7 +499,7 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev) ret = spi_register_controller(ctlr); if (ret) { pm_runtime_disable(dev); - return dev_err_probe(dev, ret, "spi_register_master fail\n"); + return dev_err_probe(dev, ret, "spi_register_controller fail\n"); } return 0; } @@ -516,7 +516,7 @@ static void sp7021_spi_controller_remove(struct platform_device *pdev) static int __maybe_unused sp7021_spi_controller_suspend(struct device *dev) { struct spi_controller *ctlr = dev_get_drvdata(dev); - struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr); + struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr); return reset_control_assert(pspim->rstc); } @@ -524,7 +524,7 @@ static int __maybe_unused sp7021_spi_controller_suspend(struct device *dev) static int __maybe_unused sp7021_spi_controller_resume(struct device *dev) { struct spi_controller *ctlr = dev_get_drvdata(dev); - struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr); + struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr); reset_control_deassert(pspim->rstc); return clk_prepare_enable(pspim->spi_clk); @@ -534,7 +534,7 @@ static int __maybe_unused sp7021_spi_controller_resume(struct device *dev) static int sp7021_spi_runtime_suspend(struct device *dev) { struct spi_controller *ctlr = dev_get_drvdata(dev); - struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr); + struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr); return reset_control_assert(pspim->rstc); } @@ -542,7 +542,7 @@ static int sp7021_spi_runtime_suspend(struct device *dev) static int sp7021_spi_runtime_resume(struct device *dev) { struct spi_controller *ctlr = dev_get_drvdata(dev); - struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr); + struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr); return reset_control_deassert(pspim->rstc); } diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c index aeaf7db022f0..7cb4301a6fb2 100644 --- a/drivers/spi/spi-synquacer.c +++ b/drivers/spi/spi-synquacer.c @@ -225,11 +225,11 @@ static int write_fifo(struct synquacer_spi *sspi) return 0; } -static int synquacer_spi_config(struct spi_master *master, +static int synquacer_spi_config(struct spi_controller *host, struct spi_device *spi, struct spi_transfer *xfer) { - struct synquacer_spi *sspi = spi_master_get_devdata(master); + struct synquacer_spi *sspi = spi_controller_get_devdata(host); unsigned int speed, mode, bpw, cs, bus_width, transfer_mode; u32 rate, val, div; @@ -263,7 +263,7 @@ static int synquacer_spi_config(struct spi_master *master, } sspi->transfer_mode = transfer_mode; - rate = master->max_speed_hz; + rate = host->max_speed_hz; div = DIV_ROUND_UP(rate, speed); if (div > 254) { @@ -350,11 +350,11 @@ static int synquacer_spi_config(struct spi_master *master, return 0; } -static int synquacer_spi_transfer_one(struct spi_master 
*master, +static int synquacer_spi_transfer_one(struct spi_controller *host, struct spi_device *spi, struct spi_transfer *xfer) { - struct synquacer_spi *sspi = spi_master_get_devdata(master); + struct synquacer_spi *sspi = spi_controller_get_devdata(host); int ret; int status = 0; u32 words; @@ -378,7 +378,7 @@ static int synquacer_spi_transfer_one(struct spi_master *master, if (bpw == 8 && !(xfer->len % 4) && !(spi->mode & SPI_LSB_FIRST)) xfer->bits_per_word = 32; - ret = synquacer_spi_config(master, spi, xfer); + ret = synquacer_spi_config(host, spi, xfer); /* restore */ xfer->bits_per_word = bpw; @@ -482,7 +482,7 @@ static int synquacer_spi_transfer_one(struct spi_master *master, static void synquacer_spi_set_cs(struct spi_device *spi, bool enable) { - struct synquacer_spi *sspi = spi_master_get_devdata(spi->master); + struct synquacer_spi *sspi = spi_controller_get_devdata(spi->controller); u32 val; val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART); @@ -517,11 +517,11 @@ static int synquacer_spi_wait_status_update(struct synquacer_spi *sspi, return -EBUSY; } -static int synquacer_spi_enable(struct spi_master *master) +static int synquacer_spi_enable(struct spi_controller *host) { u32 val; int status; - struct synquacer_spi *sspi = spi_master_get_devdata(master); + struct synquacer_spi *sspi = spi_controller_get_devdata(host); /* Disable module */ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL); @@ -601,18 +601,18 @@ static irqreturn_t sq_spi_tx_handler(int irq, void *priv) static int synquacer_spi_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - struct spi_master *master; + struct spi_controller *host; struct synquacer_spi *sspi; int ret; int rx_irq, tx_irq; - master = spi_alloc_master(&pdev->dev, sizeof(*sspi)); - if (!master) + host = spi_alloc_host(&pdev->dev, sizeof(*sspi)); + if (!host) return -ENOMEM; - platform_set_drvdata(pdev, master); + platform_set_drvdata(pdev, host); - sspi = spi_master_get_devdata(master); + sspi = spi_controller_get_devdata(host); sspi->dev = &pdev->dev; init_completion(&sspi->transfer_done); @@ -625,7 +625,7 @@ static int synquacer_spi_probe(struct platform_device *pdev) sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK; /* Default */ device_property_read_u32(&pdev->dev, "socionext,ihclk-rate", - &master->max_speed_hz); /* for ACPI */ + &host->max_speed_hz); /* for ACPI */ if (dev_of_node(&pdev->dev)) { if (device_property_match_string(&pdev->dev, @@ -655,21 +655,21 @@ static int synquacer_spi_probe(struct platform_device *pdev) goto put_spi; } - master->max_speed_hz = clk_get_rate(sspi->clk); + host->max_speed_hz = clk_get_rate(sspi->clk); } - if (!master->max_speed_hz) { + if (!host->max_speed_hz) { dev_err(&pdev->dev, "missing clock source\n"); ret = -EINVAL; goto disable_clk; } - master->min_speed_hz = master->max_speed_hz / 254; + host->min_speed_hz = host->max_speed_hz / 254; sspi->aces = device_property_read_bool(&pdev->dev, "socionext,set-aces"); sspi->rtm = device_property_read_bool(&pdev->dev, "socionext,use-rtm"); - master->num_chipselect = SYNQUACER_HSSPI_NUM_CHIP_SELECT; + host->num_chipselect = SYNQUACER_HSSPI_NUM_CHIP_SELECT; rx_irq = platform_get_irq(pdev, 0); if (rx_irq <= 0) { @@ -699,27 +699,27 @@ static int synquacer_spi_probe(struct platform_device *pdev) goto disable_clk; } - master->dev.of_node = np; - master->dev.fwnode = pdev->dev.fwnode; - master->auto_runtime_pm = true; - master->bus_num = pdev->id; + host->dev.of_node = np; + host->dev.fwnode = pdev->dev.fwnode; + 
host->auto_runtime_pm = true; + host->bus_num = pdev->id; - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL | - SPI_TX_QUAD | SPI_RX_QUAD; - master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) | - SPI_BPW_MASK(16) | SPI_BPW_MASK(8); + host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL | + SPI_TX_QUAD | SPI_RX_QUAD; + host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) | + SPI_BPW_MASK(16) | SPI_BPW_MASK(8); - master->set_cs = synquacer_spi_set_cs; - master->transfer_one = synquacer_spi_transfer_one; + host->set_cs = synquacer_spi_set_cs; + host->transfer_one = synquacer_spi_transfer_one; - ret = synquacer_spi_enable(master); + ret = synquacer_spi_enable(host); if (ret) goto disable_clk; pm_runtime_set_active(sspi->dev); pm_runtime_enable(sspi->dev); - ret = devm_spi_register_master(sspi->dev, master); + ret = devm_spi_register_controller(sspi->dev, host); if (ret) goto disable_pm; @@ -730,15 +730,15 @@ disable_pm: disable_clk: clk_disable_unprepare(sspi->clk); put_spi: - spi_master_put(master); + spi_controller_put(host); return ret; } static void synquacer_spi_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); - struct synquacer_spi *sspi = spi_master_get_devdata(master); + struct spi_controller *host = platform_get_drvdata(pdev); + struct synquacer_spi *sspi = spi_controller_get_devdata(host); pm_runtime_disable(sspi->dev); @@ -747,11 +747,11 @@ static void synquacer_spi_remove(struct platform_device *pdev) static int __maybe_unused synquacer_spi_suspend(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct synquacer_spi *sspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct synquacer_spi *sspi = spi_controller_get_devdata(host); int ret; - ret = spi_master_suspend(master); + ret = spi_controller_suspend(host); if (ret) return ret; @@ -763,8 +763,8 @@ static int __maybe_unused synquacer_spi_suspend(struct device *dev) static int __maybe_unused synquacer_spi_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct synquacer_spi *sspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct synquacer_spi *sspi = spi_controller_get_devdata(host); int ret; if (!pm_runtime_suspended(dev)) { @@ -778,7 +778,7 @@ static int __maybe_unused synquacer_spi_resume(struct device *dev) return ret; } - ret = synquacer_spi_enable(master); + ret = synquacer_spi_enable(host); if (ret) { clk_disable_unprepare(sspi->clk); dev_err(dev, "failed to enable spi (%d)\n", ret); @@ -786,7 +786,7 @@ static int __maybe_unused synquacer_spi_resume(struct device *dev) } } - ret = spi_master_resume(master); + ret = spi_controller_resume(host); if (ret < 0) clk_disable_unprepare(sspi->clk); diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index 460f232dad50..bc7cc4088eea 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -164,7 +164,7 @@ struct tegra_spi_client_data { struct tegra_spi_data { struct device *dev; - struct spi_master *master; + struct spi_controller *host; spinlock_t lock; struct clk *clk; @@ -718,7 +718,7 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi, static int tegra_spi_set_hw_cs_timing(struct spi_device *spi) { - struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master); + struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller); struct spi_delay *setup = 
&spi->cs_setup; struct spi_delay *hold = &spi->cs_hold; struct spi_delay *inactive = &spi->cs_inactive; @@ -772,7 +772,7 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi, bool is_first_of_msg, bool is_single_xfer) { - struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master); + struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller); struct tegra_spi_client_data *cdata = spi->controller_data; u32 speed = t->speed_hz; u8 bits_per_word = t->bits_per_word; @@ -865,7 +865,7 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi, static int tegra_spi_start_transfer_one(struct spi_device *spi, struct spi_transfer *t, u32 command1) { - struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master); + struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller); unsigned total_fifo_words; int ret; @@ -912,10 +912,10 @@ static struct tegra_spi_client_data *tegra_spi_parse_cdata_dt(struct spi_device *spi) { struct tegra_spi_client_data *cdata; - struct device_node *slave_np; + struct device_node *target_np; - slave_np = spi->dev.of_node; - if (!slave_np) { + target_np = spi->dev.of_node; + if (!target_np) { dev_dbg(&spi->dev, "device node not found\n"); return NULL; } @@ -924,9 +924,9 @@ static struct tegra_spi_client_data if (!cdata) return NULL; - of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay", + of_property_read_u32(target_np, "nvidia,tx-clk-tap-delay", &cdata->tx_clk_tap_delay); - of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay", + of_property_read_u32(target_np, "nvidia,rx-clk-tap-delay", &cdata->rx_clk_tap_delay); return cdata; } @@ -942,7 +942,7 @@ static void tegra_spi_cleanup(struct spi_device *spi) static int tegra_spi_setup(struct spi_device *spi) { - struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master); + struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller); struct tegra_spi_client_data *cdata = spi->controller_data; u32 val; unsigned long flags; @@ -993,7 +993,7 @@ static int tegra_spi_setup(struct spi_device *spi) static void tegra_spi_transfer_end(struct spi_device *spi) { - struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master); + struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller); int cs_val = (spi->mode & SPI_CS_HIGH) ? 
0 : 1; /* GPIO based chip select control */ @@ -1025,11 +1025,11 @@ static void tegra_spi_dump_regs(struct tegra_spi_data *tspi) tegra_spi_readl(tspi, SPI_FIFO_STATUS)); } -static int tegra_spi_transfer_one_message(struct spi_master *master, +static int tegra_spi_transfer_one_message(struct spi_controller *host, struct spi_message *msg) { bool is_first_msg = true; - struct tegra_spi_data *tspi = spi_master_get_devdata(master); + struct tegra_spi_data *tspi = spi_controller_get_devdata(host); struct spi_transfer *xfer; struct spi_device *spi = msg->spi; int ret; @@ -1078,7 +1078,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master, reset_control_assert(tspi->rst); udelay(2); reset_control_deassert(tspi->rst); - tspi->last_used_cs = master->num_chipselect + 1; + tspi->last_used_cs = host->num_chipselect + 1; goto complete_xfer; } @@ -1112,7 +1112,7 @@ complete_xfer: ret = 0; exit: msg->status = ret; - spi_finalize_current_message(master); + spi_finalize_current_message(host); return ret; } @@ -1293,40 +1293,40 @@ MODULE_DEVICE_TABLE(of, tegra_spi_of_match); static int tegra_spi_probe(struct platform_device *pdev) { - struct spi_master *master; + struct spi_controller *host; struct tegra_spi_data *tspi; struct resource *r; int ret, spi_irq; int bus_num; - master = spi_alloc_master(&pdev->dev, sizeof(*tspi)); - if (!master) { - dev_err(&pdev->dev, "master allocation failed\n"); + host = spi_alloc_host(&pdev->dev, sizeof(*tspi)); + if (!host) { + dev_err(&pdev->dev, "host allocation failed\n"); return -ENOMEM; } - platform_set_drvdata(pdev, master); - tspi = spi_master_get_devdata(master); + platform_set_drvdata(pdev, host); + tspi = spi_controller_get_devdata(host); if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency", - &master->max_speed_hz)) - master->max_speed_hz = 25000000; /* 25MHz */ + &host->max_speed_hz)) + host->max_speed_hz = 25000000; /* 25MHz */ /* the spi->mode bits understood by this driver: */ - master->use_gpio_descriptors = true; - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST | - SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE; - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); - master->setup = tegra_spi_setup; - master->cleanup = tegra_spi_cleanup; - master->transfer_one_message = tegra_spi_transfer_one_message; - master->set_cs_timing = tegra_spi_set_hw_cs_timing; - master->num_chipselect = MAX_CHIP_SELECT; - master->auto_runtime_pm = true; + host->use_gpio_descriptors = true; + host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST | + SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE; + host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); + host->setup = tegra_spi_setup; + host->cleanup = tegra_spi_cleanup; + host->transfer_one_message = tegra_spi_transfer_one_message; + host->set_cs_timing = tegra_spi_set_hw_cs_timing; + host->num_chipselect = MAX_CHIP_SELECT; + host->auto_runtime_pm = true; bus_num = of_alias_get_id(pdev->dev.of_node, "spi"); if (bus_num >= 0) - master->bus_num = bus_num; + host->bus_num = bus_num; - tspi->master = master; + tspi->host = host; tspi->dev = &pdev->dev; spin_lock_init(&tspi->lock); @@ -1334,20 +1334,20 @@ static int tegra_spi_probe(struct platform_device *pdev) if (!tspi->soc_data) { dev_err(&pdev->dev, "unsupported tegra\n"); ret = -ENODEV; - goto exit_free_master; + goto exit_free_host; } tspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r); if (IS_ERR(tspi->base)) { ret = PTR_ERR(tspi->base); - goto exit_free_master; + goto exit_free_host; } tspi->phys = r->start; 
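Alongside the function renames, these Tegra hunks retire the spi->master back-pointer: per-device callbacks now reach driver state through spi->controller. A sketch of the resulting shape, reusing the hypothetical struct foo_spi from the earlier sketch:

static int foo_spi_setup(struct spi_device *spi)
{
	/* spi->controller replaces the old spi->master back-pointer */
	struct foo_spi *fspi = spi_controller_get_devdata(spi->controller);

	dev_dbg(&spi->dev, "setup: cs %u, mode 0x%x\n",
		spi_get_chipselect(spi, 0), spi->mode);
	return fspi->base ? 0 : -ENODEV;
}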
spi_irq = platform_get_irq(pdev, 0); if (spi_irq < 0) { ret = spi_irq; - goto exit_free_master; + goto exit_free_host; } tspi->irq = spi_irq; @@ -1355,14 +1355,14 @@ static int tegra_spi_probe(struct platform_device *pdev) if (IS_ERR(tspi->clk)) { dev_err(&pdev->dev, "can not get clock\n"); ret = PTR_ERR(tspi->clk); - goto exit_free_master; + goto exit_free_host; } tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi"); if (IS_ERR(tspi->rst)) { dev_err(&pdev->dev, "can not get reset\n"); ret = PTR_ERR(tspi->rst); - goto exit_free_master; + goto exit_free_host; } tspi->max_buf_size = SPI_FIFO_DEPTH << 2; @@ -1370,7 +1370,7 @@ static int tegra_spi_probe(struct platform_device *pdev) ret = tegra_spi_init_dma_param(tspi, true); if (ret < 0) - goto exit_free_master; + goto exit_free_host; ret = tegra_spi_init_dma_param(tspi, false); if (ret < 0) goto exit_rx_dma_free; @@ -1401,7 +1401,7 @@ static int tegra_spi_probe(struct platform_device *pdev) tspi->spi_cs_timing1 = tegra_spi_readl(tspi, SPI_CS_TIMING1); tspi->spi_cs_timing2 = tegra_spi_readl(tspi, SPI_CS_TIMING2); tspi->def_command2_reg = tegra_spi_readl(tspi, SPI_COMMAND2); - tspi->last_used_cs = master->num_chipselect + 1; + tspi->last_used_cs = host->num_chipselect + 1; pm_runtime_put(&pdev->dev); ret = request_threaded_irq(tspi->irq, tegra_spi_isr, tegra_spi_isr_thread, IRQF_ONESHOT, @@ -1412,10 +1412,10 @@ static int tegra_spi_probe(struct platform_device *pdev) goto exit_pm_disable; } - master->dev.of_node = pdev->dev.of_node; - ret = devm_spi_register_master(&pdev->dev, master); + host->dev.of_node = pdev->dev.of_node; + ret = devm_spi_register_controller(&pdev->dev, host); if (ret < 0) { - dev_err(&pdev->dev, "can not register to master err %d\n", ret); + dev_err(&pdev->dev, "can not register to host err %d\n", ret); goto exit_free_irq; } return ret; @@ -1429,15 +1429,15 @@ exit_pm_disable: tegra_spi_deinit_dma_param(tspi, false); exit_rx_dma_free: tegra_spi_deinit_dma_param(tspi, true); -exit_free_master: - spi_master_put(master); +exit_free_host: + spi_controller_put(host); return ret; } static void tegra_spi_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); - struct tegra_spi_data *tspi = spi_master_get_devdata(master); + struct spi_controller *host = platform_get_drvdata(pdev); + struct tegra_spi_data *tspi = spi_controller_get_devdata(host); free_irq(tspi->irq, tspi); @@ -1455,15 +1455,15 @@ static void tegra_spi_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int tegra_spi_suspend(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); + struct spi_controller *host = dev_get_drvdata(dev); - return spi_master_suspend(master); + return spi_controller_suspend(host); } static int tegra_spi_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct tegra_spi_data *tspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct tegra_spi_data *tspi = spi_controller_get_devdata(host); int ret; ret = pm_runtime_resume_and_get(dev); @@ -1473,17 +1473,17 @@ static int tegra_spi_resume(struct device *dev) } tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1); tegra_spi_writel(tspi, tspi->def_command2_reg, SPI_COMMAND2); - tspi->last_used_cs = master->num_chipselect + 1; + tspi->last_used_cs = host->num_chipselect + 1; pm_runtime_put(dev); - return spi_master_resume(master); + return spi_controller_resume(host); } #endif static int tegra_spi_runtime_suspend(struct device 
*dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct tegra_spi_data *tspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct tegra_spi_data *tspi = spi_controller_get_devdata(host); /* Flush all write which are in PPSB queue by reading back */ tegra_spi_readl(tspi, SPI_COMMAND1); @@ -1494,8 +1494,8 @@ static int tegra_spi_runtime_suspend(struct device *dev) static int tegra_spi_runtime_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct tegra_spi_data *tspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct tegra_spi_data *tspi = spi_controller_get_devdata(host); int ret; ret = clk_prepare_enable(tspi->clk); diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c index 0c5507473f97..9f6b9f89be5b 100644 --- a/drivers/spi/spi-tegra20-sflash.c +++ b/drivers/spi/spi-tegra20-sflash.c @@ -102,7 +102,7 @@ struct tegra_sflash_data { struct device *dev; - struct spi_master *master; + struct spi_controller *host; spinlock_t lock; struct clk *clk; @@ -251,7 +251,7 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi, struct spi_transfer *t, bool is_first_of_msg, bool is_single_xfer) { - struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master); + struct tegra_sflash_data *tsd = spi_controller_get_devdata(spi->controller); u32 speed; u32 command; @@ -303,12 +303,12 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi, return tegra_sflash_start_cpu_based_transfer(tsd, t); } -static int tegra_sflash_transfer_one_message(struct spi_master *master, +static int tegra_sflash_transfer_one_message(struct spi_controller *host, struct spi_message *msg) { bool is_first_msg = true; int single_xfer; - struct tegra_sflash_data *tsd = spi_master_get_devdata(master); + struct tegra_sflash_data *tsd = spi_controller_get_devdata(host); struct spi_transfer *xfer; struct spi_device *spi = msg->spi; int ret; @@ -351,7 +351,7 @@ static int tegra_sflash_transfer_one_message(struct spi_master *master, exit: tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND); msg->status = ret; - spi_finalize_current_message(master); + spi_finalize_current_message(host); return ret; } @@ -416,7 +416,7 @@ MODULE_DEVICE_TABLE(of, tegra_sflash_of_match); static int tegra_sflash_probe(struct platform_device *pdev) { - struct spi_master *master; + struct spi_controller *host; struct tegra_sflash_data *tsd; int ret; const struct of_device_id *match; @@ -427,37 +427,37 @@ static int tegra_sflash_probe(struct platform_device *pdev) return -ENODEV; } - master = spi_alloc_master(&pdev->dev, sizeof(*tsd)); - if (!master) { - dev_err(&pdev->dev, "master allocation failed\n"); + host = spi_alloc_host(&pdev->dev, sizeof(*tsd)); + if (!host) { + dev_err(&pdev->dev, "host allocation failed\n"); return -ENOMEM; } /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA; - master->transfer_one_message = tegra_sflash_transfer_one_message; - master->auto_runtime_pm = true; - master->num_chipselect = MAX_CHIP_SELECT; - - platform_set_drvdata(pdev, master); - tsd = spi_master_get_devdata(master); - tsd->master = master; + host->mode_bits = SPI_CPOL | SPI_CPHA; + host->transfer_one_message = tegra_sflash_transfer_one_message; + host->auto_runtime_pm = true; + host->num_chipselect = MAX_CHIP_SELECT; + + platform_set_drvdata(pdev, host); + tsd = spi_controller_get_devdata(host); + tsd->host = host; tsd->dev = 
&pdev->dev; spin_lock_init(&tsd->lock); if (of_property_read_u32(tsd->dev->of_node, "spi-max-frequency", - &master->max_speed_hz)) - master->max_speed_hz = 25000000; /* 25MHz */ + &host->max_speed_hz)) + host->max_speed_hz = 25000000; /* 25MHz */ tsd->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(tsd->base)) { ret = PTR_ERR(tsd->base); - goto exit_free_master; + goto exit_free_host; } ret = platform_get_irq(pdev, 0); if (ret < 0) - goto exit_free_master; + goto exit_free_host; tsd->irq = ret; ret = request_irq(tsd->irq, tegra_sflash_isr, 0, @@ -465,7 +465,7 @@ static int tegra_sflash_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", tsd->irq); - goto exit_free_master; + goto exit_free_host; } tsd->clk = devm_clk_get(&pdev->dev, NULL); @@ -505,10 +505,10 @@ static int tegra_sflash_probe(struct platform_device *pdev) tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND); pm_runtime_put(&pdev->dev); - master->dev.of_node = pdev->dev.of_node; - ret = devm_spi_register_master(&pdev->dev, master); + host->dev.of_node = pdev->dev.of_node; + ret = devm_spi_register_controller(&pdev->dev, host); if (ret < 0) { - dev_err(&pdev->dev, "can not register to master err %d\n", ret); + dev_err(&pdev->dev, "can not register to host err %d\n", ret); goto exit_pm_disable; } return ret; @@ -519,15 +519,15 @@ exit_pm_disable: tegra_sflash_runtime_suspend(&pdev->dev); exit_free_irq: free_irq(tsd->irq, tsd); -exit_free_master: - spi_master_put(master); +exit_free_host: + spi_controller_put(host); return ret; } static void tegra_sflash_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); - struct tegra_sflash_data *tsd = spi_master_get_devdata(master); + struct spi_controller *host = platform_get_drvdata(pdev); + struct tegra_sflash_data *tsd = spi_controller_get_devdata(host); free_irq(tsd->irq, tsd); @@ -539,15 +539,15 @@ static void tegra_sflash_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int tegra_sflash_suspend(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); + struct spi_controller *host = dev_get_drvdata(dev); - return spi_master_suspend(master); + return spi_controller_suspend(host); } static int tegra_sflash_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct tegra_sflash_data *tsd = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct tegra_sflash_data *tsd = spi_controller_get_devdata(host); int ret; ret = pm_runtime_resume_and_get(dev); @@ -558,14 +558,14 @@ static int tegra_sflash_resume(struct device *dev) tegra_sflash_writel(tsd, tsd->command_reg, SPI_COMMAND); pm_runtime_put(dev); - return spi_master_resume(master); + return spi_controller_resume(host); } #endif static int tegra_sflash_runtime_suspend(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct tegra_sflash_data *tsd = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct tegra_sflash_data *tsd = spi_controller_get_devdata(host); /* Flush all write which are in PPSB queue by reading back */ tegra_sflash_readl(tsd, SPI_COMMAND); @@ -576,8 +576,8 @@ static int tegra_sflash_runtime_suspend(struct device *dev) static int tegra_sflash_runtime_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct tegra_sflash_data *tsd = spi_master_get_devdata(master); + struct spi_controller *host = 
dev_get_drvdata(dev); + struct tegra_sflash_data *tsd = spi_controller_get_devdata(host); int ret; ret = clk_prepare_enable(tsd->clk); diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index f5cd365c913a..ed1393d159ae 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -152,7 +152,7 @@ struct tegra_slink_chip_data { struct tegra_slink_data { struct device *dev; - struct spi_master *master; + struct spi_controller *host; const struct tegra_slink_chip_data *chip_data; spinlock_t lock; @@ -671,7 +671,7 @@ static void tegra_slink_deinit_dma_param(struct tegra_slink_data *tspi, static int tegra_slink_start_transfer_one(struct spi_device *spi, struct spi_transfer *t) { - struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master); + struct tegra_slink_data *tspi = spi_controller_get_devdata(spi->controller); u32 speed; u8 bits_per_word; unsigned total_fifo_words; @@ -737,7 +737,7 @@ static int tegra_slink_setup(struct spi_device *spi) SLINK_CS_POLARITY3, }; - struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master); + struct tegra_slink_data *tspi = spi_controller_get_devdata(spi->controller); u32 val; unsigned long flags; int ret; @@ -768,10 +768,10 @@ static int tegra_slink_setup(struct spi_device *spi) return 0; } -static int tegra_slink_prepare_message(struct spi_master *master, +static int tegra_slink_prepare_message(struct spi_controller *host, struct spi_message *msg) { - struct tegra_slink_data *tspi = spi_master_get_devdata(master); + struct tegra_slink_data *tspi = spi_controller_get_devdata(host); struct spi_device *spi = msg->spi; tegra_slink_clear_status(tspi); @@ -794,11 +794,11 @@ static int tegra_slink_prepare_message(struct spi_master *master, return 0; } -static int tegra_slink_transfer_one(struct spi_master *master, +static int tegra_slink_transfer_one(struct spi_controller *host, struct spi_device *spi, struct spi_transfer *xfer) { - struct tegra_slink_data *tspi = spi_master_get_devdata(master); + struct tegra_slink_data *tspi = spi_controller_get_devdata(host); int ret; reinit_completion(&tspi->xfer_completion); @@ -825,10 +825,10 @@ static int tegra_slink_transfer_one(struct spi_master *master, return 0; } -static int tegra_slink_unprepare_message(struct spi_master *master, +static int tegra_slink_unprepare_message(struct spi_controller *host, struct spi_message *msg) { - struct tegra_slink_data *tspi = spi_master_get_devdata(master); + struct tegra_slink_data *tspi = spi_controller_get_devdata(host); tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); @@ -999,7 +999,7 @@ MODULE_DEVICE_TABLE(of, tegra_slink_of_match); static int tegra_slink_probe(struct platform_device *pdev) { - struct spi_master *master; + struct spi_controller *host; struct tegra_slink_data *tspi; struct resource *r; int ret, spi_irq; @@ -1007,36 +1007,36 @@ static int tegra_slink_probe(struct platform_device *pdev) cdata = of_device_get_match_data(&pdev->dev); - master = spi_alloc_master(&pdev->dev, sizeof(*tspi)); - if (!master) { - dev_err(&pdev->dev, "master allocation failed\n"); + host = spi_alloc_host(&pdev->dev, sizeof(*tspi)); + if (!host) { + dev_err(&pdev->dev, "host allocation failed\n"); return -ENOMEM; } /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - master->setup = tegra_slink_setup; - master->prepare_message = tegra_slink_prepare_message; - master->transfer_one = 
tegra_slink_transfer_one; - master->unprepare_message = tegra_slink_unprepare_message; - master->auto_runtime_pm = true; - master->num_chipselect = MAX_CHIP_SELECT; - - platform_set_drvdata(pdev, master); - tspi = spi_master_get_devdata(master); - tspi->master = master; + host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + host->setup = tegra_slink_setup; + host->prepare_message = tegra_slink_prepare_message; + host->transfer_one = tegra_slink_transfer_one; + host->unprepare_message = tegra_slink_unprepare_message; + host->auto_runtime_pm = true; + host->num_chipselect = MAX_CHIP_SELECT; + + platform_set_drvdata(pdev, host); + tspi = spi_controller_get_devdata(host); + tspi->host = host; tspi->dev = &pdev->dev; tspi->chip_data = cdata; spin_lock_init(&tspi->lock); if (of_property_read_u32(tspi->dev->of_node, "spi-max-frequency", - &master->max_speed_hz)) - master->max_speed_hz = 25000000; /* 25MHz */ + &host->max_speed_hz)) + host->max_speed_hz = 25000000; /* 25MHz */ tspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r); if (IS_ERR(tspi->base)) { ret = PTR_ERR(tspi->base); - goto exit_free_master; + goto exit_free_host; } tspi->phys = r->start; @@ -1045,26 +1045,26 @@ static int tegra_slink_probe(struct platform_device *pdev) if (IS_ERR(tspi->clk)) { ret = PTR_ERR(tspi->clk); dev_err(&pdev->dev, "Can not get clock %d\n", ret); - goto exit_free_master; + goto exit_free_host; } tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi"); if (IS_ERR(tspi->rst)) { dev_err(&pdev->dev, "can not get reset\n"); ret = PTR_ERR(tspi->rst); - goto exit_free_master; + goto exit_free_host; } ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); if (ret) - goto exit_free_master; + goto exit_free_host; tspi->max_buf_size = SLINK_FIFO_DEPTH << 2; tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN; ret = tegra_slink_init_dma_param(tspi, true); if (ret < 0) - goto exit_free_master; + goto exit_free_host; ret = tegra_slink_init_dma_param(tspi, false); if (ret < 0) goto exit_rx_dma_free; @@ -1103,10 +1103,10 @@ static int tegra_slink_probe(struct platform_device *pdev) tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); - master->dev.of_node = pdev->dev.of_node; - ret = spi_register_master(master); + host->dev.of_node = pdev->dev.of_node; + ret = spi_register_controller(host); if (ret < 0) { - dev_err(&pdev->dev, "can not register to master err %d\n", ret); + dev_err(&pdev->dev, "can not register to host err %d\n", ret); goto exit_free_irq; } @@ -1124,17 +1124,17 @@ exit_pm_disable: tegra_slink_deinit_dma_param(tspi, false); exit_rx_dma_free: tegra_slink_deinit_dma_param(tspi, true); -exit_free_master: - spi_master_put(master); +exit_free_host: + spi_controller_put(host); return ret; } static void tegra_slink_remove(struct platform_device *pdev) { - struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); - struct tegra_slink_data *tspi = spi_master_get_devdata(master); + struct spi_controller *host = spi_controller_get(platform_get_drvdata(pdev)); + struct tegra_slink_data *tspi = spi_controller_get_devdata(host); - spi_unregister_master(master); + spi_unregister_controller(host); free_irq(tspi->irq, tspi); @@ -1146,21 +1146,21 @@ static void tegra_slink_remove(struct platform_device *pdev) if (tspi->rx_dma_chan) tegra_slink_deinit_dma_param(tspi, true); - spi_master_put(master); + spi_controller_put(host); } #ifdef CONFIG_PM_SLEEP static int tegra_slink_suspend(struct device *dev) { - struct 
spi_master *master = dev_get_drvdata(dev); + struct spi_controller *host = dev_get_drvdata(dev); - return spi_master_suspend(master); + return spi_controller_suspend(host); } static int tegra_slink_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct tegra_slink_data *tspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct tegra_slink_data *tspi = spi_controller_get_devdata(host); int ret; ret = pm_runtime_resume_and_get(dev); @@ -1172,14 +1172,14 @@ static int tegra_slink_resume(struct device *dev) tegra_slink_writel(tspi, tspi->command2_reg, SLINK_COMMAND2); pm_runtime_put(dev); - return spi_master_resume(master); + return spi_controller_resume(host); } #endif static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct tegra_slink_data *tspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct tegra_slink_data *tspi = spi_controller_get_devdata(host); /* Flush all write which are in PPSB queue by reading back */ tegra_slink_readl(tspi, SLINK_MAS_DATA); @@ -1190,8 +1190,8 @@ static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev) static int __maybe_unused tegra_slink_runtime_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct tegra_slink_data *tspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct tegra_slink_data *tspi = spi_controller_get_devdata(host); int ret; ret = clk_prepare_enable(tspi->clk); diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index e9ad9b0b598b..afbd64a217eb 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ b/drivers/spi/spi-tegra210-quad.c @@ -175,7 +175,7 @@ struct tegra_qspi_client_data { struct tegra_qspi { struct device *dev; - struct spi_master *master; + struct spi_controller *host; /* lock to protect data accessed by irq */ spinlock_t lock; @@ -809,7 +809,7 @@ err_out: static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t, bool is_first_of_msg) { - struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master); + struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller); struct tegra_qspi_client_data *cdata = spi->controller_data; u32 command1, command2, speed = t->speed_hz; u8 bits_per_word = t->bits_per_word; @@ -870,7 +870,7 @@ static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_tran static int tegra_qspi_start_transfer_one(struct spi_device *spi, struct spi_transfer *t, u32 command1) { - struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master); + struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller); unsigned int total_fifo_words; u8 bus_width = 0; int ret; @@ -925,7 +925,7 @@ static int tegra_qspi_start_transfer_one(struct spi_device *spi, static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi) { struct tegra_qspi_client_data *cdata; - struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master); + struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller); cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL); if (!cdata) @@ -941,7 +941,7 @@ static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_devic static int tegra_qspi_setup(struct spi_device *spi) { - struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master); + struct tegra_qspi *tqspi = 
spi_controller_get_devdata(spi->controller); struct tegra_qspi_client_data *cdata = spi->controller_data; unsigned long flags; u32 val; @@ -1005,7 +1005,7 @@ static void tegra_qspi_handle_error(struct tegra_qspi *tqspi) static void tegra_qspi_transfer_end(struct spi_device *spi) { - struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master); + struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller); int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1; if (cs_val) @@ -1316,10 +1316,10 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi, return true; } -static int tegra_qspi_transfer_one_message(struct spi_master *master, +static int tegra_qspi_transfer_one_message(struct spi_controller *host, struct spi_message *msg) { - struct tegra_qspi *tqspi = spi_master_get_devdata(master); + struct tegra_qspi *tqspi = spi_controller_get_devdata(host); int ret; if (tegra_qspi_validate_cmb_seq(tqspi, msg)) @@ -1327,7 +1327,7 @@ static int tegra_qspi_transfer_one_message(struct spi_master *master, else ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg); - spi_finalize_current_message(master); + spi_finalize_current_message(host); return ret; } @@ -1533,38 +1533,38 @@ MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match); static int tegra_qspi_probe(struct platform_device *pdev) { - struct spi_master *master; + struct spi_controller *host; struct tegra_qspi *tqspi; struct resource *r; int ret, qspi_irq; int bus_num; - master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi)); - if (!master) + host = devm_spi_alloc_host(&pdev->dev, sizeof(*tqspi)); + if (!host) return -ENOMEM; - platform_set_drvdata(pdev, master); - tqspi = spi_master_get_devdata(master); + platform_set_drvdata(pdev, host); + tqspi = spi_controller_get_devdata(host); - master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH | - SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD; - master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8); - master->flags = SPI_CONTROLLER_HALF_DUPLEX; - master->setup = tegra_qspi_setup; - master->transfer_one_message = tegra_qspi_transfer_one_message; - master->num_chipselect = 1; - master->auto_runtime_pm = true; + host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH | + SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD; + host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8); + host->flags = SPI_CONTROLLER_HALF_DUPLEX; + host->setup = tegra_qspi_setup; + host->transfer_one_message = tegra_qspi_transfer_one_message; + host->num_chipselect = 1; + host->auto_runtime_pm = true; bus_num = of_alias_get_id(pdev->dev.of_node, "spi"); if (bus_num >= 0) - master->bus_num = bus_num; + host->bus_num = bus_num; - tqspi->master = master; + tqspi->host = host; tqspi->dev = &pdev->dev; spin_lock_init(&tqspi->lock); tqspi->soc_data = device_get_match_data(&pdev->dev); - master->num_chipselect = tqspi->soc_data->cs_count; + host->num_chipselect = tqspi->soc_data->cs_count; tqspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r); if (IS_ERR(tqspi->base)) return PTR_ERR(tqspi->base); @@ -1625,10 +1625,10 @@ static int tegra_qspi_probe(struct platform_device *pdev) goto exit_pm_disable; } - master->dev.of_node = pdev->dev.of_node; - ret = spi_register_master(master); + host->dev.of_node = pdev->dev.of_node; + ret = spi_register_controller(host); if (ret < 0) { - dev_err(&pdev->dev, "failed to register master: %d\n", ret); + dev_err(&pdev->dev, "failed to register host: %d\n", ret); goto exit_free_irq; } @@ -1644,10 +1644,10 @@ 
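The system-sleep hooks in all of these drivers reduce to the same shape: spi_controller_suspend() parks the message queue, the resume path reprograms whatever state the hardware lost, and spi_controller_resume() restarts the queue. A sketch, with foo_spi_hw_init() as a hypothetical stand-in for the register restore:

static int foo_spi_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);

	return spi_controller_suspend(host);	/* quiesce the queue */
}

static int foo_spi_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct foo_spi *fspi = spi_controller_get_devdata(host);

	foo_spi_hw_init(fspi);	/* hypothetical: reprogram lost registers */

	return spi_controller_resume(host);	/* restart the queue */
}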
exit_pm_disable: static void tegra_qspi_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); - struct tegra_qspi *tqspi = spi_master_get_devdata(master); + struct spi_controller *host = platform_get_drvdata(pdev); + struct tegra_qspi *tqspi = spi_controller_get_devdata(host); - spi_unregister_master(master); + spi_unregister_controller(host); free_irq(tqspi->irq, tqspi); pm_runtime_force_suspend(&pdev->dev); tegra_qspi_deinit_dma(tqspi); @@ -1655,15 +1655,15 @@ static void tegra_qspi_remove(struct platform_device *pdev) static int __maybe_unused tegra_qspi_suspend(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); + struct spi_controller *host = dev_get_drvdata(dev); - return spi_master_suspend(master); + return spi_controller_suspend(host); } static int __maybe_unused tegra_qspi_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct tegra_qspi *tqspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct tegra_qspi *tqspi = spi_controller_get_devdata(host); int ret; ret = pm_runtime_resume_and_get(dev); @@ -1676,13 +1676,13 @@ static int __maybe_unused tegra_qspi_resume(struct device *dev) tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2); pm_runtime_put(dev); - return spi_master_resume(master); + return spi_controller_resume(host); } static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct tegra_qspi *tqspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct tegra_qspi *tqspi = spi_controller_get_devdata(host); /* Runtime pm disabled with ACPI */ if (has_acpi_companion(tqspi->dev)) @@ -1697,8 +1697,8 @@ static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev) static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct tegra_qspi *tqspi = spi_master_get_devdata(master); + struct spi_controller *host = dev_get_drvdata(dev); + struct tegra_qspi *tqspi = spi_controller_get_devdata(host); int ret; /* Runtime pm disabled with ACPI */ diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 4c81516b67db..0fe6899e78dd 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -40,7 +40,7 @@ struct ti_qspi { /* list synchronization */ struct mutex list_lock; - struct spi_master *master; + struct spi_controller *host; void __iomem *base; void __iomem *mmap_base; size_t mmap_size; @@ -137,20 +137,20 @@ static inline void ti_qspi_write(struct ti_qspi *qspi, static int ti_qspi_setup(struct spi_device *spi) { - struct ti_qspi *qspi = spi_master_get_devdata(spi->master); + struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller); int ret; - if (spi->master->busy) { - dev_dbg(qspi->dev, "master busy doing other transfers\n"); + if (spi->controller->busy) { + dev_dbg(qspi->dev, "host busy doing other transfers\n"); return -EBUSY; } - if (!qspi->master->max_speed_hz) { + if (!qspi->host->max_speed_hz) { dev_err(qspi->dev, "spi max frequency not defined\n"); return -EINVAL; } - spi->max_speed_hz = min(spi->max_speed_hz, qspi->master->max_speed_hz); + spi->max_speed_hz = min(spi->max_speed_hz, qspi->host->max_speed_hz); ret = pm_runtime_resume_and_get(qspi->dev); if (ret < 0) { @@ -526,7 +526,7 @@ static int ti_qspi_dma_xfer_sg(struct ti_qspi *qspi, struct sg_table rx_sg, static void 
ti_qspi_enable_memory_map(struct spi_device *spi) { - struct ti_qspi *qspi = spi_master_get_devdata(spi->master); + struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller); ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG); if (qspi->ctrl_base) { @@ -540,7 +540,7 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi) static void ti_qspi_disable_memory_map(struct spi_device *spi) { - struct ti_qspi *qspi = spi_master_get_devdata(spi->master); + struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller); ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG); if (qspi->ctrl_base) @@ -554,7 +554,7 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode, u8 data_nbits, u8 addr_width, u8 dummy_bytes) { - struct ti_qspi *qspi = spi_master_get_devdata(spi->master); + struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller); u32 memval = opcode; switch (data_nbits) { @@ -576,7 +576,7 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode, static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) { - struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master); + struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->controller); size_t max_len; if (op->data.dir == SPI_MEM_DATA_IN) { @@ -606,19 +606,19 @@ static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) static int ti_qspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) { - struct ti_qspi *qspi = spi_master_get_devdata(mem->spi->master); + struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->controller); u32 from = 0; int ret = 0; /* Only optimize read path. */ if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN || !op->addr.nbytes || op->addr.nbytes > 4) - return -ENOTSUPP; + return -EOPNOTSUPP; /* Address exceeds MMIO window size, fall back to regular mode. 
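 * (Editor's note: concretely, the check just below returns -EOPNOTSUPP
 * whenever from + op->data.nbytes would run past qspi->mmap_size, and
 * the spi-mem core then falls back to plain, non-mapped transfers.)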
*/ from = op->addr.val; if (from + op->data.nbytes > qspi->mmap_size) - return -ENOTSUPP; + return -EOPNOTSUPP; mutex_lock(&qspi->list_lock); @@ -633,10 +633,10 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem, struct sg_table sgt; if (virt_addr_valid(op->data.buf.in) && - !spi_controller_dma_map_mem_op_data(mem->spi->master, op, + !spi_controller_dma_map_mem_op_data(mem->spi->controller, op, &sgt)) { ret = ti_qspi_dma_xfer_sg(qspi, sgt, from); - spi_controller_dma_unmap_mem_op_data(mem->spi->master, + spi_controller_dma_unmap_mem_op_data(mem->spi->controller, op, &sgt); } else { ret = ti_qspi_dma_bounce_buffer(qspi, from, @@ -658,10 +658,10 @@ static const struct spi_controller_mem_ops ti_qspi_mem_ops = { .adjust_op_size = ti_qspi_adjust_op_size, }; -static int ti_qspi_start_transfer_one(struct spi_master *master, +static int ti_qspi_start_transfer_one(struct spi_controller *host, struct spi_message *m) { - struct ti_qspi *qspi = spi_master_get_devdata(master); + struct ti_qspi *qspi = spi_controller_get_devdata(host); struct spi_device *spi = m->spi; struct spi_transfer *t; int status = 0, ret; @@ -720,7 +720,7 @@ static int ti_qspi_start_transfer_one(struct spi_master *master, ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG); m->status = status; - spi_finalize_current_message(master); + spi_finalize_current_message(host); return status; } @@ -756,33 +756,33 @@ MODULE_DEVICE_TABLE(of, ti_qspi_match); static int ti_qspi_probe(struct platform_device *pdev) { struct ti_qspi *qspi; - struct spi_master *master; + struct spi_controller *host; struct resource *r, *res_mmap; struct device_node *np = pdev->dev.of_node; u32 max_freq; int ret = 0, num_cs, irq; dma_cap_mask_t mask; - master = spi_alloc_master(&pdev->dev, sizeof(*qspi)); - if (!master) + host = spi_alloc_host(&pdev->dev, sizeof(*qspi)); + if (!host) return -ENOMEM; - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD; + host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD; - master->flags = SPI_CONTROLLER_HALF_DUPLEX; - master->setup = ti_qspi_setup; - master->auto_runtime_pm = true; - master->transfer_one_message = ti_qspi_start_transfer_one; - master->dev.of_node = pdev->dev.of_node; - master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | - SPI_BPW_MASK(8); - master->mem_ops = &ti_qspi_mem_ops; + host->flags = SPI_CONTROLLER_HALF_DUPLEX; + host->setup = ti_qspi_setup; + host->auto_runtime_pm = true; + host->transfer_one_message = ti_qspi_start_transfer_one; + host->dev.of_node = pdev->dev.of_node; + host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | + SPI_BPW_MASK(8); + host->mem_ops = &ti_qspi_mem_ops; if (!of_property_read_u32(np, "num-cs", &num_cs)) - master->num_chipselect = num_cs; + host->num_chipselect = num_cs; - qspi = spi_master_get_devdata(master); - qspi->master = master; + qspi = spi_controller_get_devdata(host); + qspi->host = host; qspi->dev = &pdev->dev; platform_set_drvdata(pdev, qspi); @@ -792,7 +792,7 @@ static int ti_qspi_probe(struct platform_device *pdev) if (r == NULL) { dev_err(&pdev->dev, "missing platform data\n"); ret = -ENODEV; - goto free_master; + goto free_host; } } @@ -812,7 +812,7 @@ static int ti_qspi_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; - goto free_master; + goto free_host; } mutex_init(&qspi->list_lock); @@ -820,7 +820,7 @@ static int ti_qspi_probe(struct platform_device *pdev) qspi->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(qspi->base)) { ret = 
PTR_ERR(qspi->base); - goto free_master; + goto free_host; } @@ -830,7 +830,7 @@ static int ti_qspi_probe(struct platform_device *pdev) "syscon-chipselects"); if (IS_ERR(qspi->ctrl_base)) { ret = PTR_ERR(qspi->ctrl_base); - goto free_master; + goto free_host; } ret = of_property_read_u32_index(np, "syscon-chipselects", @@ -838,7 +838,7 @@ static int ti_qspi_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "couldn't get ctrl_mod reg index\n"); - goto free_master; + goto free_host; } } @@ -853,7 +853,7 @@ static int ti_qspi_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); if (!of_property_read_u32(np, "spi-max-frequency", &max_freq)) - master->max_speed_hz = max_freq; + host->max_speed_hz = max_freq; dma_cap_zero(mask); dma_cap_set(DMA_MEMCPY, mask); @@ -876,7 +876,7 @@ static int ti_qspi_probe(struct platform_device *pdev) dma_release_channel(qspi->rx_chan); goto no_dma; } - master->dma_rx = qspi->rx_chan; + host->dma_rx = qspi->rx_chan; init_completion(&qspi->transfer_complete); if (res_mmap) qspi->mmap_phys_base = (dma_addr_t)res_mmap->start; @@ -889,39 +889,40 @@ no_dma: "mmap failed with error %ld using PIO mode\n", PTR_ERR(qspi->mmap_base)); qspi->mmap_base = NULL; - master->mem_ops = NULL; + host->mem_ops = NULL; } } qspi->mmap_enabled = false; qspi->current_cs = -1; - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, host); if (!ret) return 0; ti_qspi_dma_cleanup(qspi); pm_runtime_disable(&pdev->dev); -free_master: - spi_master_put(master); +free_host: + spi_controller_put(host); return ret; } -static int ti_qspi_remove(struct platform_device *pdev) +static void ti_qspi_remove(struct platform_device *pdev) { struct ti_qspi *qspi = platform_get_drvdata(pdev); int rc; - rc = spi_master_suspend(qspi->master); - if (rc) - return rc; + rc = spi_controller_suspend(qspi->host); + if (rc) { + dev_alert(&pdev->dev, "spi_controller_suspend() failed (%pe)\n", + ERR_PTR(rc)); + return; + } pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); ti_qspi_dma_cleanup(qspi); - - return 0; } static const struct dev_pm_ops ti_qspi_pm_ops = { @@ -930,7 +931,7 @@ static const struct dev_pm_ops ti_qspi_pm_ops = { static struct platform_driver ti_qspi_driver = { .probe = ti_qspi_probe, - .remove = ti_qspi_remove, + .remove_new = ti_qspi_remove, .driver = { .name = "ti-qspi", .pm = &ti_qspi_pm_ops, diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index af5846cfe5e9..271f3e7f834b 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -124,7 +124,7 @@ struct pch_spi_dma_ctrl { * struct pch_spi_data - Holds the SPI channel specific details * @io_remap_addr: The remapped PCI base address * @io_base_addr: Base address - * @master: Pointer to the SPI master structure + * @host: Pointer to the SPI controller structure * @work: Reference to work queue handler * @wait: Wait queue for waking up upon receiving an * interrupt. @@ -161,7 +161,7 @@ struct pch_spi_dma_ctrl { struct pch_spi_data { void __iomem *io_remap_addr; unsigned long io_base_addr; - struct spi_master *master; + struct spi_controller *host; struct work_struct work; wait_queue_head_t wait; u8 transfer_complete; @@ -216,48 +216,48 @@ static const struct pci_device_id pch_spi_pcidev_id[] = { /** * pch_spi_writereg() - Performs register writes - * @master: Pointer to struct spi_master. + * @host: Pointer to struct spi_controller. * @idx: Register offset. * @val: Value to be written to register. 
*/ -static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val) +static inline void pch_spi_writereg(struct spi_controller *host, int idx, u32 val) { - struct pch_spi_data *data = spi_master_get_devdata(master); + struct pch_spi_data *data = spi_controller_get_devdata(host); iowrite32(val, (data->io_remap_addr + idx)); } /** * pch_spi_readreg() - Performs register reads - * @master: Pointer to struct spi_master. + * @host: Pointer to struct spi_controller. * @idx: Register offset. */ -static inline u32 pch_spi_readreg(struct spi_master *master, int idx) +static inline u32 pch_spi_readreg(struct spi_controller *host, int idx) { - struct pch_spi_data *data = spi_master_get_devdata(master); + struct pch_spi_data *data = spi_controller_get_devdata(host); return ioread32(data->io_remap_addr + idx); } -static inline void pch_spi_setclr_reg(struct spi_master *master, int idx, +static inline void pch_spi_setclr_reg(struct spi_controller *host, int idx, u32 set, u32 clr) { - u32 tmp = pch_spi_readreg(master, idx); + u32 tmp = pch_spi_readreg(host, idx); tmp = (tmp & ~clr) | set; - pch_spi_writereg(master, idx, tmp); + pch_spi_writereg(host, idx, tmp); } -static void pch_spi_set_master_mode(struct spi_master *master) +static void pch_spi_set_host_mode(struct spi_controller *host) { - pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0); + pch_spi_setclr_reg(host, PCH_SPCR, SPCR_MSTR_BIT, 0); } /** * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs - * @master: Pointer to struct spi_master. + * @host: Pointer to struct spi_controller. */ -static void pch_spi_clear_fifo(struct spi_master *master) +static void pch_spi_clear_fifo(struct spi_controller *host) { - pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0); - pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT); + pch_spi_setclr_reg(host, PCH_SPCR, SPCR_FICLR_BIT, 0); + pch_spi_setclr_reg(host, PCH_SPCR, 0, SPCR_FICLR_BIT); } static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, @@ -312,7 +312,7 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, if (reg_spsr_val & SPSR_FI_BIT) { if ((tx_index == bpw_len) && (rx_index == tx_index)) { /* disable interrupts */ - pch_spi_setclr_reg(data->master, PCH_SPCR, 0, + pch_spi_setclr_reg(data->host, PCH_SPCR, 0, PCH_ALL); /* transfer is completed; @@ -321,7 +321,7 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, data->transfer_active = false; wake_up(&data->wait); } else { - dev_vdbg(&data->master->dev, + dev_vdbg(&data->host->dev, "%s : Transfer is not completed", __func__); } @@ -383,10 +383,10 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id) /** * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR - * @master: Pointer to struct spi_master. + * @host: Pointer to struct spi_controller. * @speed_hz: Baud rate. */ -static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz) +static void pch_spi_set_baud_rate(struct spi_controller *host, u32 speed_hz) { u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2); @@ -394,21 +394,21 @@ static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz) if (n_spbr > PCH_MAX_SPBR) n_spbr = PCH_MAX_SPBR; - pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS); + pch_spi_setclr_reg(host, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS); } /** * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR - * @master: Pointer to struct spi_master. + * @host: Pointer to struct spi_controller. 
* @bits_per_word: Bits per word for SPI transfer. */ -static void pch_spi_set_bits_per_word(struct spi_master *master, +static void pch_spi_set_bits_per_word(struct spi_controller *host, u8 bits_per_word) { if (bits_per_word == 8) - pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT); + pch_spi_setclr_reg(host, PCH_SPBRR, 0, SPBRR_SIZE_BIT); else - pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0); + pch_spi_setclr_reg(host, PCH_SPBRR, SPBRR_SIZE_BIT, 0); } /** @@ -420,12 +420,12 @@ static void pch_spi_setup_transfer(struct spi_device *spi) u32 flags = 0; dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n", - __func__, pch_spi_readreg(spi->master, PCH_SPBRR), + __func__, pch_spi_readreg(spi->controller, PCH_SPBRR), spi->max_speed_hz); - pch_spi_set_baud_rate(spi->master, spi->max_speed_hz); + pch_spi_set_baud_rate(spi->controller, spi->max_speed_hz); /* set bits per word */ - pch_spi_set_bits_per_word(spi->master, spi->bits_per_word); + pch_spi_set_bits_per_word(spi->controller, spi->bits_per_word); if (!(spi->mode & SPI_LSB_FIRST)) flags |= SPCR_LSBF_BIT; @@ -433,29 +433,29 @@ static void pch_spi_setup_transfer(struct spi_device *spi) flags |= SPCR_CPOL_BIT; if (spi->mode & SPI_CPHA) flags |= SPCR_CPHA_BIT; - pch_spi_setclr_reg(spi->master, PCH_SPCR, flags, + pch_spi_setclr_reg(spi->controller, PCH_SPCR, flags, (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT)); /* Clear the FIFO by toggling FICLR to 1 and back to 0 */ - pch_spi_clear_fifo(spi->master); + pch_spi_clear_fifo(spi->controller); } /** * pch_spi_reset() - Clears SPI registers - * @master: Pointer to struct spi_master. + * @host: Pointer to struct spi_controller. */ -static void pch_spi_reset(struct spi_master *master) +static void pch_spi_reset(struct spi_controller *host) { /* write 1 to reset SPI */ - pch_spi_writereg(master, PCH_SRST, 0x1); + pch_spi_writereg(host, PCH_SRST, 0x1); /* clear reset */ - pch_spi_writereg(master, PCH_SRST, 0x0); + pch_spi_writereg(host, PCH_SRST, 0x0); } static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) { - struct pch_spi_data *data = spi_master_get_devdata(pspi->master); + struct pch_spi_data *data = spi_controller_get_devdata(pspi->controller); int retval; unsigned long flags; @@ -524,15 +524,15 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw) /* set baud rate if needed */ if (data->cur_trans->speed_hz) { - dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__); - pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz); + dev_dbg(&data->host->dev, "%s:setting baud rate\n", __func__); + pch_spi_set_baud_rate(data->host, data->cur_trans->speed_hz); } /* set bits per word if needed */ if (data->cur_trans->bits_per_word && (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) { - dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__); - pch_spi_set_bits_per_word(data->master, + dev_dbg(&data->host->dev, "%s:set bits per word\n", __func__); + pch_spi_set_bits_per_word(data->host, data->cur_trans->bits_per_word); *bpw = data->cur_trans->bits_per_word; } else { @@ -590,13 +590,13 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw) if (n_writes > PCH_MAX_FIFO_DEPTH) n_writes = PCH_MAX_FIFO_DEPTH; - dev_dbg(&data->master->dev, + dev_dbg(&data->host->dev, "\n%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__); - pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW); + pch_spi_writereg(data->host, PCH_SSNXCR, SSN_LOW); for (j = 0; j < n_writes; j++) - 
pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]); + pch_spi_writereg(data->host, PCH_SPDWR, data->pkt_tx_buff[j]); /* update tx_index */ data->tx_index = j; @@ -609,13 +609,13 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw) static void pch_spi_nomore_transfer(struct pch_spi_data *data) { struct spi_message *pmsg, *tmp; - dev_dbg(&data->master->dev, "%s called\n", __func__); + dev_dbg(&data->host->dev, "%s called\n", __func__); /* Invoke complete callback * [To the spi core, indicating end of transfer] */ data->current_msg->status = 0; if (data->current_msg->complete) { - dev_dbg(&data->master->dev, + dev_dbg(&data->host->dev, "%s:Invoking callback of SPI core\n", __func__); data->current_msg->complete(data->current_msg->context); } @@ -623,7 +623,7 @@ static void pch_spi_nomore_transfer(struct pch_spi_data *data) /* update status in global variable */ data->bcurrent_msg_processing = false; - dev_dbg(&data->master->dev, + dev_dbg(&data->host->dev, "%s:data->bcurrent_msg_processing = false\n", __func__); data->current_msg = NULL; @@ -638,11 +638,11 @@ * transfer requests in the current message or there are * more messages) */ - dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__); + dev_dbg(&data->host->dev, "%s:Invoke queue_work\n", __func__); schedule_work(&data->work); } else if (data->board_dat->suspend_sts || data->status == STATUS_EXITING) { - dev_dbg(&data->master->dev, + dev_dbg(&data->host->dev, "%s suspend/remove initiated, flushing queue\n", __func__); list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) { @@ -662,14 +662,14 @@ static void pch_spi_set_ir(struct pch_spi_data *data) /* enable interrupts, set threshold, enable SPI */ if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) /* set receive threshold to PCH_RX_THOLD */ - pch_spi_setclr_reg(data->master, PCH_SPCR, + pch_spi_setclr_reg(data->host, PCH_SPCR, PCH_RX_THOLD << SPCR_RFIC_FIELD | SPCR_FIE_BIT | SPCR_RFIE_BIT | SPCR_ORIE_BIT | SPCR_SPE_BIT, MASK_RFIC_SPCR_BITS | PCH_ALL); else /* set receive threshold to maximum */ - pch_spi_setclr_reg(data->master, PCH_SPCR, + pch_spi_setclr_reg(data->host, PCH_SPCR, PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD | SPCR_FIE_BIT | SPCR_ORIE_BIT | SPCR_SPE_BIT, @@ -677,18 +677,18 @@ /* Wait until the transfer completes; go to sleep after initiating the transfer.
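 * (Editor's note: the wait_event_interruptible() call below sleeps
 * until pch_spi_handler_sub() marks data->transfer_complete and calls
 * wake_up() on data->wait, as seen in the handler hunks above.)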
*/ - dev_dbg(&data->master->dev, + dev_dbg(&data->host->dev, "%s:waiting for transfer to get over\n", __func__); wait_event_interruptible(data->wait, data->transfer_complete); /* clear all interrupts */ - pch_spi_writereg(data->master, PCH_SPSR, - pch_spi_readreg(data->master, PCH_SPSR)); + pch_spi_writereg(data->host, PCH_SPSR, + pch_spi_readreg(data->host, PCH_SPSR)); /* Disable interrupts and SPI transfer */ - pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT); + pch_spi_setclr_reg(data->host, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT); /* clear FIFO */ - pch_spi_clear_fifo(data->master); + pch_spi_clear_fifo(data->host); } static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw) @@ -750,25 +750,25 @@ static int pch_spi_start_transfer(struct pch_spi_data *data) spin_lock_irqsave(&data->lock, flags); /* disable interrupts, SPI set enable */ - pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL); + pch_spi_setclr_reg(data->host, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL); spin_unlock_irqrestore(&data->lock, flags); /* Wait until the transfer completes; go to sleep after initiating the transfer. */ - dev_dbg(&data->master->dev, + dev_dbg(&data->host->dev, "%s:waiting for transfer to get over\n", __func__); rtn = wait_event_interruptible_timeout(data->wait, data->transfer_complete, msecs_to_jiffies(2 * HZ)); if (!rtn) - dev_err(&data->master->dev, + dev_err(&data->host->dev, "%s wait-event timeout\n", __func__); - dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, + dma_sync_sg_for_cpu(&data->host->dev, dma->sg_rx_p, dma->nent, DMA_FROM_DEVICE); - dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent, + dma_sync_sg_for_cpu(&data->host->dev, dma->sg_tx_p, dma->nent, DMA_FROM_DEVICE); memset(data->dma.tx_buf_virt, 0, PAGE_SIZE); @@ -780,14 +780,14 @@ static int pch_spi_start_transfer(struct pch_spi_data *data) spin_lock_irqsave(&data->lock, flags); /* clear fifo threshold, disable interrupts, disable SPI transfer */ - pch_spi_setclr_reg(data->master, PCH_SPCR, 0, + pch_spi_setclr_reg(data->host, PCH_SPCR, 0, MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL | SPCR_SPE_BIT); /* clear all interrupts */ - pch_spi_writereg(data->master, PCH_SPSR, - pch_spi_readreg(data->master, PCH_SPSR)); + pch_spi_writereg(data->host, PCH_SPSR, + pch_spi_readreg(data->host, PCH_SPSR)); /* clear FIFO */ - pch_spi_clear_fifo(data->master); + pch_spi_clear_fifo(data->host); spin_unlock_irqrestore(&data->lock, flags); @@ -846,7 +846,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) param->width = width; chan = dma_request_channel(mask, pch_spi_filter, param); if (!chan) { - dev_err(&data->master->dev, + dev_err(&data->host->dev, "ERROR: dma_request_channel FAILS(Tx)\n"); goto out; } @@ -860,7 +860,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) param->width = width; chan = dma_request_channel(mask, pch_spi_filter, param); if (!chan) { - dev_err(&data->master->dev, + dev_err(&data->host->dev, "ERROR: dma_request_channel FAILS(Rx)\n"); dma_release_channel(dma->chan_tx); dma->chan_tx = NULL; @@ -913,9 +913,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) /* set baud rate if needed */ if (data->cur_trans->speed_hz) { - dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__); + dev_dbg(&data->host->dev, "%s:setting baud rate\n", __func__); spin_lock_irqsave(&data->lock, flags); - pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz); + pch_spi_set_baud_rate(data->host, 
data->cur_trans->speed_hz); spin_unlock_irqrestore(&data->lock, flags); } @@ -923,9 +923,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) if (data->cur_trans->bits_per_word && (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) { - dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__); + dev_dbg(&data->host->dev, "%s:set bits per word\n", __func__); spin_lock_irqsave(&data->lock, flags); - pch_spi_set_bits_per_word(data->master, + pch_spi_set_bits_per_word(data->host, data->cur_trans->bits_per_word); spin_unlock_irqrestore(&data->lock, flags); *bpw = data->cur_trans->bits_per_word; @@ -969,12 +969,12 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) size = data->bpw_len; rem = data->bpw_len; } - dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n", + dev_dbg(&data->host->dev, "%s num=%d size=%d rem=%d\n", __func__, num, size, rem); spin_lock_irqsave(&data->lock, flags); /* set receive fifo threshold and transmit fifo threshold */ - pch_spi_setclr_reg(data->master, PCH_SPCR, + pch_spi_setclr_reg(data->host, PCH_SPCR, ((size - 1) << SPCR_RFIC_FIELD) | (PCH_TX_THOLD << SPCR_TFIC_FIELD), MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); @@ -1016,11 +1016,11 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) num, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_rx) { - dev_err(&data->master->dev, + dev_err(&data->host->dev, "%s:dmaengine_prep_slave_sg Failed\n", __func__); return; } - dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE); + dma_sync_sg_for_device(&data->host->dev, sg, num, DMA_FROM_DEVICE); desc_rx->callback = pch_dma_rx_complete; desc_rx->callback_param = data; dma->nent = num; @@ -1078,20 +1078,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) sg, num, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_tx) { - dev_err(&data->master->dev, + dev_err(&data->host->dev, "%s:dmaengine_prep_slave_sg Failed\n", __func__); return; } - dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE); + dma_sync_sg_for_device(&data->host->dev, sg, num, DMA_TO_DEVICE); desc_tx->callback = NULL; desc_tx->callback_param = data; dma->nent = num; dma->desc_tx = desc_tx; - dev_dbg(&data->master->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__); + dev_dbg(&data->host->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__); spin_lock_irqsave(&data->lock, flags); - pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW); + pch_spi_writereg(data->host, PCH_SSNXCR, SSN_LOW); desc_rx->tx_submit(desc_rx); desc_tx->tx_submit(desc_tx); spin_unlock_irqrestore(&data->lock, flags); @@ -1107,12 +1107,12 @@ static void pch_spi_process_messages(struct work_struct *pwork) int bpw; data = container_of(pwork, struct pch_spi_data, work); - dev_dbg(&data->master->dev, "%s data initialized\n", __func__); + dev_dbg(&data->host->dev, "%s data initialized\n", __func__); spin_lock(&data->lock); /* check if suspend has been initiated;if yes flush queue */ if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) { - dev_dbg(&data->master->dev, + dev_dbg(&data->host->dev, "%s suspend/remove initiated, flushing queue\n", __func__); list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) { pmsg->status = -EIO; @@ -1132,7 +1132,7 @@ static void pch_spi_process_messages(struct work_struct *pwork) } data->bcurrent_msg_processing = true; - dev_dbg(&data->master->dev, + dev_dbg(&data->host->dev, "%s Set 
data->bcurrent_msg_processing= true\n", __func__); /* Get the message from the queue and delete it from there. */ @@ -1150,7 +1150,7 @@ static void pch_spi_process_messages(struct work_struct *pwork) if (data->use_dma) pch_spi_request_dma(data, data->current_msg->spi->bits_per_word); - pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); + pch_spi_writereg(data->host, PCH_SSNXCR, SSN_NO_CONTROL); do { int cnt; /* If we are already processing a message get the next @@ -1161,14 +1161,14 @@ static void pch_spi_process_messages(struct work_struct *pwork) data->cur_trans = list_entry(data->current_msg->transfers.next, struct spi_transfer, transfer_list); - dev_dbg(&data->master->dev, + dev_dbg(&data->host->dev, "%s :Getting 1st transfer message\n", __func__); } else { data->cur_trans = list_entry(data->cur_trans->transfer_list.next, struct spi_transfer, transfer_list); - dev_dbg(&data->master->dev, + dev_dbg(&data->host->dev, "%s :Getting next transfer message\n", __func__); } @@ -1210,7 +1210,7 @@ static void pch_spi_process_messages(struct work_struct *pwork) data->cur_trans->len = data->save_total_len; data->current_msg->actual_length += data->cur_trans->len; - dev_dbg(&data->master->dev, + dev_dbg(&data->host->dev, "%s:data->current_msg->actual_length=%d\n", __func__, data->current_msg->actual_length); @@ -1229,7 +1229,7 @@ static void pch_spi_process_messages(struct work_struct *pwork) } while (data->cur_trans != NULL); out: - pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH); + pch_spi_writereg(data->host, PCH_SSNXCR, SSN_HIGH); if (data->use_dma) pch_spi_release_dma(data); } @@ -1248,7 +1248,7 @@ static int pch_spi_get_resources(struct pch_spi_board_data *board_dat, dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); /* reset PCH SPI h/w */ - pch_spi_reset(data->master); + pch_spi_reset(data->host); dev_dbg(&board_dat->pdev->dev, "%s pch_spi_reset invoked successfully\n", __func__); @@ -1297,22 +1297,22 @@ static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat, static int pch_spi_pd_probe(struct platform_device *plat_dev) { int ret; - struct spi_master *master; + struct spi_controller *host; struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev); struct pch_spi_data *data; dev_dbg(&plat_dev->dev, "%s:debug\n", __func__); - master = spi_alloc_master(&board_dat->pdev->dev, + host = spi_alloc_host(&board_dat->pdev->dev, sizeof(struct pch_spi_data)); - if (!master) { - dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n", + if (!host) { + dev_err(&plat_dev->dev, "spi_alloc_host[%d] failed.\n", plat_dev->id); return -ENOMEM; } - data = spi_master_get_devdata(master); - data->master = master; + data = spi_controller_get_devdata(host); + data->host = host; platform_set_drvdata(plat_dev, data); @@ -1330,13 +1330,13 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev) dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n", plat_dev->id, data->io_remap_addr); - /* initialize members of SPI master */ - master->num_chipselect = PCH_MAX_CS; - master->transfer = pch_spi_transfer; - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; - master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); - master->max_speed_hz = PCH_MAX_BAUDRATE; - master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; + /* initialize members of SPI host */ + host->num_chipselect = PCH_MAX_CS; + host->transfer = pch_spi_transfer; + host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; + host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); + 
host->max_speed_hz = PCH_MAX_BAUDRATE; + host->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; data->board_dat = board_dat; data->plat_dev = plat_dev; @@ -1365,25 +1365,25 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev) } data->irq_reg_sts = true; - pch_spi_set_master_mode(master); + pch_spi_set_host_mode(host); if (use_dma) { dev_info(&plat_dev->dev, "Use DMA for data transfers\n"); ret = pch_alloc_dma_buf(board_dat, data); if (ret) - goto err_spi_register_master; + goto err_spi_register_controller; } - ret = spi_register_master(master); + ret = spi_register_controller(host); if (ret != 0) { dev_err(&plat_dev->dev, - "%s spi_register_master FAILED\n", __func__); - goto err_spi_register_master; + "%s spi_register_controller FAILED\n", __func__); + goto err_spi_register_controller; } return 0; -err_spi_register_master: +err_spi_register_controller: pch_free_dma_buf(board_dat, data); free_irq(board_dat->pdev->irq, data); err_request_irq: @@ -1391,7 +1391,7 @@ err_request_irq: err_spi_get_resources: pci_iounmap(board_dat->pdev, data->io_remap_addr); err_pci_iomap: - spi_master_put(master); + spi_controller_put(host); return ret; } @@ -1427,13 +1427,13 @@ static void pch_spi_pd_remove(struct platform_device *plat_dev) /* disable interrupts & free IRQ */ if (data->irq_reg_sts) { /* disable interrupts */ - pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); + pch_spi_setclr_reg(data->host, PCH_SPCR, 0, PCH_ALL); data->irq_reg_sts = false; free_irq(board_dat->pdev->irq, data); } pci_iounmap(board_dat->pdev, data->io_remap_addr); - spi_unregister_master(data->master); + spi_unregister_controller(data->host); } #ifdef CONFIG_PM static int pch_spi_pd_suspend(struct platform_device *pd_dev, @@ -1463,8 +1463,8 @@ static int pch_spi_pd_suspend(struct platform_device *pd_dev, /* Free IRQ */ if (data->irq_reg_sts) { /* disable all interrupts */ - pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); - pch_spi_reset(data->master); + pch_spi_setclr_reg(data->host, PCH_SPCR, 0, PCH_ALL); + pch_spi_reset(data->host); free_irq(board_dat->pdev->irq, data); data->irq_reg_sts = false; @@ -1498,8 +1498,8 @@ static int pch_spi_pd_resume(struct platform_device *pd_dev) } /* reset PCH SPI h/w */ - pch_spi_reset(data->master); - pch_spi_set_master_mode(data->master); + pch_spi_reset(data->host); + pch_spi_set_host_mode(data->host); data->irq_reg_sts = true; } return 0; diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c index f5344527af0b..4a18cf896194 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -26,7 +26,7 @@ struct uniphier_spi_priv { void __iomem *base; dma_addr_t base_dma_addr; struct clk *clk; - struct spi_master *master; + struct spi_controller *host; struct completion xfer_done; int error; @@ -127,7 +127,7 @@ static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv, static void uniphier_spi_set_mode(struct spi_device *spi) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master); + struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller); u32 val1, val2; /* @@ -180,7 +180,7 @@ static void uniphier_spi_set_mode(struct spi_device *spi) static void uniphier_spi_set_transfer_size(struct spi_device *spi, int size) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master); + struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller); u32 val; val = readl(priv->base + SSI_TXWDS); @@ -198,7 +198,7 @@ static void uniphier_spi_set_transfer_size(struct 
spi_device *spi, int size) static void uniphier_spi_set_baudrate(struct spi_device *spi, unsigned int speed) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master); + struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller); u32 val, ckdiv; /* @@ -217,7 +217,7 @@ static void uniphier_spi_set_baudrate(struct spi_device *spi, static void uniphier_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master); + struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller); u32 val; priv->error = 0; @@ -333,7 +333,7 @@ static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv) static void uniphier_spi_set_cs(struct spi_device *spi, bool enable) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master); + struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller); u32 val; val = readl(priv->base + SSI_FPS); @@ -346,16 +346,16 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable) writel(val, priv->base + SSI_FPS); } -static bool uniphier_spi_can_dma(struct spi_master *master, +static bool uniphier_spi_can_dma(struct spi_controller *host, struct spi_device *spi, struct spi_transfer *t) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + struct uniphier_spi_priv *priv = spi_controller_get_devdata(host); unsigned int bpw = bytes_per_word(priv->bits_per_word); - if ((!master->dma_tx && !master->dma_rx) - || (!master->dma_tx && t->tx_buf) - || (!master->dma_rx && t->rx_buf)) + if ((!host->dma_tx && !host->dma_rx) + || (!host->dma_tx && t->tx_buf) + || (!host->dma_rx && t->rx_buf)) return false; return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH; @@ -363,33 +363,33 @@ static bool uniphier_spi_can_dma(struct spi_master *master, static void uniphier_spi_dma_rxcb(void *data) { - struct spi_master *master = data; - struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + struct spi_controller *host = data; + struct uniphier_spi_priv *priv = spi_controller_get_devdata(host); int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy); uniphier_spi_irq_disable(priv, SSI_IE_RXRE); if (!(state & SSI_DMA_TX_BUSY)) - spi_finalize_current_transfer(master); + spi_finalize_current_transfer(host); } static void uniphier_spi_dma_txcb(void *data) { - struct spi_master *master = data; - struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + struct spi_controller *host = data; + struct uniphier_spi_priv *priv = spi_controller_get_devdata(host); int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy); uniphier_spi_irq_disable(priv, SSI_IE_TXRE); if (!(state & SSI_DMA_RX_BUSY)) - spi_finalize_current_transfer(master); + spi_finalize_current_transfer(host); } -static int uniphier_spi_transfer_one_dma(struct spi_master *master, +static int uniphier_spi_transfer_one_dma(struct spi_controller *host, struct spi_device *spi, struct spi_transfer *t) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + struct uniphier_spi_priv *priv = spi_controller_get_devdata(host); struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL; int buswidth; @@ -412,23 +412,23 @@ static int uniphier_spi_transfer_one_dma(struct spi_master *master, .src_maxburst = SSI_FIFO_BURST_NUM, }; - dmaengine_slave_config(master->dma_rx, &rxconf); + dmaengine_slave_config(host->dma_rx, &rxconf); rxdesc = dmaengine_prep_slave_sg( - master->dma_rx, + host->dma_rx, t->rx_sg.sgl, t->rx_sg.nents, 
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!rxdesc) goto out_err_prep; rxdesc->callback = uniphier_spi_dma_rxcb; - rxdesc->callback_param = master; + rxdesc->callback_param = host; uniphier_spi_irq_enable(priv, SSI_IE_RXRE); atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy); dmaengine_submit(rxdesc); - dma_async_issue_pending(master->dma_rx); + dma_async_issue_pending(host->dma_rx); } if (priv->tx_buf) { @@ -439,23 +439,23 @@ static int uniphier_spi_transfer_one_dma(struct spi_master *master, .dst_maxburst = SSI_FIFO_BURST_NUM, }; - dmaengine_slave_config(master->dma_tx, &txconf); + dmaengine_slave_config(host->dma_tx, &txconf); txdesc = dmaengine_prep_slave_sg( - master->dma_tx, + host->dma_tx, t->tx_sg.sgl, t->tx_sg.nents, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!txdesc) goto out_err_prep; txdesc->callback = uniphier_spi_dma_txcb; - txdesc->callback_param = master; + txdesc->callback_param = host; uniphier_spi_irq_enable(priv, SSI_IE_TXRE); atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy); dmaengine_submit(txdesc); - dma_async_issue_pending(master->dma_tx); + dma_async_issue_pending(host->dma_tx); } /* signal that we need to wait for completion */ @@ -463,17 +463,17 @@ static int uniphier_spi_transfer_one_dma(struct spi_master *master, out_err_prep: if (rxdesc) - dmaengine_terminate_sync(master->dma_rx); + dmaengine_terminate_sync(host->dma_rx); return -EINVAL; } -static int uniphier_spi_transfer_one_irq(struct spi_master *master, +static int uniphier_spi_transfer_one_irq(struct spi_controller *host, struct spi_device *spi, struct spi_transfer *t) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(master); - struct device *dev = master->dev.parent; + struct uniphier_spi_priv *priv = spi_controller_get_devdata(host); + struct device *dev = host->dev.parent; unsigned long time_left; reinit_completion(&priv->xfer_done); @@ -495,11 +495,11 @@ static int uniphier_spi_transfer_one_irq(struct spi_master *master, return priv->error; } -static int uniphier_spi_transfer_one_poll(struct spi_master *master, +static int uniphier_spi_transfer_one_poll(struct spi_controller *host, struct spi_device *spi, struct spi_transfer *t) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + struct uniphier_spi_priv *priv = spi_controller_get_devdata(host); int loop = SSI_POLL_TIMEOUT_US * 10; while (priv->tx_bytes) { @@ -520,14 +520,14 @@ static int uniphier_spi_transfer_one_poll(struct spi_master *master, return 0; irq_transfer: - return uniphier_spi_transfer_one_irq(master, spi, t); + return uniphier_spi_transfer_one_irq(host, spi, t); } -static int uniphier_spi_transfer_one(struct spi_master *master, +static int uniphier_spi_transfer_one(struct spi_controller *host, struct spi_device *spi, struct spi_transfer *t) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + struct uniphier_spi_priv *priv = spi_controller_get_devdata(host); unsigned long threshold; bool use_dma; @@ -537,9 +537,9 @@ static int uniphier_spi_transfer_one(struct spi_master *master, uniphier_spi_setup_transfer(spi, t); - use_dma = master->can_dma ? master->can_dma(master, spi, t) : false; + use_dma = host->can_dma ? 
host->can_dma(host, spi, t) : false; if (use_dma) - return uniphier_spi_transfer_one_dma(master, spi, t); + return uniphier_spi_transfer_one_dma(host, spi, t); /* * If the transfer operation will take longer than @@ -548,33 +548,33 @@ static int uniphier_spi_transfer_one(struct spi_master *master, threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * priv->speed_hz, USEC_PER_SEC * BITS_PER_BYTE); if (t->len > threshold) - return uniphier_spi_transfer_one_irq(master, spi, t); + return uniphier_spi_transfer_one_irq(host, spi, t); else - return uniphier_spi_transfer_one_poll(master, spi, t); + return uniphier_spi_transfer_one_poll(host, spi, t); } -static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master) +static int uniphier_spi_prepare_transfer_hardware(struct spi_controller *host) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + struct uniphier_spi_priv *priv = spi_controller_get_devdata(host); writel(SSI_CTL_EN, priv->base + SSI_CTL); return 0; } -static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master) +static int uniphier_spi_unprepare_transfer_hardware(struct spi_controller *host) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + struct uniphier_spi_priv *priv = spi_controller_get_devdata(host); writel(0, priv->base + SSI_CTL); return 0; } -static void uniphier_spi_handle_err(struct spi_master *master, +static void uniphier_spi_handle_err(struct spi_controller *host, struct spi_message *msg) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + struct uniphier_spi_priv *priv = spi_controller_get_devdata(host); u32 val; /* stop running spi transfer */ @@ -587,12 +587,12 @@ static void uniphier_spi_handle_err(struct spi_master *master, uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK); if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) { - dmaengine_terminate_async(master->dma_tx); + dmaengine_terminate_async(host->dma_tx); atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy); } if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) { - dmaengine_terminate_async(master->dma_rx); + dmaengine_terminate_async(host->dma_rx); atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy); } } @@ -641,7 +641,7 @@ done: static int uniphier_spi_probe(struct platform_device *pdev) { struct uniphier_spi_priv *priv; - struct spi_master *master; + struct spi_controller *host; struct resource *res; struct dma_slave_caps caps; u32 dma_tx_burst = 0, dma_rx_burst = 0; @@ -649,20 +649,20 @@ static int uniphier_spi_probe(struct platform_device *pdev) int irq; int ret; - master = spi_alloc_master(&pdev->dev, sizeof(*priv)); - if (!master) + host = spi_alloc_host(&pdev->dev, sizeof(*priv)); + if (!host) return -ENOMEM; - platform_set_drvdata(pdev, master); + platform_set_drvdata(pdev, host); - priv = spi_master_get_devdata(master); - priv->master = master; + priv = spi_controller_get_devdata(host); + priv->host = host; priv->is_save_param = false; priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); - goto out_master_put; + goto out_host_put; } priv->base_dma_addr = res->start; @@ -670,12 +670,12 @@ static int uniphier_spi_probe(struct platform_device *pdev) if (IS_ERR(priv->clk)) { dev_err(&pdev->dev, "failed to get clock\n"); ret = PTR_ERR(priv->clk); - goto out_master_put; + goto out_host_put; } ret = clk_prepare_enable(priv->clk); if (ret) - goto out_master_put; + goto out_host_put; irq = platform_get_irq(pdev, 0); if (irq < 0) { @@ -694,35 +694,35 @@ static int 
uniphier_spi_probe(struct platform_device *pdev) clk_rate = clk_get_rate(priv->clk); - master->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER); - master->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER); - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; - master->dev.of_node = pdev->dev.of_node; - master->bus_num = pdev->id; - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); + host->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER); + host->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER); + host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; + host->dev.of_node = pdev->dev.of_node; + host->bus_num = pdev->id; + host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); - master->set_cs = uniphier_spi_set_cs; - master->transfer_one = uniphier_spi_transfer_one; - master->prepare_transfer_hardware + host->set_cs = uniphier_spi_set_cs; + host->transfer_one = uniphier_spi_transfer_one; + host->prepare_transfer_hardware = uniphier_spi_prepare_transfer_hardware; - master->unprepare_transfer_hardware + host->unprepare_transfer_hardware = uniphier_spi_unprepare_transfer_hardware; - master->handle_err = uniphier_spi_handle_err; - master->can_dma = uniphier_spi_can_dma; + host->handle_err = uniphier_spi_handle_err; + host->can_dma = uniphier_spi_can_dma; - master->num_chipselect = 1; - master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; + host->num_chipselect = 1; + host->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; - master->dma_tx = dma_request_chan(&pdev->dev, "tx"); - if (IS_ERR_OR_NULL(master->dma_tx)) { - if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) { + host->dma_tx = dma_request_chan(&pdev->dev, "tx"); + if (IS_ERR_OR_NULL(host->dma_tx)) { + if (PTR_ERR(host->dma_tx) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto out_disable_clk; } - master->dma_tx = NULL; + host->dma_tx = NULL; dma_tx_burst = INT_MAX; } else { - ret = dma_get_slave_caps(master->dma_tx, &caps); + ret = dma_get_slave_caps(host->dma_tx, &caps); if (ret) { dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n", ret); @@ -731,16 +731,16 @@ static int uniphier_spi_probe(struct platform_device *pdev) dma_tx_burst = caps.max_burst; } - master->dma_rx = dma_request_chan(&pdev->dev, "rx"); - if (IS_ERR_OR_NULL(master->dma_rx)) { - if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) { + host->dma_rx = dma_request_chan(&pdev->dev, "rx"); + if (IS_ERR_OR_NULL(host->dma_rx)) { + if (PTR_ERR(host->dma_rx) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto out_release_dma; } - master->dma_rx = NULL; + host->dma_rx = NULL; dma_rx_burst = INT_MAX; } else { - ret = dma_get_slave_caps(master->dma_rx, &caps); + ret = dma_get_slave_caps(host->dma_rx, &caps); if (ret) { dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n", ret); @@ -749,41 +749,41 @@ static int uniphier_spi_probe(struct platform_device *pdev) dma_rx_burst = caps.max_burst; } - master->max_dma_len = min(dma_tx_burst, dma_rx_burst); + host->max_dma_len = min(dma_tx_burst, dma_rx_burst); - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, host); if (ret) goto out_release_dma; return 0; out_release_dma: - if (!IS_ERR_OR_NULL(master->dma_rx)) { - dma_release_channel(master->dma_rx); - master->dma_rx = NULL; + if (!IS_ERR_OR_NULL(host->dma_rx)) { + dma_release_channel(host->dma_rx); + host->dma_rx = NULL; } - if (!IS_ERR_OR_NULL(master->dma_tx)) { - dma_release_channel(master->dma_tx); - master->dma_tx = NULL; + if 
(!IS_ERR_OR_NULL(host->dma_tx)) { + dma_release_channel(host->dma_tx); + host->dma_tx = NULL; } out_disable_clk: clk_disable_unprepare(priv->clk); -out_master_put: - spi_master_put(master); +out_host_put: + spi_controller_put(host); return ret; } static void uniphier_spi_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); - struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + struct spi_controller *host = platform_get_drvdata(pdev); + struct uniphier_spi_priv *priv = spi_controller_get_devdata(host); - if (master->dma_tx) - dma_release_channel(master->dma_tx); - if (master->dma_rx) - dma_release_channel(master->dma_rx); + if (host->dma_tx) + dma_release_channel(host->dma_tx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); clk_disable_unprepare(priv->clk); } diff --git a/drivers/spi/spi-wpcm-fiu.c b/drivers/spi/spi-wpcm-fiu.c index 852ffe013d32..6b16a22cc3a4 100644 --- a/drivers/spi/spi-wpcm-fiu.c +++ b/drivers/spi/spi-wpcm-fiu.c @@ -361,7 +361,7 @@ static int wpcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) wpcm_fiu_stall_host(fiu, false); - return -ENOTSUPP; + return -EOPNOTSUPP; } static int wpcm_fiu_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) @@ -441,7 +441,7 @@ static int wpcm_fiu_probe(struct platform_device *pdev) struct wpcm_fiu_spi *fiu; struct resource *res; - ctrl = devm_spi_alloc_master(dev, sizeof(*fiu)); + ctrl = devm_spi_alloc_host(dev, sizeof(*fiu)); if (!ctrl) return -ENOMEM; diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c index a3d57554f5ba..63354dd3110f 100644 --- a/drivers/spi/spi-xcomm.c +++ b/drivers/spi/spi-xcomm.c @@ -132,10 +132,10 @@ static int spi_xcomm_txrx_bufs(struct spi_xcomm *spi_xcomm, return t->len; } -static int spi_xcomm_transfer_one(struct spi_master *master, +static int spi_xcomm_transfer_one(struct spi_controller *host, struct spi_message *msg) { - struct spi_xcomm *spi_xcomm = spi_master_get_devdata(master); + struct spi_xcomm *spi_xcomm = spi_controller_get_devdata(host); unsigned int settings = spi_xcomm->settings; struct spi_device *spi = msg->spi; unsigned cs_change = 0; @@ -197,7 +197,7 @@ static int spi_xcomm_transfer_one(struct spi_master *master, spi_xcomm_chipselect(spi_xcomm, spi, false); msg->status = status; - spi_finalize_current_message(master); + spi_finalize_current_message(host); return status; } @@ -205,27 +205,27 @@ static int spi_xcomm_transfer_one(struct spi_master *master, static int spi_xcomm_probe(struct i2c_client *i2c) { struct spi_xcomm *spi_xcomm; - struct spi_master *master; + struct spi_controller *host; int ret; - master = spi_alloc_master(&i2c->dev, sizeof(*spi_xcomm)); - if (!master) + host = spi_alloc_host(&i2c->dev, sizeof(*spi_xcomm)); + if (!host) return -ENOMEM; - spi_xcomm = spi_master_get_devdata(master); + spi_xcomm = spi_controller_get_devdata(host); spi_xcomm->i2c = i2c; - master->num_chipselect = 16; - master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_3WIRE; - master->bits_per_word_mask = SPI_BPW_MASK(8); - master->flags = SPI_CONTROLLER_HALF_DUPLEX; - master->transfer_one_message = spi_xcomm_transfer_one; - master->dev.of_node = i2c->dev.of_node; - i2c_set_clientdata(i2c, master); + host->num_chipselect = 16; + host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_3WIRE; + host->bits_per_word_mask = SPI_BPW_MASK(8); + host->flags = SPI_CONTROLLER_HALF_DUPLEX; + host->transfer_one_message = spi_xcomm_transfer_one; + host->dev.of_node = i2c->dev.of_node; + i2c_set_clientdata(i2c, host); - ret = 
devm_spi_register_master(&i2c->dev, master); + ret = devm_spi_register_controller(&i2c->dev, host); if (ret < 0) - spi_master_put(master); + spi_controller_put(host); return ret; } diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c index 8e6e3876aa9a..12355957be97 100644 --- a/drivers/spi/spi-xilinx.c +++ b/drivers/spi/spi-xilinx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Xilinx SPI controller driver (master mode only) + * Xilinx SPI controller driver (host mode only) * * Author: MontaVista Software, Inc. * source@mvista.com @@ -83,7 +83,7 @@ struct xilinx_spi { void __iomem *regs; /* virt. address of the control registers */ int irq; - bool force_irq; /* force irq to setup master inhibit */ + bool force_irq; /* force irq to setup host inhibit */ u8 *rx_ptr; /* pointer in the Rx buffer */ const u8 *tx_ptr; /* pointer in the Tx buffer */ u8 bytes_per_word; @@ -174,10 +174,10 @@ static void xspi_init_hw(struct xilinx_spi *xspi) regs_base + XIPIF_V123B_IIER_OFFSET); /* Disable the global IPIF interrupt */ xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET); - /* Deselect the slave on the SPI bus */ + /* Deselect the target on the SPI bus */ xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET); - /* Disable the transmitter, enable Manual Slave Select Assertion, - * put SPI controller into master mode, and enable it */ + /* Disable the transmitter, enable Manual Target Select Assertion, + * put SPI controller into host mode, and enable it */ xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET); @@ -185,12 +185,12 @@ static void xspi_init_hw(struct xilinx_spi *xspi) static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) { - struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); + struct xilinx_spi *xspi = spi_controller_get_devdata(spi->controller); u16 cr; u32 cs; if (is_on == BITBANG_CS_INACTIVE) { - /* Deselect the slave on the SPI bus */ + /* Deselect the target on the SPI bus */ xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET); return; } @@ -225,7 +225,7 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) static int xilinx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { - struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); + struct xilinx_spi *xspi = spi_controller_get_devdata(spi->controller); if (spi->mode & SPI_CS_HIGH) xspi->cs_inactive &= ~BIT(spi_get_chipselect(spi, 0)); @@ -237,7 +237,7 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi, static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) { - struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); + struct xilinx_spi *xspi = spi_controller_get_devdata(spi->controller); int remaining_words; /* the number of words left to transfer */ bool use_irq = false; u16 cr = 0; @@ -335,9 +335,9 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) } -/* This driver supports single master mode only. Hence Tx FIFO Empty +/* This driver supports single host mode only. Hence Tx FIFO Empty * is the only interrupt we care about. - * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode + * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Target Mode * Fault are not to happen.
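 * (Editor's note: consistent with this, the driver only unmasks the
 * Tx FIFO Empty source when it runs in interrupt mode; an observation
 * about the surrounding driver, not part of this hunk.)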
*/ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) @@ -393,7 +393,7 @@ static int xilinx_spi_probe(struct platform_device *pdev) struct xspi_platform_data *pdata; struct resource *res; int ret, num_cs = 0, bits_per_word; - struct spi_master *master; + struct spi_controller *host; bool force_irq = false; u32 tmp; u8 i; @@ -415,26 +415,26 @@ static int xilinx_spi_probe(struct platform_device *pdev) if (!num_cs) { dev_err(&pdev->dev, - "Missing slave select configuration data\n"); + "Missing target select configuration data\n"); return -EINVAL; } if (num_cs > XILINX_SPI_MAX_CS) { - dev_err(&pdev->dev, "Invalid number of spi slaves\n"); + dev_err(&pdev->dev, "Invalid number of spi targets\n"); return -EINVAL; } - master = devm_spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi)); - if (!master) + host = devm_spi_alloc_host(&pdev->dev, sizeof(struct xilinx_spi)); + if (!host) return -ENODEV; /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | - SPI_CS_HIGH; + host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | + SPI_CS_HIGH; - xspi = spi_master_get_devdata(master); + xspi = spi_controller_get_devdata(host); xspi->cs_inactive = 0xffffffff; - xspi->bitbang.master = master; + xspi->bitbang.master = host; xspi->bitbang.chipselect = xilinx_spi_chipselect; xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer; xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs; @@ -444,9 +444,9 @@ if (IS_ERR(xspi->regs)) return PTR_ERR(xspi->regs); - master->bus_num = pdev->id; - master->num_chipselect = num_cs; - master->dev.of_node = pdev->dev.of_node; + host->bus_num = pdev->id; + host->num_chipselect = num_cs; + host->dev.of_node = pdev->dev.of_node; /* * Detect endianness on the IP via loop bit in CR.
Detection @@ -466,7 +466,7 @@ static int xilinx_spi_probe(struct platform_device *pdev) xspi->write_fn = xspi_write32_be; } - master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word); + host->bits_per_word_mask = SPI_BPW_MASK(bits_per_word); xspi->bytes_per_word = bits_per_word / 8; xspi->buffer_size = xilinx_spi_find_buffer_size(xspi); @@ -496,17 +496,17 @@ static int xilinx_spi_probe(struct platform_device *pdev) if (pdata) { for (i = 0; i < pdata->num_devices; i++) - spi_new_device(master, pdata->devices + i); + spi_new_device(host, pdata->devices + i); } - platform_set_drvdata(pdev, master); + platform_set_drvdata(pdev, host); return 0; } static void xilinx_spi_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); - struct xilinx_spi *xspi = spi_master_get_devdata(master); + struct spi_controller *host = platform_get_drvdata(pdev); + struct xilinx_spi *xspi = spi_controller_get_devdata(host); void __iomem *regs_base = xspi->regs; spi_bitbang_stop(&xspi->bitbang); @@ -516,7 +516,7 @@ static void xilinx_spi_remove(struct platform_device *pdev) /* Disable the global IPIF interrupt */ xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET); - spi_master_put(xspi->bitbang.master); + spi_controller_put(xspi->bitbang.master); } /* work with hotplug and coldplug */ diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c index 3b91cdd5ae21..49302364b7bd 100644 --- a/drivers/spi/spi-xlp.c +++ b/drivers/spi/spi-xlp.c @@ -95,7 +95,7 @@ struct xlp_spi_priv { int rx_len; /* rx xfer length */ int txerrors; /* TXFIFO underflow count */ int rxerrors; /* RXFIFO overflow count */ - int cs; /* slave device chip select */ + int cs; /* target device chip select */ u32 spi_clk; /* spi clock frequency */ bool cmd_cont; /* cs active */ struct completion done; /* completion notification */ @@ -138,7 +138,7 @@ static int xlp_spi_setup(struct spi_device *spi) u32 fdiv, cfg; int cs; - xspi = spi_master_get_devdata(spi->master); + xspi = spi_controller_get_devdata(spi->controller); cs = spi_get_chipselect(spi, 0); /* * The value of fdiv must be between 4 and 65535. 
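 * (Editor's illustration, assuming fdiv is derived from the input
 * clock as fdiv = DIV_ROUND_UP(xspi->spi_clk, spi->max_speed_hz): a
 * 133 MHz spi_clk with a 10 MHz device gives fdiv = 14, already inside
 * the [4, 65535] window; values outside it get clamped.)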
@@ -343,17 +343,17 @@ static int xlp_spi_txrx_bufs(struct xlp_spi_priv *xs, struct spi_transfer *t) return bytesleft; } -static int xlp_spi_transfer_one(struct spi_master *master, +static int xlp_spi_transfer_one(struct spi_controller *host, struct spi_device *spi, struct spi_transfer *t) { - struct xlp_spi_priv *xspi = spi_master_get_devdata(master); + struct xlp_spi_priv *xspi = spi_controller_get_devdata(host); int ret = 0; xspi->cs = spi_get_chipselect(spi, 0); xspi->dev = spi->dev; - if (spi_transfer_is_last(master, t)) + if (spi_transfer_is_last(host, t)) xspi->cmd_cont = 0; else xspi->cmd_cont = 1; @@ -361,13 +361,13 @@ static int xlp_spi_transfer_one(struct spi_master *master, if (xlp_spi_txrx_bufs(xspi, t)) ret = -EIO; - spi_finalize_current_transfer(master); + spi_finalize_current_transfer(host); return ret; } static int xlp_spi_probe(struct platform_device *pdev) { - struct spi_master *master; + struct spi_controller *host; struct xlp_spi_priv *xspi; struct clk *clk; int irq, err; @@ -398,28 +398,28 @@ static int xlp_spi_probe(struct platform_device *pdev) xspi->spi_clk = clk_get_rate(clk); - master = spi_alloc_master(&pdev->dev, 0); - if (!master) { - dev_err(&pdev->dev, "could not alloc master\n"); + host = spi_alloc_host(&pdev->dev, 0); + if (!host) { + dev_err(&pdev->dev, "could not alloc host\n"); return -ENOMEM; } - master->bus_num = 0; - master->num_chipselect = XLP_SPI_MAX_CS; - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - master->setup = xlp_spi_setup; - master->transfer_one = xlp_spi_transfer_one; - master->dev.of_node = pdev->dev.of_node; + host->bus_num = 0; + host->num_chipselect = XLP_SPI_MAX_CS; + host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + host->setup = xlp_spi_setup; + host->transfer_one = xlp_spi_transfer_one; + host->dev.of_node = pdev->dev.of_node; init_completion(&xspi->done); - spi_master_set_devdata(master, xspi); + spi_controller_set_devdata(host, xspi); xlp_spi_sysctl_setup(xspi); /* register spi controller */ - err = devm_spi_register_master(&pdev->dev, master); + err = devm_spi_register_controller(&pdev->dev, host); if (err) { - dev_err(&pdev->dev, "spi register master failed!\n"); - spi_master_put(master); + dev_err(&pdev->dev, "spi register host failed!\n"); + spi_controller_put(host); return err; } diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c index dbd85d7a1526..3c7721894376 100644 --- a/drivers/spi/spi-xtensa-xtfpga.c +++ b/drivers/spi/spi-xtensa-xtfpga.c @@ -53,7 +53,7 @@ static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi) static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs, u32 v, u8 bits, unsigned flags) { - struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master); + struct xtfpga_spi *xspi = spi_controller_get_devdata(spi->controller); xspi->data = (xspi->data << bits) | (v & GENMASK(bits - 1, 0)); xspi->data_sz += bits; @@ -71,7 +71,7 @@ static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs, static void xtfpga_spi_chipselect(struct spi_device *spi, int is_on) { - struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master); + struct xtfpga_spi *xspi = spi_controller_get_devdata(spi->controller); WARN_ON(xspi->data_sz != 0); xspi->data_sz = 0; @@ -81,19 +81,19 @@ static int xtfpga_spi_probe(struct platform_device *pdev) { struct xtfpga_spi *xspi; int ret; - struct spi_master *master; + struct spi_controller *host; - master = devm_spi_alloc_master(&pdev->dev, sizeof(struct xtfpga_spi)); - if (!master) + host = 
devm_spi_alloc_host(&pdev->dev, sizeof(struct xtfpga_spi)); + if (!host) return -ENOMEM; - master->flags = SPI_CONTROLLER_NO_RX; - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16); - master->bus_num = pdev->dev.id; - master->dev.of_node = pdev->dev.of_node; + host->flags = SPI_CONTROLLER_NO_RX; + host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16); + host->bus_num = pdev->dev.id; + host->dev.of_node = pdev->dev.of_node; - xspi = spi_master_get_devdata(master); - xspi->bitbang.master = master; + xspi = spi_controller_get_devdata(host); + xspi->bitbang.master = host; xspi->bitbang.chipselect = xtfpga_spi_chipselect; xspi->bitbang.txrx_word[SPI_MODE_0] = xtfpga_spi_txrx_word; xspi->regs = devm_platform_ioremap_resource(pdev, 0); @@ -113,17 +113,17 @@ static int xtfpga_spi_probe(struct platform_device *pdev) return ret; } - platform_set_drvdata(pdev, master); + platform_set_drvdata(pdev, host); return 0; } static void xtfpga_spi_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); - struct xtfpga_spi *xspi = spi_master_get_devdata(master); + struct spi_controller *host = platform_get_drvdata(pdev); + struct xtfpga_spi *xspi = spi_controller_get_devdata(host); spi_bitbang_stop(&xspi->bitbang); - spi_master_put(master); + spi_controller_put(host); } MODULE_ALIAS("platform:" XTFPGA_SPI_NAME); diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c index 0db69a2a72ff..d6325c6be3d4 100644 --- a/drivers/spi/spi-zynq-qspi.c +++ b/drivers/spi/spi-zynq-qspi.c @@ -54,10 +54,10 @@ #define ZYNQ_QSPI_CONFIG_MSTREN_MASK BIT(0) /* Master Mode */ /* - * QSPI Configuration Register - Baud rate and slave select + * QSPI Configuration Register - Baud rate and target select * * These are the values used in the calculation of baud rate divisor and - * setting the slave select. + * setting the target select. 
*/ #define ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX GENMASK(2, 0) /* Baud rate maximum */ #define ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift */ @@ -164,14 +164,14 @@ static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset, * * The default settings of the QSPI controller's configurable parameters on * reset are - * - Master mode + * - Host mode * - Baud rate divisor is set to 2 * - Tx threshold set to 1l Rx threshold set to 32 * - Flash memory interface mode enabled * - Size of the word to be transferred as 8 bit * This function performs the following actions * - Disable and clear all the interrupts - * - Enable manual slave select + * - Enable manual target select * - Enable manual start * - Deselect all the chip select lines * - Set the size of the word to be transferred as 32 bit @@ -289,7 +289,7 @@ static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size) */ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert) { - struct spi_controller *ctlr = spi->master; + struct spi_controller *ctlr = spi->controller; struct zynq_qspi *xqspi = spi_controller_get_devdata(ctlr); u32 config_reg; @@ -377,7 +377,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi) */ static int zynq_qspi_setup_op(struct spi_device *spi) { - struct spi_controller *ctlr = spi->master; + struct spi_controller *ctlr = spi->controller; struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr); if (ctlr->busy) @@ -525,7 +525,7 @@ static irqreturn_t zynq_qspi_irq(int irq, void *dev_id) static int zynq_qspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) { - struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master); + struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->controller); int err = 0, i; u8 *tmpbuf; @@ -637,7 +637,7 @@ static int zynq_qspi_probe(struct platform_device *pdev) struct zynq_qspi *xqspi; u32 num_cs; - ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi)); + ctlr = spi_alloc_host(&pdev->dev, sizeof(*xqspi)); if (!ctlr) return -ENOMEM; @@ -647,14 +647,14 @@ static int zynq_qspi_probe(struct platform_device *pdev) xqspi->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(xqspi->regs)) { ret = PTR_ERR(xqspi->regs); - goto remove_master; + goto remove_ctlr; } xqspi->pclk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR(xqspi->pclk)) { dev_err(&pdev->dev, "pclk clock not found.\n"); ret = PTR_ERR(xqspi->pclk); - goto remove_master; + goto remove_ctlr; } init_completion(&xqspi->data_completion); @@ -663,13 +663,13 @@ static int zynq_qspi_probe(struct platform_device *pdev) if (IS_ERR(xqspi->refclk)) { dev_err(&pdev->dev, "ref_clk clock not found.\n"); ret = PTR_ERR(xqspi->refclk); - goto remove_master; + goto remove_ctlr; } ret = clk_prepare_enable(xqspi->pclk); if (ret) { dev_err(&pdev->dev, "Unable to enable APB clock.\n"); - goto remove_master; + goto remove_ctlr; } ret = clk_prepare_enable(xqspi->refclk); @@ -715,7 +715,7 @@ static int zynq_qspi_probe(struct platform_device *pdev) ret = devm_spi_register_controller(&pdev->dev, ctlr); if (ret) { - dev_err(&pdev->dev, "spi_register_master failed\n"); + dev_err(&pdev->dev, "devm_spi_register_controller failed\n"); goto clk_dis_all; } @@ -725,7 +725,7 @@ clk_dis_all: clk_disable_unprepare(xqspi->refclk); clk_dis_pclk: clk_disable_unprepare(xqspi->pclk); -remove_master: +remove_ctlr: spi_controller_put(ctlr); return ret; diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 9a46b2478f4e..99524a3c9f38 100644 
--- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* * Xilinx Zynq UltraScale+ MPSoC Quad-SPI (QSPI) controller driver - * (master mode only) + * (host mode only) * * Copyright (C) 2009 - 2015 Xilinx, Inc. */ @@ -235,21 +235,21 @@ static inline void zynqmp_gqspi_write(struct zynqmp_qspi *xqspi, u32 offset, } /** - * zynqmp_gqspi_selectslave - For selection of slave device + * zynqmp_gqspi_selecttarget - For selection of target device * @instanceptr: Pointer to the zynqmp_qspi structure - * @slavecs: For chip select - * @slavebus: To check which bus is selected- upper or lower + * @targetcs: For chip select + * @targetbus: To check which bus is selected- upper or lower */ -static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr, - u8 slavecs, u8 slavebus) +static void zynqmp_gqspi_selecttarget(struct zynqmp_qspi *instanceptr, + u8 targetcs, u8 targetbus) { /* * Bus and CS lines selected here will be updated in the instance and * used for subsequent GENFIFO entries during transfer. */ - /* Choose slave select line */ - switch (slavecs) { + /* Choose target select line */ + switch (targetcs) { case GQSPI_SELECT_FLASH_CS_BOTH: instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER | GQSPI_GENFIFO_CS_UPPER; @@ -261,11 +261,11 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr, instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER; break; default: - dev_warn(instanceptr->dev, "Invalid slave select\n"); + dev_warn(instanceptr->dev, "Invalid target select\n"); } /* Choose the bus */ - switch (slavebus) { + switch (targetbus) { case GQSPI_SELECT_FLASH_BUS_BOTH: instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER | GQSPI_GENFIFO_BUS_UPPER; @@ -277,7 +277,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr, instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER; break; default: - dev_warn(instanceptr->dev, "Invalid slave bus\n"); + dev_warn(instanceptr->dev, "Invalid target bus\n"); } } @@ -337,13 +337,13 @@ static void zynqmp_qspi_set_tapdelay(struct zynqmp_qspi *xqspi, u32 baudrateval) * * The default settings of the QSPI controller's configurable parameters on * reset are - * - Master mode + * - Host mode * - TX threshold set to 1 * - RX threshold set to 1 * - Flash memory interface mode enabled * This function performs the following actions * - Disable and clear all the interrupts - * - Enable manual slave select + * - Enable manual target select * - Enable manual start * - Deselect all the chip select lines * - Set the little endian mode of TX FIFO @@ -426,9 +426,9 @@ static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi) GQSPI_RX_FIFO_THRESHOLD); zynqmp_gqspi_write(xqspi, GQSPI_GF_THRESHOLD_OFST, GQSPI_GEN_FIFO_THRESHOLD_RESET_VAL); - zynqmp_gqspi_selectslave(xqspi, - GQSPI_SELECT_FLASH_CS_LOWER, - GQSPI_SELECT_FLASH_BUS_LOWER); + zynqmp_gqspi_selecttarget(xqspi, + GQSPI_SELECT_FLASH_CS_LOWER, + GQSPI_SELECT_FLASH_BUS_LOWER); /* Initialize DMA */ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_CTRL_OFST, @@ -459,7 +459,7 @@ static void zynqmp_qspi_copy_read_data(struct zynqmp_qspi *xqspi, */ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high) { - struct zynqmp_qspi *xqspi = spi_master_get_devdata(qspi->master); + struct zynqmp_qspi *xqspi = spi_controller_get_devdata(qspi->controller); ulong timeout; u32 genfifoentry = 0, statusreg; @@ -594,7 +594,7 @@ static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi, */ static int zynqmp_qspi_setup_op(struct 
spi_device *qspi) { - struct spi_controller *ctlr = qspi->master; + struct spi_controller *ctlr = qspi->controller; struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr); if (ctlr->busy) @@ -1048,7 +1048,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { struct zynqmp_qspi *xqspi = spi_controller_get_devdata - (mem->spi->master); + (mem->spi->controller); int err = 0, i; u32 genfifoentry = 0; u16 opcode = op->cmd.opcode; @@ -1224,7 +1224,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) u32 num_cs; const struct qspi_platform_data *p_data; - ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi)); + ctlr = spi_alloc_host(&pdev->dev, sizeof(*xqspi)); if (!ctlr) return -ENOMEM; @@ -1240,27 +1240,27 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) xqspi->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(xqspi->regs)) { ret = PTR_ERR(xqspi->regs); - goto remove_master; + goto remove_ctlr; } xqspi->pclk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR(xqspi->pclk)) { dev_err(dev, "pclk clock not found.\n"); ret = PTR_ERR(xqspi->pclk); - goto remove_master; + goto remove_ctlr; } xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk"); if (IS_ERR(xqspi->refclk)) { dev_err(dev, "ref_clk clock not found.\n"); ret = PTR_ERR(xqspi->refclk); - goto remove_master; + goto remove_ctlr; } ret = clk_prepare_enable(xqspi->pclk); if (ret) { dev_err(dev, "Unable to enable APB clock.\n"); - goto remove_master; + goto remove_ctlr; } ret = clk_prepare_enable(xqspi->refclk); @@ -1346,7 +1346,7 @@ clk_dis_all: clk_disable_unprepare(xqspi->refclk); clk_dis_pclk: clk_disable_unprepare(xqspi->pclk); -remove_master: +remove_ctlr: spi_controller_put(ctlr); return ret; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8ead7acb99f3..7477a11e12be 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -612,10 +612,21 @@ static int spi_dev_check(struct device *dev, void *data) { struct spi_device *spi = to_spi_device(dev); struct spi_device *new_spi = data; - - if (spi->controller == new_spi->controller && - spi_get_chipselect(spi, 0) == spi_get_chipselect(new_spi, 0)) - return -EBUSY; + int idx, nw_idx; + u8 cs, cs_nw; + + if (spi->controller == new_spi->controller) { + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { + cs = spi_get_chipselect(spi, idx); + for (nw_idx = 0; nw_idx < SPI_CS_CNT_MAX; nw_idx++) { + cs_nw = spi_get_chipselect(new_spi, nw_idx); + if (cs != 0xFF && cs_nw != 0xFF && cs == cs_nw) { + dev_err(dev, "chipselect %d already in use\n", cs_nw); + return -EBUSY; + } + } + } + } return 0; } @@ -629,13 +640,32 @@ static int __spi_add_device(struct spi_device *spi) { struct spi_controller *ctlr = spi->controller; struct device *dev = ctlr->dev.parent; - int status; + int status, idx, nw_idx; + u8 cs, nw_cs; + + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { + /* Chipselects are numbered 0..max; validate. */ + cs = spi_get_chipselect(spi, idx); + if (cs != 0xFF && cs >= ctlr->num_chipselect) { + dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx), + ctlr->num_chipselect); + return -EINVAL; + } + } - /* Chipselects are numbered 0..max; validate. */ - if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) { - dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0), - ctlr->num_chipselect); - return -EINVAL; + /* + * Make sure that multiple logical CS doesn't map to the same physical CS. + * For example, spi->chip_select[0] != spi->chip_select[1] and so on. 
+ */ + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { + cs = spi_get_chipselect(spi, idx); + for (nw_idx = idx + 1; nw_idx < SPI_CS_CNT_MAX; nw_idx++) { + nw_cs = spi_get_chipselect(spi, nw_idx); + if (cs != 0xFF && nw_cs != 0xFF && cs == nw_cs) { + dev_err(dev, "chipselect %d already in use\n", nw_cs); + return -EBUSY; + } + } } /* Set the bus ID string */ @@ -647,11 +677,8 @@ static int __spi_add_device(struct spi_device *spi) * its configuration. */ status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); - if (status) { - dev_err(dev, "chipselect %d already in use\n", - spi_get_chipselect(spi, 0)); + if (status) return status; - } /* Controller may unregister concurrently */ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) && @@ -659,8 +686,15 @@ static int __spi_add_device(struct spi_device *spi) return -ENODEV; } - if (ctlr->cs_gpiods) - spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]); + if (ctlr->cs_gpiods) { + u8 cs; + + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { + cs = spi_get_chipselect(spi, idx); + if (cs != 0xFF) + spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]); + } + } /* * Drivers may modify this initial i/o setup, but will @@ -701,6 +735,9 @@ int spi_add_device(struct spi_device *spi) struct spi_controller *ctlr = spi->controller; int status; + /* Set the bus ID string */ + spi_dev_set_name(spi); + mutex_lock(&ctlr->add_lock); status = __spi_add_device(spi); mutex_unlock(&ctlr->add_lock); @@ -727,6 +764,7 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr, { struct spi_device *proxy; int status; + u8 idx; /* * NOTE: caller did any chip->bus_num checks necessary. @@ -742,6 +780,18 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr, WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); + /* + * Zero(0) is a valid physical CS value and can be located at any + * logical CS in the spi->chip_select[]. If all the physical CS + * are initialized to 0 then It would be difficult to differentiate + * between a valid physical CS 0 & an unused logical CS whose physical + * CS can be 0. As a solution to this issue initialize all the CS to 0xFF. + * Now all the unused logical CS will have 0xFF physical CS value & can be + * ignore while performing physical CS validity checks. + */ + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) + spi_set_chipselect(proxy, idx, 0xFF); + spi_set_chipselect(proxy, 0, chip->chip_select); proxy->max_speed_hz = chip->max_speed_hz; proxy->mode = chip->mode; @@ -750,6 +800,15 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr, proxy->dev.platform_data = (void *) chip->platform_data; proxy->controller_data = chip->controller_data; proxy->controller_state = NULL; + /* + * spi->chip_select[i] gives the corresponding physical CS for logical CS i + * logical CS number is represented by setting the ith bit in spi->cs_index_mask + * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and + * spi->chip_select[0] will give the physical CS. + * By default spi->chip_select[0] will hold the physical CS number so, set + * spi->cs_index_mask as 0x01. 
+ */ + proxy->cs_index_mask = 0x01; if (chip->swnode) { status = device_add_software_node(&proxy->dev, chip->swnode); @@ -942,32 +1001,51 @@ static void spi_res_release(struct spi_controller *ctlr, struct spi_message *mes } /*-------------------------------------------------------------------------*/ +static inline bool spi_is_last_cs(struct spi_device *spi) +{ + u8 idx; + bool last = false; + + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { + if ((spi->cs_index_mask >> idx) & 0x01) { + if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx)) + last = true; + } + } + return last; +} + static void spi_set_cs(struct spi_device *spi, bool enable, bool force) { bool activate = enable; + u8 idx; /* * Avoid calling into the driver (or doing delays) if the chip select * isn't actually changing from the last time this was called. */ - if (!force && ((enable && spi->controller->last_cs == spi_get_chipselect(spi, 0)) || - (!enable && spi->controller->last_cs != spi_get_chipselect(spi, 0))) && + if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask && + spi_is_last_cs(spi)) || + (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask && + !spi_is_last_cs(spi))) && (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH))) return; trace_spi_set_cs(spi, activate); - spi->controller->last_cs = enable ? spi_get_chipselect(spi, 0) : -1; + spi->controller->last_cs_index_mask = spi->cs_index_mask; + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) + spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : -1; spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH; - if ((spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) && !activate) - spi_delay_exec(&spi->cs_hold, NULL); - if (spi->mode & SPI_CS_HIGH) enable = !enable; - if (spi_get_csgpiod(spi, 0)) { + if (spi_is_csgpiod(spi)) { + if (!spi->controller->set_cs_timing && !activate) + spi_delay_exec(&spi->cs_hold, NULL); + if (!(spi->mode & SPI_NO_CS)) { /* * Historically ACPI has no means of the GPIO polarity and @@ -979,26 +1057,38 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force) * ambiguity. That's why we use enable, that takes SPI_CS_HIGH * into account. 
*/ - if (has_acpi_companion(&spi->dev)) - gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), !enable); - else - /* Polarity handled by GPIO library */ - gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate); + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { + if (((spi->cs_index_mask >> idx) & 0x01) && + spi_get_csgpiod(spi, idx)) { + if (has_acpi_companion(&spi->dev)) + gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), + !enable); + else + /* Polarity handled by GPIO library */ + gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), + activate); + + if (activate) + spi_delay_exec(&spi->cs_setup, NULL); + else + spi_delay_exec(&spi->cs_inactive, NULL); + } + } } /* Some SPI masters need both GPIO CS & slave_select */ if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) && spi->controller->set_cs) spi->controller->set_cs(spi, !enable); + + if (!spi->controller->set_cs_timing) { + if (activate) + spi_delay_exec(&spi->cs_setup, NULL); + else + spi_delay_exec(&spi->cs_inactive, NULL); + } } else if (spi->controller->set_cs) { spi->controller->set_cs(spi, !enable); } - - if (spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) { - if (activate) - spi_delay_exec(&spi->cs_setup, NULL); - else - spi_delay_exec(&spi->cs_inactive, NULL); - } } #ifdef CONFIG_HAS_DMA @@ -1361,6 +1451,9 @@ static int spi_transfer_wait(struct spi_controller *ctlr, "SPI transfer timed out\n"); return -ETIMEDOUT; } + + if (xfer->error & SPI_TRANS_FAIL_IO) + return -EIO; } return 0; @@ -2222,8 +2315,8 @@ static void of_spi_parse_dt_cs_delay(struct device_node *nc, static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, struct device_node *nc) { - u32 value; - int rc; + u32 value, cs[SPI_CS_CNT_MAX]; + int rc, idx; /* Mode (clock phase/polarity/etc.) */ if (of_property_read_bool(nc, "spi-cpha")) @@ -2295,14 +2388,53 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, return 0; } + if (ctlr->num_chipselect > SPI_CS_CNT_MAX) { + dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n"); + return -EINVAL; + } + + /* + * Zero(0) is a valid physical CS value and can be located at any + * logical CS in the spi->chip_select[]. If all the physical CS + * are initialized to 0 then It would be difficult to differentiate + * between a valid physical CS 0 & an unused logical CS whose physical + * CS can be 0. As a solution to this issue initialize all the CS to 0xFF. + * Now all the unused logical CS will have 0xFF physical CS value & can be + * ignore while performing physical CS validity checks. 
+ */ + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) + spi_set_chipselect(spi, idx, 0xFF); + /* Device address */ - rc = of_property_read_u32(nc, "reg", &value); - if (rc) { + rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1, + SPI_CS_CNT_MAX); + if (rc < 0) { dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", nc, rc); return rc; } - spi_set_chipselect(spi, 0, value); + if (rc > ctlr->num_chipselect) { + dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n", + nc, rc); + return rc; + } + if ((of_property_read_bool(nc, "parallel-memories")) && + (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) { + dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n"); + return -EINVAL; + } + for (idx = 0; idx < rc; idx++) + spi_set_chipselect(spi, idx, cs[idx]); + + /* + * spi->chip_select[i] gives the corresponding physical CS for logical CS i + * logical CS number is represented by setting the ith bit in spi->cs_index_mask + * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and + * spi->chip_select[0] will give the physical CS. + * By default spi->chip_select[0] will hold the physical CS number so, set + * spi->cs_index_mask as 0x01. + */ + spi->cs_index_mask = 0x01; /* Device speed */ if (!of_property_read_u32(nc, "spi-max-frequency", &value)) @@ -2408,6 +2540,7 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi, struct spi_controller *ctlr = spi->controller; struct spi_device *ancillary; int rc = 0; + u8 idx; /* Alloc an spi_device */ ancillary = spi_alloc_device(ctlr); @@ -2418,12 +2551,33 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi, strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias)); + /* + * Zero(0) is a valid physical CS value and can be located at any + * logical CS in the spi->chip_select[]. If all the physical CS + * are initialized to 0 then It would be difficult to differentiate + * between a valid physical CS 0 & an unused logical CS whose physical + * CS can be 0. As a solution to this issue initialize all the CS to 0xFF. + * Now all the unused logical CS will have 0xFF physical CS value & can be + * ignore while performing physical CS validity checks. + */ + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) + spi_set_chipselect(ancillary, idx, 0xFF); + /* Use provided chip-select for ancillary device */ spi_set_chipselect(ancillary, 0, chip_select); /* Take over SPI mode/speed from SPI main device */ ancillary->max_speed_hz = spi->max_speed_hz; ancillary->mode = spi->mode; + /* + * spi->chip_select[i] gives the corresponding physical CS for logical CS i + * logical CS number is represented by setting the ith bit in spi->cs_index_mask + * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and + * spi->chip_select[0] will give the physical CS. + * By default spi->chip_select[0] will hold the physical CS number so, set + * spi->cs_index_mask as 0x01. + */ + ancillary->cs_index_mask = 0x01; WARN_ON(!mutex_is_locked(&ctlr->add_lock)); @@ -2626,6 +2780,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, struct acpi_spi_lookup lookup = {}; struct spi_device *spi; int ret; + u8 idx; if (!ctlr && index == -1) return ERR_PTR(-EINVAL); @@ -2661,12 +2816,33 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, return ERR_PTR(-ENOMEM); } + /* + * Zero(0) is a valid physical CS value and can be located at any + * logical CS in the spi->chip_select[]. 
If all the physical CS + * are initialized to 0 then It would be difficult to differentiate + * between a valid physical CS 0 & an unused logical CS whose physical + * CS can be 0. As a solution to this issue initialize all the CS to 0xFF. + * Now all the unused logical CS will have 0xFF physical CS value & can be + * ignore while performing physical CS validity checks. + */ + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) + spi_set_chipselect(spi, idx, 0xFF); + ACPI_COMPANION_SET(&spi->dev, adev); spi->max_speed_hz = lookup.max_speed_hz; spi->mode |= lookup.mode; spi->irq = lookup.irq; spi->bits_per_word = lookup.bits_per_word; spi_set_chipselect(spi, 0, lookup.chip_select); + /* + * spi->chip_select[i] gives the corresponding physical CS for logical CS i + * logical CS number is represented by setting the ith bit in spi->cs_index_mask + * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and + * spi->chip_select[0] will give the physical CS. + * By default spi->chip_select[0] will hold the physical CS number so, set + * spi->cs_index_mask as 0x01. + */ + spi->cs_index_mask = 0x01; return spi; } @@ -3100,6 +3276,7 @@ int spi_register_controller(struct spi_controller *ctlr) struct boardinfo *bi; int first_dynamic; int status; + int idx; if (!dev) return -ENODEV; @@ -3164,7 +3341,8 @@ int spi_register_controller(struct spi_controller *ctlr) } /* Setting last_cs to -1 means no chip selected */ - ctlr->last_cs = -1; + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) + ctlr->last_cs[idx] = -1; status = device_add(&ctlr->dev); if (status < 0) @@ -3889,7 +4067,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) * cs_change is set for each transfer. */ if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || - spi_get_csgpiod(spi, 0))) { + spi_is_csgpiod(spi))) { size_t maxsize = BITS_TO_BYTES(spi->bits_per_word); int ret; diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index c81a00fbca7d..59883502eff4 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -76,10 +76,6 @@ config THERMAL_OF Say 'Y' here if you need to build thermal infrastructure based on device tree. 
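Before the thermal changes, a note on the spi.c hunks above, which add multi chip-select bookkeeping to the SPI core: spi->chip_select becomes an array of SPI_CS_CNT_MAX physical CS numbers, unused slots are parked at the 0xFF sentinel, and bit i of spi->cs_index_mask marks logical chip select i as active (0x01 by default, so existing single-CS devices keep their old behaviour). A minimal sketch of how a consumer can walk the active entries; foo_show_active_cs is a hypothetical helper, not part of the patch:

#include <linux/spi/spi.h>

/*
 * Hypothetical helper, for illustration only: visit every physical
 * chip select that is both selected by the logical-CS bitmask and
 * populated (i.e. not the 0xFF "unused" sentinel described above).
 */
static void foo_show_active_cs(struct spi_device *spi)
{
	u8 idx, cs;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
		if (!((spi->cs_index_mask >> idx) & 0x01))
			continue;	/* logical CS idx not selected */

		cs = spi_get_chipselect(spi, idx);
		if (cs == 0xFF)
			continue;	/* unused slot */

		dev_dbg(&spi->dev, "logical CS %u -> physical CS %u\n",
			idx, cs);
	}
}

The same sentinel test is what spi_dev_check() and __spi_add_device() above rely on when rejecting duplicate chip selects with -EBUSY.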
-config THERMAL_ACPI - depends on ACPI - bool - config THERMAL_WRITABLE_TRIPS bool "Enable writable trip points" help diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index c934cab309ae..a8318d671036 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -13,7 +13,6 @@ thermal_sys-$(CONFIG_THERMAL_NETLINK) += thermal_netlink.o # interface to/from other layers providing sensors thermal_sys-$(CONFIG_THERMAL_HWMON) += thermal_hwmon.o thermal_sys-$(CONFIG_THERMAL_OF) += thermal_of.o -thermal_sys-$(CONFIG_THERMAL_ACPI) += thermal_acpi.o # governors CFLAGS_gov_power_allocator.o := -I$(src) diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c index 5877cde25b79..df7a5ed55385 100644 --- a/drivers/thermal/amlogic_thermal.c +++ b/drivers/thermal/amlogic_thermal.c @@ -167,13 +167,11 @@ static int amlogic_thermal_enable(struct amlogic_thermal *data) return 0; } -static int amlogic_thermal_disable(struct amlogic_thermal *data) +static void amlogic_thermal_disable(struct amlogic_thermal *data) { regmap_update_bits(data->regmap, TSENSOR_CFG_REG1, TSENSOR_CFG_REG1_ENABLE, 0); clk_disable_unprepare(data->clk); - - return 0; } static int amlogic_thermal_get_temp(struct thermal_zone_device *tz, int *temp) @@ -298,27 +296,30 @@ static void amlogic_thermal_remove(struct platform_device *pdev) amlogic_thermal_disable(data); } -static int __maybe_unused amlogic_thermal_suspend(struct device *dev) +static int amlogic_thermal_suspend(struct device *dev) { struct amlogic_thermal *data = dev_get_drvdata(dev); - return amlogic_thermal_disable(data); + amlogic_thermal_disable(data); + + return 0; } -static int __maybe_unused amlogic_thermal_resume(struct device *dev) +static int amlogic_thermal_resume(struct device *dev) { struct amlogic_thermal *data = dev_get_drvdata(dev); return amlogic_thermal_enable(data); } -static SIMPLE_DEV_PM_OPS(amlogic_thermal_pm_ops, - amlogic_thermal_suspend, amlogic_thermal_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(amlogic_thermal_pm_ops, + amlogic_thermal_suspend, + amlogic_thermal_resume); static struct platform_driver amlogic_thermal_driver = { .driver = { .name = "amlogic_thermal", - .pm = &amlogic_thermal_pm_ops, + .pm = pm_ptr(&amlogic_thermal_pm_ops), .of_match_table = of_amlogic_thermal_match, }, .probe = amlogic_thermal_probe, diff --git a/drivers/thermal/cpuidle_cooling.c b/drivers/thermal/cpuidle_cooling.c index 69f4c0a8dfcc..f678c1281862 100644 --- a/drivers/thermal/cpuidle_cooling.c +++ b/drivers/thermal/cpuidle_cooling.c @@ -66,7 +66,7 @@ static unsigned int cpuidle_cooling_runtime(unsigned int idle_duration_us, * @state : a pointer to the state variable to be filled * * The function always returns 100 as the injection ratio. It is - * percentile based for consistency accross different platforms. + * percentile based for consistency across different platforms. 
different platforms. * * Return: The function can not fail, it is always zero */ @@ -146,7 +146,7 @@ static int cpuidle_cooling_set_cur_state(struct thermal_cooling_device *cdev, return 0; } -/** +/* * cpuidle_cooling_ops - thermal cooling device ops */ static struct thermal_cooling_device_ops cpuidle_cooling_ops = { diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c index 83d4f451b1a9..7b6aa265ff6a 100644 --- a/drivers/thermal/gov_power_allocator.c +++ b/drivers/thermal/gov_power_allocator.c @@ -47,6 +47,22 @@ static inline s64 div_frac(s64 x, s64 y) } /** + * struct power_actor - internal power information for power actor + * @req_power: requested power value (not weighted) + * @max_power: max allocatable power for this actor + * @granted_power: granted power for this actor + * @extra_actor_power: extra power that this actor can receive + * @weighted_req_power: weighted requested power as input to IPA + */ +struct power_actor { + u32 req_power; + u32 max_power; + u32 granted_power; + u32 extra_actor_power; + u32 weighted_req_power; +}; + +/** * struct power_allocator_params - parameters for the power allocator governor * @allocated_tzp: whether we have allocated tzp for this thermal zone and * it needs to be freed on unbind @@ -59,9 +75,12 @@ static inline s64 div_frac(s64 x, s64 y) * governor switches on when this trip point is crossed. * If the thermal zone only has one passive trip point, * @trip_switch_on should be NULL. - * @trip_max_desired_temperature: last passive trip point of the thermal - * zone. The temperature we are - * controlling for. + * @trip_max: last passive trip point of the thermal zone. The + * temperature we are controlling for. + * @total_weight: Sum of all thermal instances weights + * @num_actors: number of cooling devices supporting IPA callbacks + * @buffer_size: internal buffer size, to avoid runtime re-calculation + * @power: buffer for all power actors internal power information */ struct power_allocator_params { bool allocated_tzp; @@ -69,9 +88,20 @@ struct power_allocator_params { s32 prev_err; u32 sustainable_power; const struct thermal_trip *trip_switch_on; - const struct thermal_trip *trip_max_desired_temperature; + const struct thermal_trip *trip_max; + int total_weight; + unsigned int num_actors; + unsigned int buffer_size; + struct power_actor *power; }; +static bool power_actor_is_valid(struct power_allocator_params *params, + struct thermal_instance *instance) +{ + return (instance->trip == params->trip_max && + cdev_is_power_actor(instance->cdev)); +} + /** * estimate_sustainable_power() - Estimate the sustainable power of a thermal zone * @tz: thermal zone we are operating in @@ -85,20 +115,17 @@ struct power_allocator_params { */ static u32 estimate_sustainable_power(struct thermal_zone_device *tz) { - u32 sustainable_power = 0; - struct thermal_instance *instance; struct power_allocator_params *params = tz->governor_data; + struct thermal_cooling_device *cdev; + struct thermal_instance *instance; + u32 sustainable_power = 0; + u32 min_power; list_for_each_entry(instance, &tz->thermal_instances, tz_node) { - struct thermal_cooling_device *cdev = instance->cdev; - u32 min_power; - - if (instance->trip != params->trip_max_desired_temperature) - continue; - - if (!cdev_is_power_actor(cdev)) + if (!power_actor_is_valid(params, instance)) continue; + cdev = instance->cdev; if (cdev->ops->state2power(cdev, instance->upper, &min_power)) continue; @@ -212,10 +239,10 @@ static u32 pid_controller(struct thermal_zone_device *tz, int 
control_temp, u32 max_allocatable_power) { + struct power_allocator_params *params = tz->governor_data; s64 p, i, d, power_range; s32 err, max_power_frac; u32 sustainable_power; - struct power_allocator_params *params = tz->governor_data; max_power_frac = int_to_frac(max_allocatable_power); @@ -303,15 +330,10 @@ power_actor_set_power(struct thermal_cooling_device *cdev, /** * divvy_up_power() - divvy the allocated power between the actors - * @req_power: each actor's requested power - * @max_power: each actor's maximum available power - * @num_actors: size of the @req_power, @max_power and @granted_power's array - * @total_req_power: sum of @req_power + * @power: buffer for all power actors internal power information + * @num_actors: number of power actors in this thermal zone + * @total_req_power: sum of all weighted requested power for all actors * @power_range: total allocated power - * @granted_power: output array: each actor's granted power - * @extra_actor_power: an appropriately sized array to be used in the - * function as temporary storage of the extra power given - * to the actors * * This function divides the total allocated power (@power_range) * fairly between the actors. It first tries to give each actor a @@ -324,15 +346,12 @@ power_actor_set_power(struct thermal_cooling_device *cdev, * If any actor received more than their maximum power, then that * surplus is re-divvied among the actors based on how far they are * from their respective maximums. - * - * Granted power for each actor is written to @granted_power, which - * should've been allocated by the calling function. */ -static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors, - u32 total_req_power, u32 power_range, - u32 *granted_power, u32 *extra_actor_power) +static void divvy_up_power(struct power_actor *power, int num_actors, + u32 total_req_power, u32 power_range) { - u32 extra_power, capped_extra_power; + u32 capped_extra_power = 0; + u32 extra_power = 0; int i; /* @@ -341,24 +360,23 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors, if (!total_req_power) total_req_power = 1; - capped_extra_power = 0; - extra_power = 0; for (i = 0; i < num_actors; i++) { - u64 req_range = (u64)req_power[i] * power_range; + struct power_actor *pa = &power[i]; + u64 req_range = (u64)pa->req_power * power_range; - granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range, - total_req_power); + pa->granted_power = DIV_ROUND_CLOSEST_ULL(req_range, + total_req_power); - if (granted_power[i] > max_power[i]) { - extra_power += granted_power[i] - max_power[i]; - granted_power[i] = max_power[i]; + if (pa->granted_power > pa->max_power) { + extra_power += pa->granted_power - pa->max_power; + pa->granted_power = pa->max_power; } - extra_actor_power[i] = max_power[i] - granted_power[i]; - capped_extra_power += extra_actor_power[i]; + pa->extra_actor_power = pa->max_power - pa->granted_power; + capped_extra_power += pa->extra_actor_power; } - if (!extra_power) + if (!extra_power || !capped_extra_power) return; /* @@ -366,127 +384,95 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors, * how far they are from the max */ extra_power = min(extra_power, capped_extra_power); - if (capped_extra_power > 0) - for (i = 0; i < num_actors; i++) { - u64 extra_range = (u64)extra_actor_power[i] * extra_power; - granted_power[i] += DIV_ROUND_CLOSEST_ULL(extra_range, - capped_extra_power); - } + + for (i = 0; i < num_actors; i++) { + struct power_actor *pa = &power[i]; + u64 extra_range = 
pa->extra_actor_power; + + extra_range *= extra_power; + pa->granted_power += DIV_ROUND_CLOSEST_ULL(extra_range, + capped_extra_power); + } } -static int allocate_power(struct thermal_zone_device *tz, - int control_temp) +static int allocate_power(struct thermal_zone_device *tz, int control_temp) { - struct thermal_instance *instance; struct power_allocator_params *params = tz->governor_data; - const struct thermal_trip *trip_max_desired_temperature = - params->trip_max_desired_temperature; - u32 *req_power, *max_power, *granted_power, *extra_actor_power; - u32 *weighted_req_power; - u32 total_req_power, max_allocatable_power, total_weighted_req_power; - u32 total_granted_power, power_range; - int i, num_actors, total_weight, ret = 0; - - num_actors = 0; - total_weight = 0; - list_for_each_entry(instance, &tz->thermal_instances, tz_node) { - if ((instance->trip == trip_max_desired_temperature) && - cdev_is_power_actor(instance->cdev)) { - num_actors++; - total_weight += instance->weight; - } - } + unsigned int num_actors = params->num_actors; + struct power_actor *power = params->power; + struct thermal_cooling_device *cdev; + struct thermal_instance *instance; + u32 total_weighted_req_power = 0; + u32 max_allocatable_power = 0; + u32 total_granted_power = 0; + u32 total_req_power = 0; + u32 power_range, weight; + int i = 0, ret; if (!num_actors) return -ENODEV; - /* - * We need to allocate five arrays of the same size: - * req_power, max_power, granted_power, extra_actor_power and - * weighted_req_power. They are going to be needed until this - * function returns. Allocate them all in one go to simplify - * the allocation and deallocation logic. - */ - BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power)); - BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power)); - BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power)); - BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power)); - req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL); - if (!req_power) - return -ENOMEM; - - max_power = &req_power[num_actors]; - granted_power = &req_power[2 * num_actors]; - extra_actor_power = &req_power[3 * num_actors]; - weighted_req_power = &req_power[4 * num_actors]; - - i = 0; - total_weighted_req_power = 0; - total_req_power = 0; - max_allocatable_power = 0; + /* Clean all buffers for new power estimations */ + memset(power, 0, params->buffer_size); list_for_each_entry(instance, &tz->thermal_instances, tz_node) { - int weight; - struct thermal_cooling_device *cdev = instance->cdev; + struct power_actor *pa = &power[i]; - if (instance->trip != trip_max_desired_temperature) + if (!power_actor_is_valid(params, instance)) continue; - if (!cdev_is_power_actor(cdev)) - continue; + cdev = instance->cdev; - if (cdev->ops->get_requested_power(cdev, &req_power[i])) + ret = cdev->ops->get_requested_power(cdev, &pa->req_power); + if (ret) continue; - if (!total_weight) + if (!params->total_weight) weight = 1 << FRAC_BITS; else weight = instance->weight; - weighted_req_power[i] = frac_to_int(weight * req_power[i]); + pa->weighted_req_power = frac_to_int(weight * pa->req_power); - if (cdev->ops->state2power(cdev, instance->lower, - &max_power[i])) + ret = cdev->ops->state2power(cdev, instance->lower, + &pa->max_power); + if (ret) continue; - total_req_power += req_power[i]; - max_allocatable_power += max_power[i]; - total_weighted_req_power += weighted_req_power[i]; + total_req_power += pa->req_power; + max_allocatable_power += pa->max_power; + total_weighted_req_power += 
pa->weighted_req_power; i++; } power_range = pid_controller(tz, control_temp, max_allocatable_power); - divvy_up_power(weighted_req_power, max_power, num_actors, - total_weighted_req_power, power_range, granted_power, - extra_actor_power); + divvy_up_power(power, num_actors, total_weighted_req_power, + power_range); - total_granted_power = 0; i = 0; list_for_each_entry(instance, &tz->thermal_instances, tz_node) { - if (instance->trip != trip_max_desired_temperature) - continue; + struct power_actor *pa = &power[i]; - if (!cdev_is_power_actor(instance->cdev)) + if (!power_actor_is_valid(params, instance)) continue; power_actor_set_power(instance->cdev, instance, - granted_power[i]); - total_granted_power += granted_power[i]; + pa->granted_power); + total_granted_power += pa->granted_power; + trace_thermal_power_actor(tz, i, pa->req_power, + pa->granted_power); i++; } - trace_thermal_power_allocator(tz, req_power, total_req_power, - granted_power, total_granted_power, + trace_thermal_power_allocator(tz, total_req_power, total_granted_power, num_actors, power_range, max_allocatable_power, tz->temperature, control_temp - tz->temperature); - kfree(req_power); - - return ret; + return 0; } /** @@ -531,13 +517,13 @@ static void get_governor_trips(struct thermal_zone_device *tz, if (last_passive) { params->trip_switch_on = first_passive; - params->trip_max_desired_temperature = last_passive; + params->trip_max = last_passive; } else if (first_passive) { params->trip_switch_on = NULL; - params->trip_max_desired_temperature = first_passive; + params->trip_max = first_passive; } else { params->trip_switch_on = NULL; - params->trip_max_desired_temperature = last_active; + params->trip_max = last_active; } } @@ -549,19 +535,19 @@ static void reset_pid_controller(struct power_allocator_params *params) static void allow_maximum_power(struct thermal_zone_device *tz, bool update) { - struct thermal_instance *instance; struct power_allocator_params *params = tz->governor_data; + struct thermal_cooling_device *cdev; + struct thermal_instance *instance; u32 req_power; list_for_each_entry(instance, &tz->thermal_instances, tz_node) { - struct thermal_cooling_device *cdev = instance->cdev; - - if (instance->trip != params->trip_max_desired_temperature || - (!cdev_is_power_actor(instance->cdev))) + if (!power_actor_is_valid(params, instance)) continue; + cdev = instance->cdev; + instance->target = 0; - mutex_lock(&instance->cdev->lock); + mutex_lock(&cdev->lock); /* * Call for updating the cooling devices local stats and avoid * periods of dozen of seconds when those have not been @@ -570,9 +556,9 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update) cdev->ops->get_requested_power(cdev, &req_power); if (update) - __thermal_cdev_update(instance->cdev); + __thermal_cdev_update(cdev); - mutex_unlock(&instance->cdev->lock); + mutex_unlock(&cdev->lock); } } @@ -580,30 +566,99 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update) * check_power_actors() - Check all cooling devices and warn when they are * not power actors * @tz: thermal zone to operate on + * @params: power allocator private data * * Check all cooling devices in the @tz and warn every time they are missing * power actor API. The warning should help to investigate the issue, which * could be e.g. lack of Energy Model for a given device. * - * Return: 0 on success, -EINVAL if any cooling device does not implement - * the power actor API. 
+ * If all of the cooling devices currently attached to @tz implement the power + * actor API, return the number of them (which may be 0, because some cooling + * devices may be attached later). Otherwise, return -EINVAL. */ -static int check_power_actors(struct thermal_zone_device *tz) +static int check_power_actors(struct thermal_zone_device *tz, + struct power_allocator_params *params) { struct thermal_instance *instance; int ret = 0; list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + if (instance->trip != params->trip_max) + continue; + if (!cdev_is_power_actor(instance->cdev)) { dev_warn(&tz->device, "power_allocator: %s is not a power actor\n", instance->cdev->type); - ret = -EINVAL; + return -EINVAL; } + ret++; } return ret; } +static int allocate_actors_buffer(struct power_allocator_params *params, + int num_actors) +{ + int ret; + + kfree(params->power); + + /* There might be no cooling devices yet. */ + if (!num_actors) { + ret = -EINVAL; + goto clean_state; + } + + params->power = kcalloc(num_actors, sizeof(struct power_actor), + GFP_KERNEL); + if (!params->power) { + ret = -ENOMEM; + goto clean_state; + } + + params->num_actors = num_actors; + params->buffer_size = num_actors * sizeof(struct power_actor); + + return 0; + +clean_state: + params->num_actors = 0; + params->buffer_size = 0; + params->power = NULL; + return ret; +} + +static void power_allocator_update_tz(struct thermal_zone_device *tz, + enum thermal_notify_event reason) +{ + struct power_allocator_params *params = tz->governor_data; + struct thermal_instance *instance; + int num_actors = 0; + + switch (reason) { + case THERMAL_TZ_BIND_CDEV: + case THERMAL_TZ_UNBIND_CDEV: + list_for_each_entry(instance, &tz->thermal_instances, tz_node) + if (power_actor_is_valid(params, instance)) + num_actors++; + + if (num_actors == params->num_actors) + return; + + allocate_actors_buffer(params, num_actors); + break; + case THERMAL_INSTANCE_WEIGHT_CHANGED: + params->total_weight = 0; + list_for_each_entry(instance, &tz->thermal_instances, tz_node) + if (power_actor_is_valid(params, instance)) + params->total_weight += instance->weight; + break; + default: + break; + } +} + /** * power_allocator_bind() - bind the power_allocator governor to a thermal zone * @tz: thermal zone to bind it to @@ -616,17 +671,34 @@ static int check_power_actors(struct thermal_zone_device *tz) */ static int power_allocator_bind(struct thermal_zone_device *tz) { - int ret; struct power_allocator_params *params; - - ret = check_power_actors(tz); - if (ret) - return ret; + int ret; params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; + get_governor_trips(tz, params); + if (!params->trip_max) { + dev_warn(&tz->device, "power_allocator: missing trip_max\n"); + kfree(params); + return -EINVAL; + } + + ret = check_power_actors(tz, params); + if (ret < 0) { + dev_warn(&tz->device, "power_allocator: binding failed\n"); + kfree(params); + return ret; + } + + ret = allocate_actors_buffer(params, ret); + if (ret) { + dev_warn(&tz->device, "power_allocator: allocation failed\n"); + kfree(params); + return ret; + } + if (!tz->tzp) { tz->tzp = kzalloc(sizeof(*tz->tzp), GFP_KERNEL); if (!tz->tzp) { @@ -640,14 +712,9 @@ static int power_allocator_bind(struct thermal_zone_device *tz) if (!tz->tzp->sustainable_power) dev_warn(&tz->device, "power_allocator: sustainable_power will be estimated\n"); - get_governor_trips(tz, params); - - if (params->trip_max_desired_temperature) { - int temp = 
params->trip_max_desired_temperature->temperature; - - estimate_pid_constants(tz, tz->tzp->sustainable_power, - params->trip_switch_on, temp); - } + estimate_pid_constants(tz, tz->tzp->sustainable_power, + params->trip_switch_on, + params->trip_max->temperature); reset_pid_controller(params); @@ -656,6 +723,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz) return 0; free_params: + kfree(params->power); kfree(params); return ret; @@ -672,6 +740,7 @@ static void power_allocator_unbind(struct thermal_zone_device *tz) tz->tzp = NULL; } + kfree(params->power); kfree(tz->governor_data); tz->governor_data = NULL; } @@ -688,7 +757,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, * We get called for every trip point but we only need to do * our calculations once */ - if (trip != params->trip_max_desired_temperature) + if (trip != params->trip_max) return 0; trip = params->trip_switch_on; @@ -702,7 +771,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, tz->passive = 1; - return allocate_power(tz, params->trip_max_desired_temperature->temperature); + return allocate_power(tz, params->trip_max->temperature); } static struct thermal_governor thermal_gov_power_allocator = { @@ -710,5 +779,6 @@ static struct thermal_governor thermal_gov_power_allocator = { .bind_to_tz = power_allocator_bind, .unbind_from_tz = power_allocator_unbind, .throttle = power_allocator_throttle, + .update_tz = power_allocator_update_tz, }; THERMAL_GOVERNOR_DECLARE(thermal_gov_power_allocator); diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig index ecd7e07eece0..b43953b5539f 100644 --- a/drivers/thermal/intel/Kconfig +++ b/drivers/thermal/intel/Kconfig @@ -85,7 +85,7 @@ config INTEL_BXT_PMIC_THERMAL config INTEL_PCH_THERMAL tristate "Intel PCH Thermal Reporting Driver" depends on X86 && PCI - select THERMAL_ACPI if ACPI + select ACPI_THERMAL_LIB if ACPI help Enable this to support thermal reporting on certain intel PCHs. 
Thermal reporting device will provide temperature reading, diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig index 300ea53e9b33..e76b13e44d03 100644 --- a/drivers/thermal/intel/int340x_thermal/Kconfig +++ b/drivers/thermal/intel/int340x_thermal/Kconfig @@ -9,7 +9,7 @@ config INT340X_THERMAL select THERMAL_GOV_USER_SPACE select ACPI_THERMAL_REL select ACPI_FAN - select THERMAL_ACPI + select ACPI_THERMAL_LIB select INTEL_SOC_DTS_IOSF_CORE select INTEL_TCC select PROC_THERMAL_MMIO_RAPL if POWERCAP diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c index a03b67579dd9..3e4bfe817fac 100644 --- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c +++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c @@ -225,7 +225,8 @@ EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove); static int int340x_update_one_trip(struct thermal_trip *trip, void *arg) { - struct acpi_device *zone_adev = arg; + struct int34x_thermal_zone *int34x_zone = arg; + struct acpi_device *zone_adev = int34x_zone->adev; int temp, err; switch (trip->type) { @@ -249,14 +250,15 @@ static int int340x_update_one_trip(struct thermal_trip *trip, void *arg) if (err) temp = THERMAL_TEMP_INVALID; - trip->temperature = temp; + thermal_zone_set_trip_temp(int34x_zone->zone, trip, temp); + return 0; } void int340x_thermal_update_trips(struct int34x_thermal_zone *int34x_zone) { thermal_zone_for_each_trip(int34x_zone->zone, int340x_update_one_trip, - int34x_zone->adev); + int34x_zone); } EXPORT_SYMBOL_GPL(int340x_thermal_update_trips); diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c index c69db6c90869..22445403b520 100644 --- a/drivers/thermal/intel/intel_hfi.c +++ b/drivers/thermal/intel/intel_hfi.c @@ -24,6 +24,7 @@ #include <linux/bitops.h> #include <linux/cpufeature.h> #include <linux/cpumask.h> +#include <linux/delay.h> #include <linux/gfp.h> #include <linux/io.h> #include <linux/kernel.h> @@ -347,6 +348,52 @@ static void init_hfi_instance(struct hfi_instance *hfi_instance) hfi_instance->data = hfi_instance->hdr + hfi_features.hdr_size; } +/* Caller must hold hfi_instance_lock. */ +static void hfi_enable(void) +{ + u64 msr_val; + + rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); + msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT; + wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); +} + +static void hfi_set_hw_table(struct hfi_instance *hfi_instance) +{ + phys_addr_t hw_table_pa; + u64 msr_val; + + hw_table_pa = virt_to_phys(hfi_instance->hw_table); + msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT; + wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val); +} + +/* Caller must hold hfi_instance_lock. */ +static void hfi_disable(void) +{ + u64 msr_val; + int i; + + rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); + msr_val &= ~HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT; + wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); + + /* + * Wait for hardware to acknowledge the disabling of HFI. Some + * processors may not do it. Wait for ~2ms. This is a reasonable + * time for hardware to complete any pending actions on the HFI + * memory. 
+ */ + for (i = 0; i < 2000; i++) { + rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); + if (msr_val & PACKAGE_THERM_STATUS_HFI_UPDATED) + break; + + udelay(1); + cpu_relax(); + } +} + /** * intel_hfi_online() - Enable HFI on @cpu * @cpu: CPU in which the HFI will be enabled @@ -364,8 +411,6 @@ void intel_hfi_online(unsigned int cpu) { struct hfi_instance *hfi_instance; struct hfi_cpu_info *info; - phys_addr_t hw_table_pa; - u64 msr_val; u16 die_id; /* Nothing to do if hfi_instances are missing. */ @@ -392,25 +437,26 @@ void intel_hfi_online(unsigned int cpu) /* * Now check if the HFI instance of the package/die of @cpu has been * initialized (by checking its header). In such case, all we have to - * do is to add @cpu to this instance's cpumask. + * do is to add @cpu to this instance's cpumask and enable the instance + * if needed. */ mutex_lock(&hfi_instance_lock); - if (hfi_instance->hdr) { - cpumask_set_cpu(cpu, hfi_instance->cpus); - goto unlock; - } + if (hfi_instance->hdr) + goto enable; /* * Hardware is programmed with the physical address of the first page * frame of the table. Hence, the allocated memory must be page-aligned. + * + * Some processors do not forget the initial address of the HFI table + * even after having been reprogrammed. Keep using the same pages. Do + * not free them. */ hfi_instance->hw_table = alloc_pages_exact(hfi_features.nr_table_pages, GFP_KERNEL | __GFP_ZERO); if (!hfi_instance->hw_table) goto unlock; - hw_table_pa = virt_to_phys(hfi_instance->hw_table); - /* * Allocate memory to keep a local copy of the table that * hardware generates. @@ -420,31 +466,20 @@ void intel_hfi_online(unsigned int cpu) if (!hfi_instance->local_table) goto free_hw_table; - /* - * Program the address of the feedback table of this die/package. On - * some processors, hardware remembers the old address of the HFI table - * even after having been reprogrammed and re-enabled. Thus, do not free - * the pages allocated for the table or reprogram the hardware with a - * new base address. Namely, program the hardware only once. - */ - msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT; - wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val); - init_hfi_instance(hfi_instance); INIT_DELAYED_WORK(&hfi_instance->update_work, hfi_update_work_fn); raw_spin_lock_init(&hfi_instance->table_lock); raw_spin_lock_init(&hfi_instance->event_lock); +enable: cpumask_set_cpu(cpu, hfi_instance->cpus); - /* - * Enable the hardware feedback interface and never disable it. See - * comment on programming the address of the table. - */ - rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); - msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT; - wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); + /* Enable this HFI instance if this is its first online CPU. 
*/ + if (cpumask_weight(hfi_instance->cpus) == 1) { + hfi_set_hw_table(hfi_instance); + hfi_enable(); + } unlock: mutex_unlock(&hfi_instance_lock); @@ -484,6 +519,10 @@ void intel_hfi_offline(unsigned int cpu) mutex_lock(&hfi_instance_lock); cpumask_clear_cpu(cpu, hfi_instance->cpus); + + if (!cpumask_weight(hfi_instance->cpus)) + hfi_disable(); + mutex_unlock(&hfi_instance_lock); } diff --git a/drivers/thermal/loongson2_thermal.c b/drivers/thermal/loongson2_thermal.c index 133098dc0854..99ca0c7bc41c 100644 --- a/drivers/thermal/loongson2_thermal.c +++ b/drivers/thermal/loongson2_thermal.c @@ -127,7 +127,7 @@ static int loongson2_thermal_probe(struct platform_device *pdev) if (!IS_ERR(tzd)) break; - if (PTR_ERR(tzd) != ENODEV) + if (PTR_ERR(tzd) != -ENODEV) continue; return dev_err_probe(dev, PTR_ERR(tzd), "failed to register"); diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 123ec81e1943..6482513bfe66 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -138,12 +138,10 @@ enum soc_type { /** * struct exynos_tmu_data : A structure to hold the private data of the TMU * driver - * @id: identifier of the one instance of the TMU controller. * @base: base address of the single instance of the TMU controller. * @base_second: base address of the common registers of the TMU controller. * @irq: irq number of the TMU controller. * @soc: id of the SOC type. - * @irq_work: pointer to the irq work structure. * @lock: lock to implement synchronization. * @clk: pointer to the clock structure. * @clk_sec: pointer to the clock structure for accessing the base_second. @@ -159,13 +157,13 @@ enum soc_type { * @reference_voltage: reference voltage of amplifier * in the positive-TC generator block * 0 < reference_voltage <= 31 - * @regulator: pointer to the TMU regulator structure. - * @reg_conf: pointer to structure to register with core thermal. * @tzd: pointer to thermal_zone_device structure - * @ntrip: number of supported trip points. 
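The loongson2 hunk above is a one-character sign fix: PTR_ERR() yields a negative errno, so comparing it against the positive ENODEV never matched and every registration error was silently skipped by the continue. A self-contained model of the kernel's ERR_PTR()/PTR_ERR() encoding makes the bug visible:

#include <errno.h>
#include <stdio.h>

/* Minimal user-space model of the kernel's error-pointer encoding. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

int main(void)
{
	void *tzd = ERR_PTR(-ENODEV);

	/* Buggy test: PTR_ERR() is negative, ENODEV is positive. */
	printf("PTR_ERR(tzd) != ENODEV  -> %d (always true)\n",
	       PTR_ERR(tzd) != ENODEV);

	/* Fixed test actually recognizes the -ENODEV case. */
	printf("PTR_ERR(tzd) != -ENODEV -> %d\n", PTR_ERR(tzd) != -ENODEV);
	return 0;
}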
* @enabled: current status of TMU device - * @tmu_set_trip_temp: SoC specific method to set trip (rising threshold) - * @tmu_set_trip_hyst: SoC specific to set hysteresis (falling threshold) + * @tmu_set_low_temp: SoC specific method to set trip (falling threshold) + * @tmu_set_high_temp: SoC specific method to set trip (rising threshold) + * @tmu_set_crit_temp: SoC specific method to set critical temperature + * @tmu_disable_low: SoC specific method to disable an interrupt (falling threshold) + * @tmu_disable_high: SoC specific method to disable an interrupt (rising threshold) * @tmu_initialize: SoC specific TMU initialization method * @tmu_control: SoC specific TMU control method * @tmu_read: SoC specific TMU temperature read method @@ -173,12 +171,10 @@ enum soc_type { * @tmu_clear_irqs: SoC specific TMU interrupts clearing method */ struct exynos_tmu_data { - int id; void __iomem *base; void __iomem *base_second; int irq; enum soc_type soc; - struct work_struct irq_work; struct mutex lock; struct clk *clk, *clk_sec, *sclk; u32 cal_type; @@ -188,15 +184,14 @@ struct exynos_tmu_data { u16 temp_error1, temp_error2; u8 gain; u8 reference_voltage; - struct regulator *regulator; struct thermal_zone_device *tzd; - unsigned int ntrip; bool enabled; - void (*tmu_set_trip_temp)(struct exynos_tmu_data *data, int trip, - u8 temp); - void (*tmu_set_trip_hyst)(struct exynos_tmu_data *data, int trip, - u8 temp, u8 hyst); + void (*tmu_set_low_temp)(struct exynos_tmu_data *data, u8 temp); + void (*tmu_set_high_temp)(struct exynos_tmu_data *data, u8 temp); + void (*tmu_set_crit_temp)(struct exynos_tmu_data *data, u8 temp); + void (*tmu_disable_low)(struct exynos_tmu_data *data); + void (*tmu_disable_high)(struct exynos_tmu_data *data); void (*tmu_initialize)(struct platform_device *pdev); void (*tmu_control)(struct platform_device *pdev, bool on); int (*tmu_read)(struct exynos_tmu_data *data); @@ -258,25 +253,8 @@ static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info) static int exynos_tmu_initialize(struct platform_device *pdev) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); - struct thermal_zone_device *tzd = data->tzd; - int num_trips = thermal_zone_get_num_trips(tzd); unsigned int status; - int ret = 0, temp; - - ret = thermal_zone_get_crit_temp(tzd, &temp); - if (ret && data->soc != SOC_ARCH_EXYNOS5433) { /* FIXME */ - dev_err(&pdev->dev, - "No CRITICAL trip point defined in device tree!\n"); - goto out; - } - - if (num_trips > data->ntrip) { - dev_info(&pdev->dev, - "More trip points than supported by this TMU.\n"); - dev_info(&pdev->dev, - "%d trip points should be configured in polling mode.\n", - num_trips - data->ntrip); - } + int ret = 0; mutex_lock(&data->lock); clk_enable(data->clk); @@ -287,34 +265,44 @@ static int exynos_tmu_initialize(struct platform_device *pdev) if (!status) { ret = -EBUSY; } else { - int i, ntrips = - min_t(int, num_trips, data->ntrip); - data->tmu_initialize(pdev); + data->tmu_clear_irqs(data); + } - /* Write temperature code for rising and falling threshold */ - for (i = 0; i < ntrips; i++) { + if (!IS_ERR(data->clk_sec)) + clk_disable(data->clk_sec); + clk_disable(data->clk); + mutex_unlock(&data->lock); - struct thermal_trip trip; + return ret; +} - ret = thermal_zone_get_trip(tzd, i, &trip); - if (ret) - goto err; +static int exynos_thermal_zone_configure(struct platform_device *pdev) +{ + struct exynos_tmu_data *data = platform_get_drvdata(pdev); + struct thermal_zone_device *tzd = data->tzd; + int ret, temp; - 
data->tmu_set_trip_temp(data, i, trip.temperature / MCELSIUS); - data->tmu_set_trip_hyst(data, i, trip.temperature / MCELSIUS, - trip.hysteresis / MCELSIUS); - } + ret = thermal_zone_get_crit_temp(tzd, &temp); + if (ret) { + /* FIXME: Remove this special case */ + if (data->soc == SOC_ARCH_EXYNOS5433) + return 0; - data->tmu_clear_irqs(data); + dev_err(&pdev->dev, + "No CRITICAL trip point defined in device tree!\n"); + return ret; } -err: + + mutex_lock(&data->lock); + clk_enable(data->clk); + + data->tmu_set_crit_temp(data, temp / MCELSIUS); + clk_disable(data->clk); mutex_unlock(&data->lock); - if (!IS_ERR(data->clk_sec)) - clk_disable(data->clk_sec); -out: - return ret; + + return 0; } static u32 get_con_reg(struct exynos_tmu_data *data, u32 con) @@ -347,30 +335,74 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on) mutex_unlock(&data->lock); } -static void exynos4210_tmu_set_trip_temp(struct exynos_tmu_data *data, - int trip_id, u8 temp) +static void exynos_tmu_update_bit(struct exynos_tmu_data *data, int reg_off, + int bit_off, bool enable) +{ + u32 interrupt_en; + + interrupt_en = readl(data->base + reg_off); + if (enable) + interrupt_en |= BIT(bit_off); + else + interrupt_en &= ~BIT(bit_off); + writel(interrupt_en, data->base + reg_off); +} + +static void exynos_tmu_update_temp(struct exynos_tmu_data *data, int reg_off, + int bit_off, u8 temp) { - struct thermal_trip trip; - u8 ref, th_code; + u16 tmu_temp_mask; + u32 th; - if (thermal_zone_get_trip(data->tzd, 0, &trip)) - return; + tmu_temp_mask = + (data->soc == SOC_ARCH_EXYNOS7) ? EXYNOS7_TMU_TEMP_MASK + : EXYNOS_TMU_TEMP_MASK; - ref = trip.temperature / MCELSIUS; + th = readl(data->base + reg_off); + th &= ~(tmu_temp_mask << bit_off); + th |= temp_to_code(data, temp) << bit_off; + writel(th, data->base + reg_off); +} - if (trip_id == 0) { - th_code = temp_to_code(data, ref); - writeb(th_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP); - } +static void exynos4210_tmu_set_low_temp(struct exynos_tmu_data *data, u8 temp) +{ + /* + * Failing thresholds are not supported on Exynos 4210. + * We use polling instead. + */ +} + +static void exynos4210_tmu_set_high_temp(struct exynos_tmu_data *data, u8 temp) +{ + temp = temp_to_code(data, temp); + writeb(temp, data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + 4); + exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN, + EXYNOS_TMU_INTEN_RISE0_SHIFT + 4, true); +} - temp -= ref; - writeb(temp, data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + trip_id * 4); +static void exynos4210_tmu_disable_low(struct exynos_tmu_data *data) +{ + /* Again, this is handled by polling. */ } -/* failing thresholds are not supported on Exynos4210 */ -static void exynos4210_tmu_set_trip_hyst(struct exynos_tmu_data *data, - int trip, u8 temp, u8 hyst) +static void exynos4210_tmu_disable_high(struct exynos_tmu_data *data) { + exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN, + EXYNOS_TMU_INTEN_RISE0_SHIFT + 4, false); +} + +static void exynos4210_tmu_set_crit_temp(struct exynos_tmu_data *data, u8 temp) +{ + /* + * Hardware critical temperature handling is not supported on Exynos 4210. + * We still set the critical temperature threshold, but this is only to + * make sure it is handled as soon as possible. It is just a normal interrupt. 
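The new exynos_tmu_update_bit() and exynos_tmu_update_temp() helpers factor the repeated read-modify-write sequences out of the per-SoC callbacks. A standalone model of the same helpers against an in-memory "register bank" (array size and example values chosen arbitrarily here):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static uint32_t regs[4];	/* stands in for the memory-mapped registers */

static void update_bit(int reg, int bit, int enable)
{
	uint32_t v = regs[reg];

	if (enable)
		v |= BIT(bit);
	else
		v &= ~BIT(bit);
	regs[reg] = v;
}

static void update_temp(int reg, int shift, uint8_t code)
{
	uint32_t v = regs[reg];

	v &= ~(0xffu << shift);		/* 8-bit field, as on most Exynos TMUs */
	v |= (uint32_t)code << shift;
	regs[reg] = v;
}

int main(void)
{
	update_temp(0, 8, 0x55);	/* program a rising threshold code */
	update_bit(1, 4, 1);		/* unmask the matching interrupt */
	printf("reg0=%08x reg1=%08x\n", regs[0], regs[1]);
	return 0;
}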
+ */ + + temp = temp_to_code(data, temp); + writeb(temp, data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + 12); + exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN, + EXYNOS_TMU_INTEN_RISE0_SHIFT + 12, true); } static void exynos4210_tmu_initialize(struct platform_device *pdev) @@ -378,35 +410,35 @@ static void exynos4210_tmu_initialize(struct platform_device *pdev) struct exynos_tmu_data *data = platform_get_drvdata(pdev); sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO)); + + writeb(0, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP); } -static void exynos4412_tmu_set_trip_temp(struct exynos_tmu_data *data, - int trip, u8 temp) +static void exynos4412_tmu_set_low_temp(struct exynos_tmu_data *data, u8 temp) { - u32 th, con; - - th = readl(data->base + EXYNOS_THD_TEMP_RISE); - th &= ~(0xff << 8 * trip); - th |= temp_to_code(data, temp) << 8 * trip; - writel(th, data->base + EXYNOS_THD_TEMP_RISE); + exynos_tmu_update_temp(data, EXYNOS_THD_TEMP_FALL, 0, temp); + exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN, + EXYNOS_TMU_INTEN_FALL0_SHIFT, true); +} - if (trip == 3) { - con = readl(data->base + EXYNOS_TMU_REG_CONTROL); - con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT); - writel(con, data->base + EXYNOS_TMU_REG_CONTROL); - } +static void exynos4412_tmu_set_high_temp(struct exynos_tmu_data *data, u8 temp) +{ + exynos_tmu_update_temp(data, EXYNOS_THD_TEMP_RISE, 8, temp); + exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN, + EXYNOS_TMU_INTEN_RISE0_SHIFT + 4, true); } -static void exynos4412_tmu_set_trip_hyst(struct exynos_tmu_data *data, - int trip, u8 temp, u8 hyst) +static void exynos4412_tmu_disable_low(struct exynos_tmu_data *data) { - u32 th; + exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN, + EXYNOS_TMU_INTEN_FALL0_SHIFT, false); +} - th = readl(data->base + EXYNOS_THD_TEMP_FALL); - th &= ~(0xff << 8 * trip); - if (hyst) - th |= temp_to_code(data, temp - hyst) << 8 * trip; - writel(th, data->base + EXYNOS_THD_TEMP_FALL); +static void exynos4412_tmu_set_crit_temp(struct exynos_tmu_data *data, u8 temp) +{ + exynos_tmu_update_temp(data, EXYNOS_THD_TEMP_RISE, 24, temp); + exynos_tmu_update_bit(data, EXYNOS_TMU_REG_CONTROL, + EXYNOS_TMU_THERM_TRIP_EN_SHIFT, true); } static void exynos4412_tmu_initialize(struct platform_device *pdev) @@ -436,44 +468,39 @@ static void exynos4412_tmu_initialize(struct platform_device *pdev) sanitize_temp_error(data, trim_info); } -static void exynos5433_tmu_set_trip_temp(struct exynos_tmu_data *data, - int trip, u8 temp) +static void exynos5433_tmu_set_low_temp(struct exynos_tmu_data *data, u8 temp) { - unsigned int reg_off, j; - u32 th; - - if (trip > 3) { - reg_off = EXYNOS5433_THD_TEMP_RISE7_4; - j = trip - 4; - } else { - reg_off = EXYNOS5433_THD_TEMP_RISE3_0; - j = trip; - } + exynos_tmu_update_temp(data, EXYNOS5433_THD_TEMP_FALL3_0, 0, temp); + exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN, + EXYNOS_TMU_INTEN_FALL0_SHIFT, true); +} - th = readl(data->base + reg_off); - th &= ~(0xff << j * 8); - th |= (temp_to_code(data, temp) << j * 8); - writel(th, data->base + reg_off); +static void exynos5433_tmu_set_high_temp(struct exynos_tmu_data *data, u8 temp) +{ + exynos_tmu_update_temp(data, EXYNOS5433_THD_TEMP_RISE3_0, 8, temp); + exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN, + EXYNOS7_TMU_INTEN_RISE0_SHIFT + 1, true); } -static void exynos5433_tmu_set_trip_hyst(struct exynos_tmu_data *data, - int trip, u8 temp, u8 hyst) +static void exynos5433_tmu_disable_low(struct exynos_tmu_data *data) { - unsigned int reg_off, j; - u32 th; + 
exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN, + EXYNOS_TMU_INTEN_FALL0_SHIFT, false); +} - if (trip > 3) { - reg_off = EXYNOS5433_THD_TEMP_FALL7_4; - j = trip - 4; - } else { - reg_off = EXYNOS5433_THD_TEMP_FALL3_0; - j = trip; - } +static void exynos5433_tmu_disable_high(struct exynos_tmu_data *data) +{ + exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN, + EXYNOS7_TMU_INTEN_RISE0_SHIFT + 1, false); +} - th = readl(data->base + reg_off); - th &= ~(0xff << j * 8); - th |= (temp_to_code(data, temp - hyst) << j * 8); - writel(th, data->base + reg_off); +static void exynos5433_tmu_set_crit_temp(struct exynos_tmu_data *data, u8 temp) +{ + exynos_tmu_update_temp(data, EXYNOS5433_THD_TEMP_RISE7_4, 24, temp); + exynos_tmu_update_bit(data, EXYNOS_TMU_REG_CONTROL, + EXYNOS_TMU_THERM_TRIP_EN_SHIFT, true); + exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN, + EXYNOS7_TMU_INTEN_RISE0_SHIFT + 7, true); } static void exynos5433_tmu_initialize(struct platform_device *pdev) @@ -509,34 +536,41 @@ static void exynos5433_tmu_initialize(struct platform_device *pdev) cal_type ? 2 : 1); } -static void exynos7_tmu_set_trip_temp(struct exynos_tmu_data *data, - int trip, u8 temp) +static void exynos7_tmu_set_low_temp(struct exynos_tmu_data *data, u8 temp) { - unsigned int reg_off, bit_off; - u32 th; - - reg_off = ((7 - trip) / 2) * 4; - bit_off = ((8 - trip) % 2); + exynos_tmu_update_temp(data, EXYNOS7_THD_TEMP_FALL7_6 + 12, 0, temp); + exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN, + EXYNOS_TMU_INTEN_FALL0_SHIFT + 0, true); +} - th = readl(data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off); - th &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off)); - th |= temp_to_code(data, temp) << (16 * bit_off); - writel(th, data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off); +static void exynos7_tmu_set_high_temp(struct exynos_tmu_data *data, u8 temp) +{ + exynos_tmu_update_temp(data, EXYNOS7_THD_TEMP_RISE7_6 + 12, 16, temp); + exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN, + EXYNOS7_TMU_INTEN_RISE0_SHIFT + 1, true); } -static void exynos7_tmu_set_trip_hyst(struct exynos_tmu_data *data, - int trip, u8 temp, u8 hyst) +static void exynos7_tmu_disable_low(struct exynos_tmu_data *data) { - unsigned int reg_off, bit_off; - u32 th; + exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN, + EXYNOS_TMU_INTEN_FALL0_SHIFT + 0, false); +} - reg_off = ((7 - trip) / 2) * 4; - bit_off = ((8 - trip) % 2); +static void exynos7_tmu_disable_high(struct exynos_tmu_data *data) +{ + exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN, + EXYNOS7_TMU_INTEN_RISE0_SHIFT + 1, false); +} - th = readl(data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off); - th &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off)); - th |= temp_to_code(data, temp - hyst) << (16 * bit_off); - writel(th, data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off); +static void exynos7_tmu_set_crit_temp(struct exynos_tmu_data *data, u8 temp) +{ + /* + * Like Exynos 4210, Exynos 7 does not seem to support critical temperature + * handling in hardware. Again, we still set a separate interrupt for it. 
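The removed Exynos 7 code derived each trip's register slot arithmetically, with reg_off = ((7 - trip) / 2) * 4 and a 16-bit half selected by (8 - trip) % 2; the rewrite hard-codes only the two slots it still uses. Tabulating the old mapping shows they agree: trip 1 lands at +12 with shift 16 (the new high threshold) and trip 7 at +0 with shift 16 (the new critical one).

#include <stdio.h>

/* Old Exynos 7 mapping from trip index to threshold register slot. */
int main(void)
{
	for (int trip = 0; trip < 8; trip++) {
		int reg_off = ((7 - trip) / 2) * 4;	/* bytes past THD_TEMP_RISE7_6 */
		int bit_off = (8 - trip) % 2;		/* low/high 16-bit half */

		printf("trip %d -> reg +%2d, shift %2d\n",
		       trip, reg_off, 16 * bit_off);
	}
	return 0;
}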
+ */ + exynos_tmu_update_temp(data, EXYNOS7_THD_TEMP_RISE7_6 + 0, 16, temp); + exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN, + EXYNOS7_TMU_INTEN_RISE0_SHIFT + 7, true); } static void exynos7_tmu_initialize(struct platform_device *pdev) @@ -551,95 +585,51 @@ static void exynos7_tmu_initialize(struct platform_device *pdev) static void exynos4210_tmu_control(struct platform_device *pdev, bool on) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); - struct thermal_zone_device *tz = data->tzd; - struct thermal_trip trip; - unsigned int con, interrupt_en = 0, i; + unsigned int con; con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL)); - if (on) { - for (i = 0; i < data->ntrip; i++) { - if (thermal_zone_get_trip(tz, i, &trip)) - continue; - - interrupt_en |= - (1 << (EXYNOS_TMU_INTEN_RISE0_SHIFT + i * 4)); - } - - if (data->soc != SOC_ARCH_EXYNOS4210) - interrupt_en |= - interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; - - con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); - } else { - con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); - } + if (on) + con |= BIT(EXYNOS_TMU_CORE_EN_SHIFT); + else + con &= ~BIT(EXYNOS_TMU_CORE_EN_SHIFT); - writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN); writel(con, data->base + EXYNOS_TMU_REG_CONTROL); } static void exynos5433_tmu_control(struct platform_device *pdev, bool on) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); - struct thermal_zone_device *tz = data->tzd; - struct thermal_trip trip; - unsigned int con, interrupt_en = 0, pd_det_en, i; + unsigned int con, pd_det_en; con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL)); - if (on) { - for (i = 0; i < data->ntrip; i++) { - if (thermal_zone_get_trip(tz, i, &trip)) - continue; - - interrupt_en |= - (1 << (EXYNOS7_TMU_INTEN_RISE0_SHIFT + i)); - } - - interrupt_en |= - interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; - - con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); - } else - con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); + if (on) + con |= BIT(EXYNOS_TMU_CORE_EN_SHIFT); + else + con &= ~BIT(EXYNOS_TMU_CORE_EN_SHIFT); pd_det_en = on ? 
EXYNOS5433_PD_DET_EN : 0; writel(pd_det_en, data->base + EXYNOS5433_TMU_PD_DET_EN); - writel(interrupt_en, data->base + EXYNOS5433_TMU_REG_INTEN); writel(con, data->base + EXYNOS_TMU_REG_CONTROL); } static void exynos7_tmu_control(struct platform_device *pdev, bool on) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); - struct thermal_zone_device *tz = data->tzd; - struct thermal_trip trip; - unsigned int con, interrupt_en = 0, i; + unsigned int con; con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL)); if (on) { - for (i = 0; i < data->ntrip; i++) { - if (thermal_zone_get_trip(tz, i, &trip)) - continue; - - interrupt_en |= - (1 << (EXYNOS7_TMU_INTEN_RISE0_SHIFT + i)); - } - - interrupt_en |= - interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; - - con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); - con |= (1 << EXYNOS7_PD_DET_EN_SHIFT); + con |= BIT(EXYNOS_TMU_CORE_EN_SHIFT); + con |= BIT(EXYNOS7_PD_DET_EN_SHIFT); } else { - con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); - con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT); + con &= ~BIT(EXYNOS_TMU_CORE_EN_SHIFT); + con &= ~BIT(EXYNOS7_PD_DET_EN_SHIFT); } - writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN); writel(con, data->base + EXYNOS_TMU_REG_CONTROL); } @@ -766,10 +756,9 @@ static int exynos7_tmu_read(struct exynos_tmu_data *data) EXYNOS7_TMU_TEMP_MASK; } -static void exynos_tmu_work(struct work_struct *work) +static irqreturn_t exynos_tmu_threaded_irq(int irq, void *id) { - struct exynos_tmu_data *data = container_of(work, - struct exynos_tmu_data, irq_work); + struct exynos_tmu_data *data = id; thermal_zone_device_update(data->tzd, THERMAL_EVENT_UNSPECIFIED); @@ -781,7 +770,8 @@ static void exynos_tmu_work(struct work_struct *work) clk_disable(data->clk); mutex_unlock(&data->lock); - enable_irq(data->irq); + + return IRQ_HANDLED; } static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data) @@ -815,16 +805,6 @@ static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data) writel(val_irq, data->base + tmu_intclear); } -static irqreturn_t exynos_tmu_irq(int irq, void *id) -{ - struct exynos_tmu_data *data = id; - - disable_irq_nosync(irq); - schedule_work(&data->irq_work); - - return IRQ_HANDLED; -} - static const struct of_device_id exynos_tmu_match[] = { { .compatible = "samsung,exynos3250-tmu", @@ -866,10 +846,6 @@ static int exynos_map_dt_data(struct platform_device *pdev) if (!data || !pdev->dev.of_node) return -ENODEV; - data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl"); - if (data->id < 0) - data->id = 0; - data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); if (data->irq <= 0) { dev_err(&pdev->dev, "failed to get IRQ\n"); @@ -891,13 +867,15 @@ static int exynos_map_dt_data(struct platform_device *pdev) switch (data->soc) { case SOC_ARCH_EXYNOS4210: - data->tmu_set_trip_temp = exynos4210_tmu_set_trip_temp; - data->tmu_set_trip_hyst = exynos4210_tmu_set_trip_hyst; + data->tmu_set_low_temp = exynos4210_tmu_set_low_temp; + data->tmu_set_high_temp = exynos4210_tmu_set_high_temp; + data->tmu_disable_low = exynos4210_tmu_disable_low; + data->tmu_disable_high = exynos4210_tmu_disable_high; + data->tmu_set_crit_temp = exynos4210_tmu_set_crit_temp; data->tmu_initialize = exynos4210_tmu_initialize; data->tmu_control = exynos4210_tmu_control; data->tmu_read = exynos4210_tmu_read; data->tmu_clear_irqs = exynos4210_tmu_clear_irqs; - data->ntrip = 4; data->gain = 15; data->reference_voltage = 7; data->efuse_value = 55; @@ -910,14 +888,16 @@ static int exynos_map_dt_data(struct platform_device *pdev) case 
SOC_ARCH_EXYNOS5260: case SOC_ARCH_EXYNOS5420: case SOC_ARCH_EXYNOS5420_TRIMINFO: - data->tmu_set_trip_temp = exynos4412_tmu_set_trip_temp; - data->tmu_set_trip_hyst = exynos4412_tmu_set_trip_hyst; + data->tmu_set_low_temp = exynos4412_tmu_set_low_temp; + data->tmu_set_high_temp = exynos4412_tmu_set_high_temp; + data->tmu_disable_low = exynos4412_tmu_disable_low; + data->tmu_disable_high = exynos4210_tmu_disable_high; + data->tmu_set_crit_temp = exynos4412_tmu_set_crit_temp; data->tmu_initialize = exynos4412_tmu_initialize; data->tmu_control = exynos4210_tmu_control; data->tmu_read = exynos4412_tmu_read; data->tmu_set_emulation = exynos4412_tmu_set_emulation; data->tmu_clear_irqs = exynos4210_tmu_clear_irqs; - data->ntrip = 4; data->gain = 8; data->reference_voltage = 16; data->efuse_value = 55; @@ -929,14 +909,16 @@ static int exynos_map_dt_data(struct platform_device *pdev) data->max_efuse_value = 100; break; case SOC_ARCH_EXYNOS5433: - data->tmu_set_trip_temp = exynos5433_tmu_set_trip_temp; - data->tmu_set_trip_hyst = exynos5433_tmu_set_trip_hyst; + data->tmu_set_low_temp = exynos5433_tmu_set_low_temp; + data->tmu_set_high_temp = exynos5433_tmu_set_high_temp; + data->tmu_disable_low = exynos5433_tmu_disable_low; + data->tmu_disable_high = exynos5433_tmu_disable_high; + data->tmu_set_crit_temp = exynos5433_tmu_set_crit_temp; data->tmu_initialize = exynos5433_tmu_initialize; data->tmu_control = exynos5433_tmu_control; data->tmu_read = exynos4412_tmu_read; data->tmu_set_emulation = exynos4412_tmu_set_emulation; data->tmu_clear_irqs = exynos4210_tmu_clear_irqs; - data->ntrip = 8; data->gain = 8; if (res.start == EXYNOS5433_G3D_BASE) data->reference_voltage = 23; @@ -947,14 +929,16 @@ static int exynos_map_dt_data(struct platform_device *pdev) data->max_efuse_value = 150; break; case SOC_ARCH_EXYNOS7: - data->tmu_set_trip_temp = exynos7_tmu_set_trip_temp; - data->tmu_set_trip_hyst = exynos7_tmu_set_trip_hyst; + data->tmu_set_low_temp = exynos7_tmu_set_low_temp; + data->tmu_set_high_temp = exynos7_tmu_set_high_temp; + data->tmu_disable_low = exynos7_tmu_disable_low; + data->tmu_disable_high = exynos7_tmu_disable_high; + data->tmu_set_crit_temp = exynos7_tmu_set_crit_temp; data->tmu_initialize = exynos7_tmu_initialize; data->tmu_control = exynos7_tmu_control; data->tmu_read = exynos7_tmu_read; data->tmu_set_emulation = exynos4412_tmu_set_emulation; data->tmu_clear_irqs = exynos4210_tmu_clear_irqs; - data->ntrip = 8; data->gain = 9; data->reference_voltage = 17; data->efuse_value = 75; @@ -990,9 +974,32 @@ static int exynos_map_dt_data(struct platform_device *pdev) return 0; } +static int exynos_set_trips(struct thermal_zone_device *tz, int low, int high) +{ + struct exynos_tmu_data *data = thermal_zone_device_priv(tz); + + mutex_lock(&data->lock); + clk_enable(data->clk); + + if (low > INT_MIN) + data->tmu_set_low_temp(data, low / MCELSIUS); + else + data->tmu_disable_low(data); + if (high < INT_MAX) + data->tmu_set_high_temp(data, high / MCELSIUS); + else + data->tmu_disable_high(data); + + clk_disable(data->clk); + mutex_unlock(&data->lock); + + return 0; +} + static const struct thermal_zone_device_ops exynos_sensor_ops = { .get_temp = exynos_get_temp, .set_emul_temp = exynos_tmu_set_emulation, + .set_trips = exynos_set_trips, }; static int exynos_tmu_probe(struct platform_device *pdev) @@ -1013,44 +1020,40 @@ static int exynos_tmu_probe(struct platform_device *pdev) * TODO: Add regulator as an SOC feature, so that regulator enable * is a compulsory call. 
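The exynos_set_trips() callback added above follows the thermal core's .set_trips() contract: the core passes a low/high window around the current temperature in millicelsius and uses INT_MIN/INT_MAX to mean "no trip on that side", so the driver disables the matching interrupt rather than programming a bogus threshold. A sketch of that dispatch with the hardware hooks stubbed out:

#include <limits.h>
#include <stdio.h>

#define MCELSIUS 1000

static void set_low(int c)     { printf("falling threshold %d C\n", c); }
static void set_high(int c)    { printf("rising threshold %d C\n", c); }
static void disable_low(void)  { puts("falling interrupt off"); }
static void disable_high(void) { puts("rising interrupt off"); }

/* low/high arrive in millicelsius; INT_MIN/INT_MAX mean "no trip that way". */
static int set_trips(int low, int high)
{
	if (low > INT_MIN)
		set_low(low / MCELSIUS);
	else
		disable_low();

	if (high < INT_MAX)
		set_high(high / MCELSIUS);
	else
		disable_high();

	return 0;
}

int main(void)
{
	set_trips(55 * MCELSIUS, 85 * MCELSIUS);
	set_trips(INT_MIN, INT_MAX);
	return 0;
}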
*/ - data->regulator = devm_regulator_get_optional(&pdev->dev, "vtmu"); - if (!IS_ERR(data->regulator)) { - ret = regulator_enable(data->regulator); - if (ret) { - dev_err(&pdev->dev, "failed to enable vtmu\n"); - return ret; - } - } else { - if (PTR_ERR(data->regulator) == -EPROBE_DEFER) - return -EPROBE_DEFER; - dev_info(&pdev->dev, "Regulator node (vtmu) not found\n"); + ret = devm_regulator_get_enable_optional(&pdev->dev, "vtmu"); + switch (ret) { + case 0: + case -ENODEV: + break; + case -EPROBE_DEFER: + return -EPROBE_DEFER; + default: + dev_err(&pdev->dev, "Failed to get enabled regulator: %d\n", + ret); + return ret; } ret = exynos_map_dt_data(pdev); if (ret) - goto err_sensor; - - INIT_WORK(&data->irq_work, exynos_tmu_work); + return ret; data->clk = devm_clk_get(&pdev->dev, "tmu_apbif"); if (IS_ERR(data->clk)) { dev_err(&pdev->dev, "Failed to get clock\n"); - ret = PTR_ERR(data->clk); - goto err_sensor; + return PTR_ERR(data->clk); } data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif"); if (IS_ERR(data->clk_sec)) { if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) { dev_err(&pdev->dev, "Failed to get triminfo clock\n"); - ret = PTR_ERR(data->clk_sec); - goto err_sensor; + return PTR_ERR(data->clk_sec); } } else { ret = clk_prepare(data->clk_sec); if (ret) { dev_err(&pdev->dev, "Failed to get clock\n"); - goto err_sensor; + return ret; } } @@ -1080,10 +1083,12 @@ static int exynos_tmu_probe(struct platform_device *pdev) break; } - /* - * data->tzd must be registered before calling exynos_tmu_initialize(), - * requesting irq and calling exynos_tmu_control(). - */ + ret = exynos_tmu_initialize(pdev); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize TMU\n"); + goto err_sclk; + } + data->tzd = devm_thermal_of_zone_register(&pdev->dev, 0, data, &exynos_sensor_ops); if (IS_ERR(data->tzd)) { @@ -1094,14 +1099,17 @@ static int exynos_tmu_probe(struct platform_device *pdev) goto err_sclk; } - ret = exynos_tmu_initialize(pdev); + ret = exynos_thermal_zone_configure(pdev); if (ret) { - dev_err(&pdev->dev, "Failed to initialize TMU\n"); + dev_err(&pdev->dev, "Failed to configure the thermal zone\n"); goto err_sclk; } - ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq, - IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data); + ret = devm_request_threaded_irq(&pdev->dev, data->irq, NULL, + exynos_tmu_threaded_irq, + IRQF_TRIGGER_RISING + | IRQF_SHARED | IRQF_ONESHOT, + dev_name(&pdev->dev), data); if (ret) { dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq); goto err_sclk; @@ -1117,10 +1125,6 @@ err_clk: err_clk_sec: if (!IS_ERR(data->clk_sec)) clk_unprepare(data->clk_sec); -err_sensor: - if (!IS_ERR(data->regulator)) - regulator_disable(data->regulator); - return ret; } @@ -1134,9 +1138,6 @@ static void exynos_tmu_remove(struct platform_device *pdev) clk_unprepare(data->clk); if (!IS_ERR(data->clk_sec)) clk_unprepare(data->clk_sec); - - if (!IS_ERR(data->regulator)) - regulator_disable(data->regulator); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c index f989b55a8aa8..6a8e386dbc8d 100644 --- a/drivers/thermal/sun8i_thermal.c +++ b/drivers/thermal/sun8i_thermal.c @@ -606,6 +606,18 @@ static const struct ths_thermal_chip sun50i_h6_ths = { .calc_temp = sun8i_ths_calc_temp, }; +static const struct ths_thermal_chip sun20i_d1_ths = { + .sensor_num = 1, + .has_bus_clk_reset = true, + .offset = 188552, + .scale = 673, + .temp_data_base = SUN50I_H6_THS_TEMP_DATA, + .calibrate = 
sun50i_h6_ths_calibrate, + .init = sun50i_h6_thermal_init, + .irq_ack = sun50i_h6_irq_ack, + .calc_temp = sun8i_ths_calc_temp, +}; + static const struct of_device_id of_ths_match[] = { { .compatible = "allwinner,sun8i-a83t-ths", .data = &sun8i_a83t_ths }, { .compatible = "allwinner,sun8i-h3-ths", .data = &sun8i_h3_ths }, @@ -614,6 +626,7 @@ static const struct of_device_id of_ths_match[] = { { .compatible = "allwinner,sun50i-a100-ths", .data = &sun50i_a100_ths }, { .compatible = "allwinner,sun50i-h5-ths", .data = &sun50i_h5_ths }, { .compatible = "allwinner,sun50i-h6-ths", .data = &sun50i_h6_ths }, + { .compatible = "allwinner,sun20i-d1-ths", .data = &sun20i_d1_ths }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, of_ths_match); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 9c17d35ccbbd..fa88d8707241 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -37,8 +37,6 @@ static LIST_HEAD(thermal_governor_list); static DEFINE_MUTEX(thermal_list_lock); static DEFINE_MUTEX(thermal_governor_lock); -static atomic_t in_suspend; - static struct thermal_governor *def_governor; /* @@ -203,9 +201,6 @@ int thermal_zone_device_set_policy(struct thermal_zone_device *tz, mutex_lock(&thermal_governor_lock); mutex_lock(&tz->lock); - if (!device_is_registered(&tz->device)) - goto exit; - gov = __find_governor(strim(policy)); if (!gov) goto exit; @@ -314,21 +309,43 @@ static void handle_non_critical_trips(struct thermal_zone_device *tz, def_governor->throttle(tz, trip); } -void thermal_zone_device_critical(struct thermal_zone_device *tz) +void thermal_governor_update_tz(struct thermal_zone_device *tz, + enum thermal_notify_event reason) +{ + if (!tz->governor || !tz->governor->update_tz) + return; + + tz->governor->update_tz(tz, reason); +} + +static void thermal_zone_device_halt(struct thermal_zone_device *tz, bool shutdown) { /* * poweroff_delay_ms must be a carefully profiled positive value. * Its a must for forced_emergency_poweroff_work to be scheduled. */ int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS; + const char *msg = "Temperature too high"; - dev_emerg(&tz->device, "%s: critical temperature reached, " - "shutting down\n", tz->type); + dev_emerg(&tz->device, "%s: critical temperature reached\n", tz->type); + + if (shutdown) + hw_protection_shutdown(msg, poweroff_delay_ms); + else + hw_protection_reboot(msg, poweroff_delay_ms); +} - hw_protection_shutdown("Temperature too high", poweroff_delay_ms); +void thermal_zone_device_critical(struct thermal_zone_device *tz) +{ + thermal_zone_device_halt(tz, true); } EXPORT_SYMBOL(thermal_zone_device_critical); +void thermal_zone_device_critical_reboot(struct thermal_zone_device *tz) +{ + thermal_zone_device_halt(tz, false); +} + static void handle_critical_trips(struct thermal_zone_device *tz, const struct thermal_trip *trip) { @@ -345,22 +362,51 @@ static void handle_critical_trips(struct thermal_zone_device *tz, } static void handle_thermal_trip(struct thermal_zone_device *tz, - const struct thermal_trip *trip) + struct thermal_trip *trip) { if (trip->temperature == THERMAL_TEMP_INVALID) return; - if (tz->last_temperature != THERMAL_TEMP_INVALID) { - if (tz->last_temperature < trip->temperature && - tz->temperature >= trip->temperature) + if (tz->last_temperature == THERMAL_TEMP_INVALID) { + /* Initialization. 
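The sun20i-d1 support above is pure data: a new ths_thermal_chip instance that reuses the H6 callbacks with D1 calibration constants, plus one of_ths_match entry. A minimal model of this compatible-string-to-chip-data dispatch; the D1 numbers come straight from the diff, the rest is scaffolding:

#include <stdio.h>
#include <string.h>

struct chip_data {
	int sensor_num;
	int offset, scale;	/* calibration constants */
};

static const struct chip_data sun20i_d1 = {
	.sensor_num = 1, .offset = 188552, .scale = 673,
};

struct match {
	const char *compatible;
	const struct chip_data *data;
};

static const struct match of_match[] = {
	{ "allwinner,sun20i-d1-ths", &sun20i_d1 },
	{ NULL, NULL },		/* sentinel, as in of_ths_match[] */
};

static const struct chip_data *lookup(const char *compatible)
{
	for (const struct match *m = of_match; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct chip_data *c = lookup("allwinner,sun20i-d1-ths");

	if (c)
		printf("%d sensor(s), offset=%d scale=%d\n",
		       c->sensor_num, c->offset, c->scale);
	return 0;
}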
*/ + trip->threshold = trip->temperature; + if (tz->temperature >= trip->threshold) + trip->threshold -= trip->hysteresis; + } else if (tz->last_temperature < trip->threshold) { + /* + * The trip threshold is equal to the trip temperature, unless + * the latter has changed in the meantime. In either case, + * the trip is crossed if the current zone temperature is at + * least equal to its temperature, but otherwise ensure that + * the threshold and the trip temperature will be equal. + */ + if (tz->temperature >= trip->temperature) { thermal_notify_tz_trip_up(tz->id, thermal_zone_trip_id(tz, trip), tz->temperature); - if (tz->last_temperature >= trip->temperature && - tz->temperature < trip->temperature - trip->hysteresis) + trip->threshold = trip->temperature - trip->hysteresis; + } else { + trip->threshold = trip->temperature; + } + } else { + /* + * The previous zone temperature was above or equal to the trip + * threshold, which would be equal to the "low temperature" of + * the trip (its temperature minus its hysteresis), unless the + * trip temperature or hysteresis had changed. In either case, + * the trip is crossed if the current zone temperature is below + * the low temperature of the trip, but otherwise ensure that + * the trip threshold will be equal to the low temperature of + * the trip. + */ + if (tz->temperature < trip->temperature - trip->hysteresis) { thermal_notify_tz_trip_down(tz->id, thermal_zone_trip_id(tz, trip), tz->temperature); + trip->threshold = trip->temperature; + } else { + trip->threshold = trip->temperature - trip->hysteresis; + } } if (trip->type == THERMAL_TRIP_CRITICAL || trip->type == THERMAL_TRIP_HOT) @@ -390,9 +436,20 @@ static void update_temperature(struct thermal_zone_device *tz) thermal_genl_sampling_temp(tz->id, temp); } +static void thermal_zone_device_check(struct work_struct *work) +{ + struct thermal_zone_device *tz = container_of(work, struct + thermal_zone_device, + poll_queue.work); + thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); +} + static void thermal_zone_device_init(struct thermal_zone_device *tz) { struct thermal_instance *pos; + + INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check); + tz->temperature = THERMAL_TEMP_INVALID; tz->prev_low_trip = -INT_MAX; tz->prev_high_trip = INT_MAX; @@ -403,14 +460,9 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz) void __thermal_zone_device_update(struct thermal_zone_device *tz, enum thermal_notify_event event) { - const struct thermal_trip *trip; - - if (atomic_read(&in_suspend)) - return; + struct thermal_trip *trip; - if (WARN_ONCE(!tz->ops->get_temp, - "'%s' must not be called without 'get_temp' ops set\n", - __func__)) + if (tz->suspended) return; if (!thermal_zone_device_is_enabled(tz)) @@ -442,12 +494,6 @@ static int thermal_zone_device_set_mode(struct thermal_zone_device *tz, return ret; } - if (!device_is_registered(&tz->device)) { - mutex_unlock(&tz->lock); - - return -ENODEV; - } - if (tz->ops->change_mode) ret = tz->ops->change_mode(tz, mode); @@ -485,24 +531,21 @@ int thermal_zone_device_is_enabled(struct thermal_zone_device *tz) return tz->mode == THERMAL_DEVICE_ENABLED; } +static bool thermal_zone_is_present(struct thermal_zone_device *tz) +{ + return !list_empty(&tz->node); +} + void thermal_zone_device_update(struct thermal_zone_device *tz, enum thermal_notify_event event) { mutex_lock(&tz->lock); - if (device_is_registered(&tz->device)) + if (thermal_zone_is_present(tz)) __thermal_zone_device_update(tz, event); mutex_unlock(&tz->lock); 
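The reworked handle_thermal_trip() keeps a per-trip threshold that flips between the trip temperature and temperature minus hysteresis as the zone crosses it, which is what debounces the up/down notifications. A compact user-space model of that state machine; the 70/5 values and the sample series are made up for illustration:

#include <stdio.h>

struct trip { int temp, hyst, threshold; };

/* Mirrors the new threshold update logic (simplified). */
static void handle(struct trip *t, int last, int now)
{
	if (last < t->threshold) {
		if (now >= t->temp) {
			printf("trip UP at %d\n", now);
			t->threshold = t->temp - t->hyst;
		} else {
			t->threshold = t->temp;
		}
	} else {
		if (now < t->temp - t->hyst) {
			printf("trip DOWN at %d\n", now);
			t->threshold = t->temp;
		} else {
			t->threshold = t->temp - t->hyst;
		}
	}
}

int main(void)
{
	struct trip t = { .temp = 70, .hyst = 5, .threshold = 70 };
	int samples[] = { 60, 72, 68, 66, 64, 71 };
	int last = 50;

	/* Prints UP at 72 and 71, DOWN at 64; 68 and 66 stay inside the
	 * hysteresis band and trigger nothing. */
	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		handle(&t, last, samples[i]);
		last = samples[i];
	}
	return 0;
}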
} EXPORT_SYMBOL_GPL(thermal_zone_device_update); -static void thermal_zone_device_check(struct work_struct *work) -{ - struct thermal_zone_device *tz = container_of(work, struct - thermal_zone_device, - poll_queue.work); - thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); -} - int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *), void *data) { @@ -694,6 +737,8 @@ int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz, list_add_tail(&dev->tz_node, &tz->thermal_instances); list_add_tail(&dev->cdev_node, &cdev->thermal_instances); atomic_set(&tz->need_update, 1); + + thermal_governor_update_tz(tz, THERMAL_TZ_BIND_CDEV); } mutex_unlock(&cdev->lock); mutex_unlock(&tz->lock); @@ -752,6 +797,9 @@ int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz, if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { list_del(&pos->tz_node); list_del(&pos->cdev_node); + + thermal_governor_update_tz(tz, THERMAL_TZ_UNBIND_CDEV); + mutex_unlock(&cdev->lock); mutex_unlock(&tz->lock); goto unbind; @@ -793,7 +841,7 @@ static void thermal_release(struct device *dev) tz = to_thermal_zone(dev); thermal_zone_destroy_device_groups(tz); mutex_destroy(&tz->lock); - kfree(tz); + complete(&tz->removal); } else if (!strncmp(dev_name(dev), "cooling_device", sizeof("cooling_device") - 1)) { cdev = to_cooling_device(dev); @@ -1260,7 +1308,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t return ERR_PTR(-EINVAL); } - if (!ops) { + if (!ops || !ops->get_temp) { pr_err("Thermal zone device ops not defined\n"); return ERR_PTR(-EINVAL); } @@ -1284,8 +1332,10 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t } INIT_LIST_HEAD(&tz->thermal_instances); + INIT_LIST_HEAD(&tz->node); ida_init(&tz->ida); mutex_init(&tz->lock); + init_completion(&tz->removal); id = ida_alloc(&thermal_tz_ida, GFP_KERNEL); if (id < 0) { result = id; @@ -1348,14 +1398,14 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t } mutex_lock(&thermal_list_lock); + mutex_lock(&tz->lock); list_add_tail(&tz->node, &thermal_tz_list); + mutex_unlock(&tz->lock); mutex_unlock(&thermal_list_lock); /* Bind cooling devices for this zone */ bind_tz(tz); - INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check); - thermal_zone_device_init(tz); /* Update the new thermal zone and mark it as already updated. 
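With this series, thermal_release() no longer frees the zone; it completes tz->removal, and thermal_zone_device_unregister() waits for that completion before the final kfree(), so a sysfs reader still holding a device reference cannot race with the free. A pthread analogy of the same ordering, with struct completion semantics approximated rather than the kernel primitive:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* User-space stand-in for struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

struct zone { struct completion removal; };

/* Models thermal_release(): runs when the last device reference drops. */
static void *release_thread(void *arg)
{
	struct zone *tz = arg;

	puts("last reference dropped");
	complete(&tz->removal);
	return NULL;
}

int main(void)
{
	struct zone *tz = calloc(1, sizeof(*tz));
	pthread_t t;

	pthread_mutex_init(&tz->removal.lock, NULL);
	pthread_cond_init(&tz->removal.cond, NULL);
	pthread_create(&t, NULL, release_thread, tz);

	/* Models unregister: free the zone only after release has run. */
	wait_for_completion(&tz->removal);
	pthread_join(t, NULL);
	free(tz);
	puts("zone freed safely");
	return 0;
}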
*/ if (atomic_cmpxchg(&tz->need_update, 1, 0)) @@ -1369,7 +1419,6 @@ unregister: device_del(&tz->device); release_device: put_device(&tz->device); - tz = NULL; remove_id: ida_free(&thermal_tz_ida, id); free_tzp: @@ -1439,7 +1488,10 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) mutex_unlock(&thermal_list_lock); return; } + + mutex_lock(&tz->lock); list_del(&tz->node); + mutex_unlock(&tz->lock); /* Unbind all cdevs associated with 'this' thermal zone */ list_for_each_entry(cdev, &thermal_cdev_list, node) @@ -1456,15 +1508,16 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) ida_free(&thermal_tz_ida, tz->id); ida_destroy(&tz->ida); - mutex_lock(&tz->lock); device_del(&tz->device); - mutex_unlock(&tz->lock); kfree(tz->tzp); put_device(&tz->device); thermal_notify_tz_delete(tz_id); + + wait_for_completion(&tz->removal); + kfree(tz); } EXPORT_SYMBOL_GPL(thermal_zone_device_unregister); @@ -1506,6 +1559,22 @@ exit: } EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name); +static void thermal_zone_device_resume(struct work_struct *work) +{ + struct thermal_zone_device *tz; + + tz = container_of(work, struct thermal_zone_device, poll_queue.work); + + mutex_lock(&tz->lock); + + tz->suspended = false; + + thermal_zone_device_init(tz); + __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); + + mutex_unlock(&tz->lock); +} + static int thermal_pm_notify(struct notifier_block *nb, unsigned long mode, void *_unused) { @@ -1515,17 +1584,43 @@ static int thermal_pm_notify(struct notifier_block *nb, case PM_HIBERNATION_PREPARE: case PM_RESTORE_PREPARE: case PM_SUSPEND_PREPARE: - atomic_set(&in_suspend, 1); + mutex_lock(&thermal_list_lock); + + list_for_each_entry(tz, &thermal_tz_list, node) { + mutex_lock(&tz->lock); + + tz->suspended = true; + + mutex_unlock(&tz->lock); + } + + mutex_unlock(&thermal_list_lock); break; case PM_POST_HIBERNATION: case PM_POST_RESTORE: case PM_POST_SUSPEND: - atomic_set(&in_suspend, 0); + mutex_lock(&thermal_list_lock); + list_for_each_entry(tz, &thermal_tz_list, node) { - thermal_zone_device_init(tz); - thermal_zone_device_update(tz, - THERMAL_EVENT_UNSPECIFIED); + mutex_lock(&tz->lock); + + cancel_delayed_work(&tz->poll_queue); + + /* + * Replace the work function with the resume one, which + * will restore the original work function and schedule + * the polling work if needed. + */ + INIT_DELAYED_WORK(&tz->poll_queue, + thermal_zone_device_resume); + /* Queue up the work without a delay. 
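The PM notifier now marks each zone suspended under its own lock instead of flipping a global in_suspend atomic, and on resume re-points poll_queue at thermal_zone_device_resume(), which clears the flag and restores normal polling. A minimal sketch of the per-zone flag check that replaces the global, with the update path reduced to a print:

#include <stdbool.h>
#include <stdio.h>

struct zone { bool suspended; };

/* Models __thermal_zone_device_update(): updates are dropped per zone
 * while it is marked suspended. */
static void zone_update(struct zone *tz)
{
	if (tz->suspended) {
		puts("update skipped: zone suspended");
		return;
	}
	puts("temperature re-read, trips handled");
}

int main(void)
{
	struct zone tz = { 0 };

	zone_update(&tz);
	tz.suspended = true;	/* PM_SUSPEND_PREPARE */
	zone_update(&tz);
	tz.suspended = false;	/* resume work clears the flag... */
	zone_update(&tz);	/* ...and re-runs the update */
	return 0;
}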
*/ + mod_delayed_work(system_freezable_power_efficient_wq, + &tz->poll_queue, 0); + + mutex_unlock(&tz->lock); } + + mutex_unlock(&thermal_list_lock); break; default: break; diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 0a3b3ec5120b..4e023d54fd27 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -114,16 +114,19 @@ int thermal_zone_device_set_policy(struct thermal_zone_device *, char *); int thermal_build_list_of_policies(char *buf); void __thermal_zone_device_update(struct thermal_zone_device *tz, enum thermal_notify_event event); +void thermal_zone_device_critical_reboot(struct thermal_zone_device *tz); +void thermal_governor_update_tz(struct thermal_zone_device *tz, + enum thermal_notify_event reason); /* Helpers */ #define for_each_trip(__tz, __trip) \ for (__trip = __tz->trips; __trip - __tz->trips < __tz->num_trips; __trip++) void __thermal_zone_set_trips(struct thermal_zone_device *tz); -int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id, - struct thermal_trip *trip); -int thermal_zone_trip_id(struct thermal_zone_device *tz, +int thermal_zone_trip_id(const struct thermal_zone_device *tz, const struct thermal_trip *trip); +void thermal_zone_trip_updated(struct thermal_zone_device *tz, + const struct thermal_trip *trip); int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); /* sysfs I/F */ diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c index 69e8ea4aa908..c3982e0f0075 100644 --- a/drivers/thermal/thermal_helpers.c +++ b/drivers/thermal/thermal_helpers.c @@ -82,20 +82,18 @@ EXPORT_SYMBOL(get_thermal_instance); */ int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp) { - int ret = -EINVAL; - int count; + const struct thermal_trip *trip; int crit_temp = INT_MAX; - struct thermal_trip trip; + int ret = -EINVAL; lockdep_assert_held(&tz->lock); ret = tz->ops->get_temp(tz, temp); if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) { - for (count = 0; count < tz->num_trips; count++) { - ret = __thermal_zone_get_trip(tz, count, &trip); - if (!ret && trip.type == THERMAL_TRIP_CRITICAL) { - crit_temp = trip.temperature; + for_each_trip(tz, trip) { + if (trip->type == THERMAL_TRIP_CRITICAL) { + crit_temp = trip->temperature; break; } } @@ -139,10 +137,7 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp) goto unlock; } - if (device_is_registered(&tz->device)) - ret = __thermal_zone_get_temp(tz, temp); - else - ret = -ENODEV; + ret = __thermal_zone_get_temp(tz, temp); unlock: mutex_unlock(&tz->lock); diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c index c3ae44659b81..252116f1e535 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c @@ -80,10 +80,7 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf) mutex_lock(&tz->lock); - if (device_is_registered(&tz->device)) - ret = tz->ops->get_crit_temp(tz, &temperature); - else - ret = -ENODEV; + ret = tz->ops->get_crit_temp(tz, &temperature); mutex_unlock(&tz->lock); diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c index 08bc46c3ec7b..332052e24a86 100644 --- a/drivers/thermal/thermal_netlink.c +++ b/drivers/thermal/thermal_netlink.c @@ -13,9 +13,14 @@ #include "thermal_core.h" +enum thermal_genl_multicast_groups { + THERMAL_GENL_SAMPLING_GROUP = 0, + THERMAL_GENL_EVENT_GROUP = 1, +}; + static const struct genl_multicast_group 
thermal_genl_mcgrps[] = { - { .name = THERMAL_GENL_SAMPLING_GROUP_NAME, }, - { .name = THERMAL_GENL_EVENT_GROUP_NAME, }, + [THERMAL_GENL_SAMPLING_GROUP] = { .name = THERMAL_GENL_SAMPLING_GROUP_NAME, }, + [THERMAL_GENL_EVENT_GROUP] = { .name = THERMAL_GENL_EVENT_GROUP_NAME, }, }; static const struct nla_policy thermal_genl_policy[THERMAL_GENL_ATTR_MAX + 1] = { @@ -71,6 +76,11 @@ typedef int (*cb_t)(struct param *); static struct genl_family thermal_gnl_family; +static int thermal_group_has_listeners(enum thermal_genl_multicast_groups group) +{ + return genl_has_listeners(&thermal_gnl_family, &init_net, group); +} + /************************** Sampling encoding *******************************/ int thermal_genl_sampling_temp(int id, int temp) @@ -78,6 +88,9 @@ int thermal_genl_sampling_temp(int id, int temp) struct sk_buff *skb; void *hdr; + if (!thermal_group_has_listeners(THERMAL_GENL_SAMPLING_GROUP)) + return 0; + skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOMEM; @@ -95,7 +108,7 @@ int thermal_genl_sampling_temp(int id, int temp) genlmsg_end(skb, hdr); - genlmsg_multicast(&thermal_gnl_family, skb, 0, 0, GFP_KERNEL); + genlmsg_multicast(&thermal_gnl_family, skb, 0, THERMAL_GENL_SAMPLING_GROUP, GFP_KERNEL); return 0; out_cancel: @@ -275,6 +288,9 @@ static int thermal_genl_send_event(enum thermal_genl_event event, int ret = -EMSGSIZE; void *hdr; + if (!thermal_group_has_listeners(THERMAL_GENL_EVENT_GROUP)) + return 0; + msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!msg) return -ENOMEM; @@ -290,7 +306,7 @@ static int thermal_genl_send_event(enum thermal_genl_event event, genlmsg_end(msg, hdr); - genlmsg_multicast(&thermal_gnl_family, msg, 0, 1, GFP_KERNEL); + genlmsg_multicast(&thermal_gnl_family, msg, 0, THERMAL_GENL_EVENT_GROUP, GFP_KERNEL); return 0; @@ -450,10 +466,10 @@ out_cancel_nest: static int thermal_genl_cmd_tz_get_trip(struct param *p) { struct sk_buff *msg = p->msg; + const struct thermal_trip *trip; struct thermal_zone_device *tz; struct nlattr *start_trip; - struct thermal_trip trip; - int ret, i, id; + int id; if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID]) return -EINVAL; @@ -470,16 +486,12 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p) mutex_lock(&tz->lock); - for (i = 0; i < tz->num_trips; i++) { - - ret = __thermal_zone_get_trip(tz, i, &trip); - if (ret) - goto out_cancel_nest; - - if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) || - nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, trip.type) || - nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TEMP, trip.temperature) || - nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_HYST, trip.hysteresis)) + for_each_trip(tz, trip) { + if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, + thermal_zone_trip_id(tz, trip)) || + nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, trip->type) || + nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TEMP, trip->temperature) || + nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_HYST, trip->hysteresis)) goto out_cancel_nest; } diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index 1e0655b63259..4d6c22e0ed85 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -475,6 +475,7 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node * struct thermal_zone_params tzp = {}; struct thermal_zone_device_ops *of_ops; struct device_node *np; + const char *action; int delay, pdelay; int ntrips, mask; int ret; @@ -511,6 +512,11 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node * mask = 
GENMASK_ULL((ntrips) - 1, 0); + ret = of_property_read_string(np, "critical-action", &action); + if (!ret) + if (!of_ops->critical && !strcasecmp(action, "reboot")) + of_ops->critical = thermal_zone_device_critical_reboot; + tz = thermal_zone_device_register_with_trips(np->name, trips, ntrips, mask, data, of_ops, &tzp, pdelay, delay); diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index eef40d4f3063..f4033865b093 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -83,25 +83,12 @@ trip_point_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_zone_device *tz = to_thermal_zone(dev); - struct thermal_trip trip; - int trip_id, result; + int trip_id; if (sscanf(attr->attr.name, "trip_point_%d_type", &trip_id) != 1) return -EINVAL; - mutex_lock(&tz->lock); - - if (device_is_registered(dev)) - result = __thermal_zone_get_trip(tz, trip_id, &trip); - else - result = -ENODEV; - - mutex_unlock(&tz->lock); - - if (result) - return result; - - switch (trip.type) { + switch (tz->trips[trip_id].type) { case THERMAL_TRIP_CRITICAL: return sprintf(buf, "critical\n"); case THERMAL_TRIP_HOT: @@ -120,28 +107,33 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct thermal_zone_device *tz = to_thermal_zone(dev); - struct thermal_trip trip; + struct thermal_trip *trip; int trip_id, ret; + int temp; + + ret = kstrtoint(buf, 10, &temp); + if (ret) + return -EINVAL; if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1) return -EINVAL; mutex_lock(&tz->lock); - if (!device_is_registered(dev)) { - ret = -ENODEV; - goto unlock; - } + trip = &tz->trips[trip_id]; - ret = __thermal_zone_get_trip(tz, trip_id, &trip); - if (ret) - goto unlock; + if (temp != trip->temperature) { + if (tz->ops->set_trip_temp) { + ret = tz->ops->set_trip_temp(tz, trip_id, temp); + if (ret) + goto unlock; + } - ret = kstrtoint(buf, 10, &trip.temperature); - if (ret) - goto unlock; + thermal_zone_set_trip_temp(tz, trip, temp); + + __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED); + } - ret = thermal_zone_set_trip(tz, trip_id, &trip); unlock: mutex_unlock(&tz->lock); @@ -153,25 +145,12 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_zone_device *tz = to_thermal_zone(dev); - struct thermal_trip trip; - int trip_id, ret; + int trip_id; if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1) return -EINVAL; - mutex_lock(&tz->lock); - - if (device_is_registered(dev)) - ret = __thermal_zone_get_trip(tz, trip_id, &trip); - else - ret = -ENODEV; - - mutex_unlock(&tz->lock); - - if (ret) - return ret; - - return sprintf(buf, "%d\n", trip.temperature); + return sprintf(buf, "%d\n", tz->trips[trip_id].temperature); } static ssize_t @@ -179,28 +158,33 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct thermal_zone_device *tz = to_thermal_zone(dev); - struct thermal_trip trip; + struct thermal_trip *trip; int trip_id, ret; + int hyst; + + ret = kstrtoint(buf, 10, &hyst); + if (ret || hyst < 0) + return -EINVAL; if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1) return -EINVAL; mutex_lock(&tz->lock); - if (!device_is_registered(dev)) { - ret = -ENODEV; - goto unlock; - } + trip = &tz->trips[trip_id]; - ret = __thermal_zone_get_trip(tz, trip_id, &trip); - if (ret) - goto unlock; + if (hyst != trip->hysteresis) { + if 
(tz->ops->set_trip_hyst) { + ret = tz->ops->set_trip_hyst(tz, trip_id, hyst); + if (ret) + goto unlock; + } - ret = kstrtoint(buf, 10, &trip.hysteresis); - if (ret) - goto unlock; + trip->hysteresis = hyst; + + thermal_zone_trip_updated(tz, trip); + } - ret = thermal_zone_set_trip(tz, trip_id, &trip); unlock: mutex_unlock(&tz->lock); @@ -212,22 +196,12 @@ trip_point_hyst_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_zone_device *tz = to_thermal_zone(dev); - struct thermal_trip trip; - int trip_id, ret; + int trip_id; if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1) return -EINVAL; - mutex_lock(&tz->lock); - - if (device_is_registered(dev)) - ret = __thermal_zone_get_trip(tz, trip_id, &trip); - else - ret = -ENODEV; - - mutex_unlock(&tz->lock); - - return ret ? ret : sprintf(buf, "%d\n", trip.hysteresis); + return sprintf(buf, "%d\n", tz->trips[trip_id].hysteresis); } static ssize_t @@ -276,11 +250,6 @@ emul_temp_store(struct device *dev, struct device_attribute *attr, mutex_lock(&tz->lock); - if (!device_is_registered(dev)) { - ret = -ENODEV; - goto unlock; - } - if (!tz->ops->set_emul_temp) tz->emul_temperature = temperature; else @@ -289,7 +258,6 @@ emul_temp_store(struct device *dev, struct device_attribute *attr, if (!ret) __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); -unlock: mutex_unlock(&tz->lock); return ret ? ret : count; @@ -968,7 +936,16 @@ ssize_t weight_store(struct device *dev, struct device_attribute *attr, return ret; instance = container_of(attr, struct thermal_instance, weight_attr); + + /* Don't race with governors using the 'weight' value */ + mutex_lock(&instance->tz->lock); + instance->weight = weight; + thermal_governor_update_tz(instance->tz, + THERMAL_INSTANCE_WEIGHT_CHANGED); + + mutex_unlock(&instance->tz->lock); + return count; } diff --git a/drivers/thermal/thermal_trace_ipa.h b/drivers/thermal/thermal_trace_ipa.h index 84568db5421b..b16b5dd863d9 100644 --- a/drivers/thermal/thermal_trace_ipa.h +++ b/drivers/thermal/thermal_trace_ipa.h @@ -8,19 +8,14 @@ #include <linux/tracepoint.h> TRACE_EVENT(thermal_power_allocator, - TP_PROTO(struct thermal_zone_device *tz, u32 *req_power, - u32 total_req_power, u32 *granted_power, - u32 total_granted_power, size_t num_actors, - u32 power_range, u32 max_allocatable_power, - int current_temp, s32 delta_temp), - TP_ARGS(tz, req_power, total_req_power, granted_power, - total_granted_power, num_actors, power_range, - max_allocatable_power, current_temp, delta_temp), + TP_PROTO(struct thermal_zone_device *tz, u32 total_req_power, + u32 total_granted_power, int num_actors, u32 power_range, + u32 max_allocatable_power, int current_temp, s32 delta_temp), + TP_ARGS(tz, total_req_power, total_granted_power, num_actors, + power_range, max_allocatable_power, current_temp, delta_temp), TP_STRUCT__entry( __field(int, tz_id ) - __dynamic_array(u32, req_power, num_actors ) __field(u32, total_req_power ) - __dynamic_array(u32, granted_power, num_actors) __field(u32, total_granted_power ) __field(size_t, num_actors ) __field(u32, power_range ) @@ -30,11 +25,7 @@ TRACE_EVENT(thermal_power_allocator, ), TP_fast_assign( __entry->tz_id = tz->id; - memcpy(__get_dynamic_array(req_power), req_power, - num_actors * sizeof(*req_power)); __entry->total_req_power = total_req_power; - memcpy(__get_dynamic_array(granted_power), granted_power, - num_actors * sizeof(*granted_power)); __entry->total_granted_power = total_granted_power; __entry->num_actors = num_actors; 
__entry->power_range = power_range; @@ -43,18 +34,35 @@ TRACE_EVENT(thermal_power_allocator, __entry->delta_temp = delta_temp; ), - TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%d delta_temperature=%d", - __entry->tz_id, - __print_array(__get_dynamic_array(req_power), - __entry->num_actors, 4), - __entry->total_req_power, - __print_array(__get_dynamic_array(granted_power), - __entry->num_actors, 4), + TP_printk("thermal_zone_id=%d total_req_power=%u total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%d delta_temperature=%d", + __entry->tz_id, __entry->total_req_power, __entry->total_granted_power, __entry->power_range, __entry->max_allocatable_power, __entry->current_temp, __entry->delta_temp) ); +TRACE_EVENT(thermal_power_actor, + TP_PROTO(struct thermal_zone_device *tz, int actor_id, u32 req_power, + u32 granted_power), + TP_ARGS(tz, actor_id, req_power, granted_power), + TP_STRUCT__entry( + __field(int, tz_id) + __field(int, actor_id) + __field(u32, req_power) + __field(u32, granted_power) + ), + TP_fast_assign( + __entry->tz_id = tz->id; + __entry->actor_id = actor_id; + __entry->req_power = req_power; + __entry->granted_power = granted_power; + ), + + TP_printk("thermal_zone_id=%d actor_id=%d req_power=%u granted_power=%u", + __entry->tz_id, __entry->actor_id, __entry->req_power, + __entry->granted_power) +); + TRACE_EVENT(thermal_power_allocator_pid, TP_PROTO(struct thermal_zone_device *tz, s32 err, s32 err_integral, s64 p, s64 i, s64 d, s32 output), diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c index e42456442c68..8bffa1e5e206 100644 --- a/drivers/thermal/thermal_trip.c +++ b/drivers/thermal/thermal_trip.c @@ -63,25 +63,21 @@ EXPORT_SYMBOL_GPL(thermal_zone_get_num_trips); */ void __thermal_zone_set_trips(struct thermal_zone_device *tz) { - struct thermal_trip trip; + const struct thermal_trip *trip; int low = -INT_MAX, high = INT_MAX; bool same_trip = false; - int i, ret; + int ret; lockdep_assert_held(&tz->lock); if (!tz->ops->set_trips) return; - for (i = 0; i < tz->num_trips; i++) { + for_each_trip(tz, trip) { bool low_set = false; int trip_low; - ret = __thermal_zone_get_trip(tz, i , &trip); - if (ret) - return; - - trip_low = trip.temperature - trip.hysteresis; + trip_low = trip->temperature - trip->hysteresis; if (trip_low < tz->temperature && trip_low > low) { low = trip_low; @@ -89,9 +85,9 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz) same_trip = false; } - if (trip.temperature > tz->temperature && - trip.temperature < high) { - high = trip.temperature; + if (trip->temperature > tz->temperature && + trip->temperature < high) { + high = trip->temperature; same_trip = low_set; } } @@ -147,46 +143,7 @@ int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id, } EXPORT_SYMBOL_GPL(thermal_zone_get_trip); -int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id, - const struct thermal_trip *trip) -{ - struct thermal_trip t; - int ret; - - if (!tz->ops->set_trip_temp && !tz->ops->set_trip_hyst && !tz->trips) - return -EINVAL; - - ret = __thermal_zone_get_trip(tz, trip_id, &t); - if (ret) - return ret; - - if (t.type != trip->type) - return -EINVAL; - - if (t.temperature != trip->temperature && tz->ops->set_trip_temp) { - ret = tz->ops->set_trip_temp(tz, trip_id, trip->temperature); - if (ret) - return ret; - } - - if (t.hysteresis != trip->hysteresis && 
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index e42456442c68..8bffa1e5e206 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -63,25 +63,21 @@ EXPORT_SYMBOL_GPL(thermal_zone_get_num_trips);
  */
 void __thermal_zone_set_trips(struct thermal_zone_device *tz)
 {
-	struct thermal_trip trip;
+	const struct thermal_trip *trip;
 	int low = -INT_MAX, high = INT_MAX;
 	bool same_trip = false;
-	int i, ret;
+	int ret;
 
 	lockdep_assert_held(&tz->lock);
 
 	if (!tz->ops->set_trips)
 		return;
 
-	for (i = 0; i < tz->num_trips; i++) {
+	for_each_trip(tz, trip) {
 		bool low_set = false;
 		int trip_low;
 
-		ret = __thermal_zone_get_trip(tz, i , &trip);
-		if (ret)
-			return;
-
-		trip_low = trip.temperature - trip.hysteresis;
+		trip_low = trip->temperature - trip->hysteresis;
 
 		if (trip_low < tz->temperature && trip_low > low) {
 			low = trip_low;
@@ -89,9 +85,9 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
 			same_trip = false;
 		}
 
-		if (trip.temperature > tz->temperature &&
-		    trip.temperature < high) {
-			high = trip.temperature;
+		if (trip->temperature > tz->temperature &&
+		    trip->temperature < high) {
+			high = trip->temperature;
 			same_trip = low_set;
 		}
 	}
@@ -147,46 +143,7 @@ int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
 }
 EXPORT_SYMBOL_GPL(thermal_zone_get_trip);
 
-int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id,
-			  const struct thermal_trip *trip)
-{
-	struct thermal_trip t;
-	int ret;
-
-	if (!tz->ops->set_trip_temp && !tz->ops->set_trip_hyst && !tz->trips)
-		return -EINVAL;
-
-	ret = __thermal_zone_get_trip(tz, trip_id, &t);
-	if (ret)
-		return ret;
-
-	if (t.type != trip->type)
-		return -EINVAL;
-
-	if (t.temperature != trip->temperature && tz->ops->set_trip_temp) {
-		ret = tz->ops->set_trip_temp(tz, trip_id, trip->temperature);
-		if (ret)
-			return ret;
-	}
-
-	if (t.hysteresis != trip->hysteresis && tz->ops->set_trip_hyst) {
-		ret = tz->ops->set_trip_hyst(tz, trip_id, trip->hysteresis);
-		if (ret)
-			return ret;
-	}
-
-	if (tz->trips && (t.temperature != trip->temperature || t.hysteresis != trip->hysteresis))
-		tz->trips[trip_id] = *trip;
-
-	thermal_notify_tz_trip_change(tz->id, trip_id, trip->type,
-				      trip->temperature, trip->hysteresis);
-
-	__thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
-
-	return 0;
-}
-
-int thermal_zone_trip_id(struct thermal_zone_device *tz,
+int thermal_zone_trip_id(const struct thermal_zone_device *tz,
 			 const struct thermal_trip *trip)
 {
 	/*
@@ -195,3 +152,24 @@ int thermal_zone_trip_id(struct thermal_zone_device *tz,
 	 */
 	return trip - tz->trips;
 }
+void thermal_zone_trip_updated(struct thermal_zone_device *tz,
+			       const struct thermal_trip *trip)
+{
+	thermal_notify_tz_trip_change(tz->id, thermal_zone_trip_id(tz, trip),
+				      trip->type, trip->temperature,
+				      trip->hysteresis);
+	__thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
+}
+
+void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
+				struct thermal_trip *trip, int temp)
+{
+	if (trip->temperature == temp)
+		return;
+
+	trip->temperature = temp;
+	thermal_notify_tz_trip_change(tz->id, thermal_zone_trip_id(tz, trip),
+				      trip->type, trip->temperature,
+				      trip->hysteresis);
+}
+EXPORT_SYMBOL_GPL(thermal_zone_set_trip_temp);
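With thermal_zone_set_trip() gone, drivers that need to move a trip point use the new exported helper instead. A hypothetical driver-side sketch (the acme_* names are placeholders): thermal_zone_for_each_trip() runs its callback under the zone lock, which is the context thermal_zone_set_trip_temp() expects, and the helper is a no-op when the temperature is unchanged:

struct acme_adjust_data {
	struct thermal_zone_device *tz;
	int new_temp;
};

static int acme_adjust_trip(struct thermal_trip *trip, void *data)
{
	struct acme_adjust_data *ad = data;

	if (trip->type == THERMAL_TRIP_PASSIVE)
		thermal_zone_set_trip_temp(ad->tz, trip, ad->new_temp);

	return 0;
}

static void acme_thresholds_changed(struct thermal_zone_device *tz,
				    int new_temp)
{
	struct acme_adjust_data ad = { .tz = tz, .new_temp = new_temp };

	thermal_zone_for_each_trip(tz, acme_adjust_trip, &ad);

	/* Re-evaluate the zone so the moved trip takes effect. */
	thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
}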
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index efe3e3b85769..fdd0fc7b8f25 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -831,7 +831,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
 	io_data->kiocb->ki_complete(io_data->kiocb, ret);
 
 	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
-		eventfd_signal(io_data->ffs->ffs_eventfd, 1);
+		eventfd_signal(io_data->ffs->ffs_eventfd);
 
 	if (io_data->read)
 		kfree(io_data->to_free);
@@ -2738,7 +2738,7 @@ static void __ffs_event_add(struct ffs_data *ffs,
 	ffs->ev.types[ffs->ev.count++] = type;
 	wake_up_locked(&ffs->ev.waitq);
 	if (ffs->ffs_eventfd)
-		eventfd_signal(ffs->ffs_eventfd, 1);
+		eventfd_signal(ffs->ffs_eventfd);
 }
 
 static void ffs_event_add(struct ffs_data *ffs,
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
index 2d57786d3db7..89e8cf2a2a7d 100644
--- a/drivers/usb/gadget/udc/max3420_udc.c
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -1201,7 +1201,7 @@ static int max3420_probe(struct spi_device *spi)
 	int err, irq;
 	u8 reg[8];
 
-	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) {
+	if (spi->master->flags & SPI_CONTROLLER_HALF_DUPLEX) {
 		dev_err(&spi->dev, "UDC needs full duplex to work\n");
 		return -EINVAL;
 	}
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 8508d37a2aff..6cdc3d805c32 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -288,7 +288,7 @@ static void fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev)
 #define PHYCTRL_LSFE	(1 << 1)	/* Line State Filter Enable */
 #define PHYCTRL_PXE	(1 << 0)	/* PHY oscillator enable */
 
-int fsl_usb2_mpc5121_init(struct platform_device *pdev)
+static int fsl_usb2_mpc5121_init(struct platform_device *pdev)
 {
 	struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct clk *clk;
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 0ddd4b8abecb..1d24da79c399 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -493,7 +493,7 @@ static void vduse_vq_kick(struct vduse_virtqueue *vq)
 		goto unlock;
 
 	if (vq->kickfd)
-		eventfd_signal(vq->kickfd, 1);
+		eventfd_signal(vq->kickfd);
 	else
 		vq->kicked = true;
 unlock:
@@ -911,7 +911,7 @@ static int vduse_kickfd_setup(struct vduse_dev *dev,
 		eventfd_ctx_put(vq->kickfd);
 	vq->kickfd = ctx;
 	if (vq->ready && vq->kicked && vq->kickfd) {
-		eventfd_signal(vq->kickfd, 1);
+		eventfd_signal(vq->kickfd);
 		vq->kicked = false;
 	}
 	spin_unlock(&vq->kick_lock);
@@ -960,7 +960,7 @@ static bool vduse_vq_signal_irqfd(struct vduse_virtqueue *vq)
 
 	spin_lock_irq(&vq->irq_lock);
 	if (vq->ready && vq->cb.trigger) {
-		eventfd_signal(vq->cb.trigger, 1);
+		eventfd_signal(vq->cb.trigger);
 		signal = true;
 	}
 	spin_unlock_irq(&vq->irq_lock);
@@ -1157,7 +1157,7 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
 			fput(f);
 			break;
 		}
-		ret = receive_fd(f, perm_to_file_flags(entry.perm));
+		ret = receive_fd(f, NULL, perm_to_file_flags(entry.perm));
 		fput(f);
 		break;
 	}
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
index c51229fccbd6..d62fbfff20b8 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
@@ -54,7 +54,7 @@ static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg)
 {
 	struct vfio_fsl_mc_irq *mc_irq = (struct vfio_fsl_mc_irq *)arg;
 
-	eventfd_signal(mc_irq->trigger, 1);
+	eventfd_signal(mc_irq->trigger);
 	return IRQ_HANDLED;
 }
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 1929103ee59a..1cbc990d42e0 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -443,7 +443,7 @@ static int vfio_pci_core_runtime_resume(struct device *dev)
 	 */
 	down_write(&vdev->memory_lock);
 	if (vdev->pm_wake_eventfd_ctx) {
-		eventfd_signal(vdev->pm_wake_eventfd_ctx, 1);
+		eventfd_signal(vdev->pm_wake_eventfd_ctx);
 		__vfio_pci_runtime_pm_exit(vdev);
 	}
 	up_write(&vdev->memory_lock);
@@ -1883,7 +1883,7 @@ void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
 		pci_notice_ratelimited(pdev,
 			"Relaying device request to user (#%u)\n", count);
 
-		eventfd_signal(vdev->req_trigger, 1);
+		eventfd_signal(vdev->req_trigger);
 	} else if (count == 0) {
 		pci_warn(pdev,
 			"No device request channel registered, blocked until released by user\n");
@@ -2302,7 +2302,7 @@ pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
 	mutex_lock(&vdev->igate);
 
 	if (vdev->err_trigger)
-		eventfd_signal(vdev->err_trigger, 1);
+		eventfd_signal(vdev->err_trigger);
 
 	mutex_unlock(&vdev->igate);
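All of the eventfd call sites in this series follow the same mechanical rewrite: eventfd_signal() lost its counter argument and now always adds one, since in practice no caller passed anything else. A minimal before/after sketch; my_irq_handler() is an illustrative consumer, not code from this series:

#include <linux/eventfd.h>
#include <linux/interrupt.h>

static irqreturn_t my_irq_handler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	/* Before: eventfd_signal(trigger, 1); */
	eventfd_signal(trigger);	/* the increment of 1 is now implied */

	return IRQ_HANDLED;
}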
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index cbb4bcbfbf83..237beac83809 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -94,7 +94,7 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused)
 		ctx = vfio_irq_ctx_get(vdev, 0);
 		if (WARN_ON_ONCE(!ctx))
 			return;
-		eventfd_signal(ctx->trigger, 1);
+		eventfd_signal(ctx->trigger);
 	}
 }
 
@@ -342,7 +342,7 @@ static irqreturn_t vfio_msihandler(int irq, void *arg)
 {
 	struct eventfd_ctx *trigger = arg;
 
-	eventfd_signal(trigger, 1);
+	eventfd_signal(trigger);
 	return IRQ_HANDLED;
 }
 
@@ -689,11 +689,11 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
 		if (!ctx)
 			continue;
 		if (flags & VFIO_IRQ_SET_DATA_NONE) {
-			eventfd_signal(ctx->trigger, 1);
+			eventfd_signal(ctx->trigger);
 		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
 			uint8_t *bools = data;
 
 			if (bools[i - start])
-				eventfd_signal(ctx->trigger, 1);
+				eventfd_signal(ctx->trigger);
 		}
 	}
 	return 0;
@@ -707,7 +707,7 @@ static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
 		if (*ctx) {
 			if (count) {
-				eventfd_signal(*ctx, 1);
+				eventfd_signal(*ctx);
 			} else {
 				eventfd_ctx_put(*ctx);
 				*ctx = NULL;
@@ -722,7 +722,7 @@ static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
 		trigger = *(uint8_t *)data;
 		if (trigger && *ctx)
-			eventfd_signal(*ctx, 1);
+			eventfd_signal(*ctx);
 
 		return 0;
 	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c
index 665197caed89..61a1bfb68ac7 100644
--- a/drivers/vfio/platform/vfio_platform_irq.c
+++ b/drivers/vfio/platform/vfio_platform_irq.c
@@ -155,7 +155,7 @@ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
 	spin_unlock_irqrestore(&irq_ctx->lock, flags);
 
 	if (ret == IRQ_HANDLED)
-		eventfd_signal(irq_ctx->trigger, 1);
+		eventfd_signal(irq_ctx->trigger);
 
 	return ret;
 }
@@ -164,7 +164,7 @@ static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
 {
 	struct vfio_platform_irq *irq_ctx = dev_id;
 
-	eventfd_signal(irq_ctx->trigger, 1);
+	eventfd_signal(irq_ctx->trigger);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index da7ec77cdaff..173beda74b38 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -178,7 +178,7 @@ static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
 	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
 
 	if (call_ctx)
-		eventfd_signal(call_ctx, 1);
+		eventfd_signal(call_ctx);
 
 	return IRQ_HANDLED;
 }
@@ -189,7 +189,7 @@ static irqreturn_t vhost_vdpa_config_cb(void *private)
 	struct eventfd_ctx *config_ctx = v->config_ctx;
 
 	if (config_ctx)
-		eventfd_signal(config_ctx, 1);
+		eventfd_signal(config_ctx);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e0c181ad17e3..045f666b4f12 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2248,7 +2248,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 		len -= l;
 		if (!len) {
 			if (vq->log_ctx)
-				eventfd_signal(vq->log_ctx, 1);
+				eventfd_signal(vq->log_ctx);
 			return 0;
 		}
 	}
@@ -2271,7 +2271,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 		log_used(vq, (used - (void __user *)vq->used),
 			 sizeof vq->used->flags);
 		if (vq->log_ctx)
-			eventfd_signal(vq->log_ctx, 1);
+			eventfd_signal(vq->log_ctx);
 	}
 	return 0;
 }
@@ -2289,7 +2289,7 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq)
 		log_used(vq, (used - (void __user *)vq->used),
 			 sizeof *vhost_avail_event(vq));
 		if (vq->log_ctx)
-			eventfd_signal(vq->log_ctx, 1);
+			eventfd_signal(vq->log_ctx);
 	}
 	return 0;
 }
@@ -2715,7 +2715,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 		log_used(vq, offsetof(struct vring_used, idx),
 			 sizeof vq->used->idx);
 		if (vq->log_ctx)
-			eventfd_signal(vq->log_ctx, 1);
+			eventfd_signal(vq->log_ctx);
 	}
 	return r;
 }
@@ -2763,7 +2763,7 @@ void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
 	/* Signal the Guest tell them we used something up. */
 	if (vq->call_ctx.ctx && vhost_notify(dev, vq))
-		eventfd_signal(vq->call_ctx.ctx, 1);
+		eventfd_signal(vq->call_ctx.ctx);
 }
 EXPORT_SYMBOL_GPL(vhost_signal);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index f60d5f7bef94..9e942fcda5c3 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -249,7 +249,7 @@ void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
 #define vq_err(vq, fmt, ...) do { \
 		pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
 		if ((vq)->error_ctx) \
-			eventfd_signal((vq)->error_ctx, 1);\
+			eventfd_signal((vq)->error_ctx);\
 	} while (0)
 
 enum {
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index a80939fe2ee6..6a29d2594b91 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -927,8 +927,8 @@ static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
 	if (request_size == 0)
 		return -1;
 
-	if (order <= MAX_ORDER) {
-		/* Call alloc_pages if the size is less than 2^MAX_ORDER */
+	if (order <= MAX_PAGE_ORDER) {
+		/* Call alloc_pages if the size is less than 2^MAX_PAGE_ORDER */
 		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 		if (!page)
 			return -1;
@@ -958,7 +958,7 @@ static void hvfb_release_phymem(struct hv_device *hdev,
 {
 	unsigned int order = get_order(size);
 
-	if (order <= MAX_ORDER)
+	if (order <= MAX_PAGE_ORDER)
 		__free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
 	else
 		dma_free_coherent(&hdev->device,
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index 840ead69654b..a32e5b2924c9 100644
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
@@ -197,7 +197,7 @@ static int vmlfb_alloc_vram(struct vml_info *vinfo,
 		va = &vinfo->vram[i];
 		order = 0;
 
-		while (requested > (PAGE_SIZE << order) && order <= MAX_ORDER)
+		while (requested > (PAGE_SIZE << order) && order <= MAX_PAGE_ORDER)
 			order++;
 
 		err = vmlfb_alloc_vram_area(va, order, 0);
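The MAX_ORDER to MAX_PAGE_ORDER rename in the fbdev hunks above is purely mechanical; the value is still the largest order the buddy allocator serves (inclusive, after the earlier MAX_ORDER semantic change). A sketch of the bounded-allocation idiom these drivers use, with the hvfb-style fallback left to the caller; hv_alloc_frame() is a hypothetical helper, not kernel code:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *hv_alloc_frame(size_t size)
{
	unsigned int order = get_order(size);

	/* Anything above MAX_PAGE_ORDER is too big for alloc_pages(). */
	if (order > MAX_PAGE_ORDER)
		return NULL;	/* caller falls back to dma_alloc_coherent() */

	return alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
}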
diff --git a/drivers/virt/acrn/ioeventfd.c b/drivers/virt/acrn/ioeventfd.c
index ac4037e9f947..4e845c6ca0b5 100644
--- a/drivers/virt/acrn/ioeventfd.c
+++ b/drivers/virt/acrn/ioeventfd.c
@@ -223,7 +223,7 @@ static int acrn_ioeventfd_handler(struct acrn_ioreq_client *client,
 	mutex_lock(&client->vm->ioeventfds_lock);
 	p = hsm_ioeventfd_match(client->vm, addr, val, size, req->type);
 	if (p)
-		eventfd_signal(p->eventfd, 1);
+		eventfd_signal(p->eventfd);
 	mutex_unlock(&client->vm->ioeventfds_lock);
 
 	return 0;
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index bc564adcf499..87f241825bc3 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -994,7 +994,7 @@ e_unmap:
 	return ret;
 }
 
-static int __exit sev_guest_remove(struct platform_device *pdev)
+static void __exit sev_guest_remove(struct platform_device *pdev)
 {
 	struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
 
@@ -1003,8 +1003,6 @@ static int __exit sev_guest_remove(struct platform_device *pdev)
 	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
 	deinit_crypto(snp_dev->crypto);
 	misc_deregister(&snp_dev->misc);
-
-	return 0;
 }
 
 /*
@@ -1013,7 +1011,7 @@ static int __exit sev_guest_remove(struct platform_device *pdev)
  * with the SEV-SNP support, it is named "sev-guest".
  */
 static struct platform_driver sev_guest_driver = {
-	.remove = __exit_p(sev_guest_remove),
+	.remove_new = __exit_p(sev_guest_remove),
 	.driver = {
 		.name = "sev-guest",
 	},
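The sev-guest hunk is one instance of the tree-wide conversion to void-returning platform remove callbacks: .remove_new takes a callback that cannot report failure, which matches reality since the device is going away regardless of the return value. A minimal sketch with placeholder acme_* names:

#include <linux/platform_device.h>
#include <linux/slab.h>

struct acme_priv {
	int dummy;	/* placeholder driver state */
};

static void acme_remove(struct platform_device *pdev)
{
	struct acme_priv *priv = platform_get_drvdata(pdev);

	/* Teardown must be infallible; there is no return value to get wrong. */
	kfree(priv);
}

static struct platform_driver acme_driver = {
	.remove_new = acme_remove,
	.driver = {
		.name = "acme",
	},
};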
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 1fe93e93f5bc..59cdc0292dce 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -33,7 +33,7 @@
 #define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
 					     __GFP_NOMEMALLOC)
 /* The order of free page blocks to report to host */
-#define VIRTIO_BALLOON_HINT_BLOCK_ORDER MAX_ORDER
+#define VIRTIO_BALLOON_HINT_BLOCK_ORDER MAX_PAGE_ORDER
 /* The size of a free page block in bytes */
 #define VIRTIO_BALLOON_HINT_BLOCK_BYTES \
 	(1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index fa5226c198cc..8e3223294442 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -1154,13 +1154,13 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn,
  */
 static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
 {
-	unsigned long order = MAX_ORDER;
+	unsigned long order = MAX_PAGE_ORDER;
 	unsigned long i;
 
 	/*
 	 * We might get called for ranges that don't cover properly aligned
-	 * MAX_ORDER pages; however, we can only online properly aligned
-	 * pages with an order of MAX_ORDER at maximum.
+	 * MAX_PAGE_ORDER pages; however, we can only online properly aligned
+	 * pages with an order of MAX_PAGE_ORDER at maximum.
 	 */
 	while (!IS_ALIGNED(pfn | nr_pages, 1 << order))
 		order--;
@@ -1280,7 +1280,7 @@ static void virtio_mem_online_page(struct virtio_mem *vm,
 	bool do_online;
 
 	/*
-	 * We can get called with any order up to MAX_ORDER. If our subblock
+	 * We can get called with any order up to MAX_PAGE_ORDER. If our subblock
 	 * size is smaller than that and we have a mixture of plugged and
 	 * unplugged subblocks within such a page, we have to process in
 	 * smaller granularity. In that case we'll adjust the order exactly once
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 0eb337a8ec0f..35b6e306026a 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -1147,7 +1147,7 @@ static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
 		if (ioreq->addr == kioeventfd->addr + VIRTIO_MMIO_QUEUE_NOTIFY &&
 		    ioreq->size == kioeventfd->addr_len &&
 		    (ioreq->data & QUEUE_NOTIFY_VQ_MASK) == kioeventfd->vq) {
-			eventfd_signal(kioeventfd->eventfd, 1);
+			eventfd_signal(kioeventfd->eventfd);
 			state = STATE_IORESP_READY;
 			break;
 		}
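As a closing worked example for the virtio_mem hunk above: the order-reduction loop in virtio_mem_fake_online() picks the largest order to which both the start pfn and the length are aligned. With the x86 default MAX_PAGE_ORDER of 10 and hypothetical values pfn = 0x1200 and nr_pages = 0x200, the OR is 0x1200, which is aligned to 1 << 9 pages but not 1 << 10, so the loop settles on order 9 and the range is onlined in 2 MiB (512-page) chunks:

#include <linux/align.h>
#include <linux/mm.h>

static unsigned long fake_online_order(unsigned long pfn,
				       unsigned long nr_pages)
{
	unsigned long order = MAX_PAGE_ORDER;	/* 10 by default on x86 */

	/* Largest order that both the start and the length honor. */
	while (!IS_ALIGNED(pfn | nr_pages, 1UL << order))
		order--;

	return order;	/* pfn = 0x1200, nr_pages = 0x200 gives 9 */
}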