Diffstat (limited to 'drivers'): 857 files changed, 14539 insertions, 6959 deletions
diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c index 580ec796816b..78ca4987e619 100644 --- a/drivers/accessibility/speakup/speakup_dectlk.c +++ b/drivers/accessibility/speakup/speakup_dectlk.c @@ -44,6 +44,7 @@ static struct var_t vars[] = { { CAPS_START, .u.s = {"[:dv ap 160] " } }, { CAPS_STOP, .u.s = {"[:dv ap 100 ] " } }, { RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } }, + { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } }, { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } }, { VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } }, { PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } }, diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index ba45541b1f1f..1e34f846508f 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -11,6 +11,7 @@ menuconfig ACPI depends on ARCH_SUPPORTS_ACPI select PNP select NLS + select CRC32 default y if X86 help Advanced Configuration and Power Interface (ACPI) support for @@ -301,7 +302,7 @@ config ACPI_IPMI help This driver enables the ACPI to access the BMC controller. And it uses the IPMI request/response message to communicate with BMC - controller, which can be found on on the server. + controller, which can be found on the server. To compile this driver as a module, choose M here: the module will be called as acpi_ipmi. diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index bb757148e7ba..b5a8d3e00a52 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -81,6 +81,9 @@ obj-$(CONFIG_ACPI_AC) += ac.o obj-$(CONFIG_ACPI_BUTTON) += button.o obj-$(CONFIG_ACPI_TINY_POWER_BUTTON) += tiny-power-button.o obj-$(CONFIG_ACPI_FAN) += fan.o +fan-objs := fan_core.o +fan-objs += fan_attr.o + obj-$(CONFIG_ACPI_VIDEO) += video.o obj-$(CONFIG_ACPI_TAD) += acpi_tad.o obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index bcae0f03572b..fbe0756259c5 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -21,6 +21,7 @@ #include <linux/pm_domain.h> #include <linux/pm_runtime.h> #include <linux/pwm.h> +#include <linux/pxa2xx_ssp.h> #include <linux/suspend.h> #include <linux/delay.h> @@ -82,7 +83,7 @@ struct lpss_device_desc { const char *clk_con_id; unsigned int prv_offset; size_t prv_size_override; - struct property_entry *properties; + const struct property_entry *properties; void (*setup)(struct lpss_private_data *pdata); bool resume_from_noirq; }; @@ -219,10 +220,16 @@ static void bsw_pwm_setup(struct lpss_private_data *pdata) pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); } -static const struct lpss_device_desc lpt_dev_desc = { +static const struct property_entry lpt_spi_properties[] = { + PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_LPT_SSP), + { } +}; + +static const struct lpss_device_desc lpt_spi_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR | LPSS_SAVE_CTX, .prv_offset = 0x800, + .properties = lpt_spi_properties, }; static const struct lpss_device_desc lpt_i2c_dev_desc = { @@ -282,9 +289,15 @@ static const struct lpss_device_desc bsw_uart_dev_desc = { .properties = uart_properties, }; +static const struct property_entry byt_spi_properties[] = { + PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BYT_SSP), + { } +}; + static const struct lpss_device_desc byt_spi_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, .prv_offset = 0x400, + .properties = byt_spi_properties, }; static const 
struct lpss_device_desc byt_sdio_dev_desc = { @@ -305,11 +318,17 @@ static const struct lpss_device_desc bsw_i2c_dev_desc = { .resume_from_noirq = true, }; +static const struct property_entry bsw_spi_properties[] = { + PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP), + { } +}; + static const struct lpss_device_desc bsw_spi_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, .prv_offset = 0x400, .setup = lpss_deassert_reset, + .properties = bsw_spi_properties, }; static const struct x86_cpu_id lpss_cpu_ids[] = { @@ -329,8 +348,8 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "INTL9C60", LPSS_ADDR(lpss_dma_desc) }, /* Lynxpoint LPSS devices */ - { "INT33C0", LPSS_ADDR(lpt_dev_desc) }, - { "INT33C1", LPSS_ADDR(lpt_dev_desc) }, + { "INT33C0", LPSS_ADDR(lpt_spi_dev_desc) }, + { "INT33C1", LPSS_ADDR(lpt_spi_dev_desc) }, { "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) }, { "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) }, { "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) }, @@ -356,8 +375,8 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) }, /* Broadwell LPSS devices */ - { "INT3430", LPSS_ADDR(lpt_dev_desc) }, - { "INT3431", LPSS_ADDR(lpt_dev_desc) }, + { "INT3430", LPSS_ADDR(lpt_spi_dev_desc) }, + { "INT3431", LPSS_ADDR(lpt_spi_dev_desc) }, { "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) }, { "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) }, { "INT3434", LPSS_ADDR(lpt_uart_dev_desc) }, @@ -366,7 +385,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "INT3437", }, /* Wildcat Point LPSS devices */ - { "INT3438", LPSS_ADDR(lpt_dev_desc) }, + { "INT3438", LPSS_ADDR(lpt_spi_dev_desc) }, { } }; diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 78d621290a35..de3cbf152dee 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -95,7 +95,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev, * Name of the platform device will be the same as @adev's. 
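 *
 * A minimal caller sketch (names hypothetical) now that @properties is
 * const, so the property table can live in rodata:
 *
 *	static const struct property_entry foo_props[] = {
 *		PROPERTY_ENTRY_U32("vendor,example-prop", 1),
 *		{ }
 *	};
 *
 *	pdev = acpi_create_platform_device(adev, foo_props);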
*/ struct platform_device *acpi_create_platform_device(struct acpi_device *adev, - struct property_entry *properties) + const struct property_entry *properties) { struct platform_device *pdev = NULL; struct platform_device_info pdevinfo; diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index 915c2433463d..e7c30ce06e18 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c @@ -169,6 +169,9 @@ acpi_ns_walk_namespace(acpi_object_type type, if (start_node == ACPI_ROOT_OBJECT) { start_node = acpi_gbl_root_node; + if (!start_node) { + return_ACPI_STATUS(AE_NO_NAMESPACE); + } } /* Null child means "get first node" */ diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index 19e50fcbf4d6..598fd19b65fa 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -29,6 +29,7 @@ #undef pr_fmt #define pr_fmt(fmt) "BERT: " fmt +#define ACPI_BERT_PRINT_MAX_LEN 1024 static int bert_disable; @@ -58,8 +59,11 @@ static void __init bert_print_all(struct acpi_bert_region *region, } pr_info_once("Error records from previous boot:\n"); - - cper_estatus_print(KERN_INFO HW_ERR, estatus); + if (region_len < ACPI_BERT_PRINT_MAX_LEN) + cper_estatus_print(KERN_INFO HW_ERR, estatus); + else + pr_info_once("Max print length exceeded, table data is available at:\n" + "/sys/firmware/acpi/tables/data/BERT"); /* * Because the boot error source is "one-time polled" type, @@ -77,7 +81,7 @@ static int __init setup_bert_disable(char *str) { bert_disable = 1; - return 0; + return 1; } __setup("bert_disable", setup_bert_disable); diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 242f3c2d5533..698d67cee052 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -891,7 +891,7 @@ EXPORT_SYMBOL_GPL(erst_clear); static int __init setup_erst_disable(char *str) { erst_disable = 1; - return 0; + return 1; } __setup("erst_disable", setup_erst_disable); diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 0c5c9acc6254..d91ad378c00d 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -1457,33 +1457,35 @@ static struct platform_driver ghes_platform_driver = { .remove = ghes_remove, }; -static int __init ghes_init(void) +void __init acpi_ghes_init(void) { int rc; + sdei_init(); + if (acpi_disabled) - return -ENODEV; + return; switch (hest_disable) { case HEST_NOT_FOUND: - return -ENODEV; + return; case HEST_DISABLED: pr_info(GHES_PFX "HEST is not enabled!\n"); - return -EINVAL; + return; default: break; } if (ghes_disable) { pr_info(GHES_PFX "GHES is not enabled!\n"); - return -EINVAL; + return; } ghes_nmi_init_cxt(); rc = platform_driver_register(&ghes_platform_driver); if (rc) - goto err; + return; rc = apei_osc_setup(); if (rc == 0 && osc_sb_apei_support_acked) @@ -1494,9 +1496,4 @@ static int __init ghes_init(void) pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n"); else pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n"); - - return 0; -err: - return rc; } -device_initcall(ghes_init); diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 0edc1ed47673..6aef1ee5e1bd 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -224,7 +224,7 @@ err: static int __init setup_hest_disable(char *str) { hest_disable = HEST_DISABLED; - return 0; + return 1; } __setup("hest_disable", setup_hest_disable); diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig index 6dba187f4f2e..d4a72835f328 100644 --- a/drivers/acpi/arm64/Kconfig 
+++ b/drivers/acpi/arm64/Kconfig @@ -8,3 +8,13 @@ config ACPI_IORT config ACPI_GTDT bool + +config ACPI_AGDI + bool "Arm Generic Diagnostic Dump and Reset Device Interface" + depends on ARM_SDE_INTERFACE + help + Arm Generic Diagnostic Dump and Reset Device Interface (AGDI) is + a standard that enables issuing a non-maskable diagnostic dump and + reset command. + + If set, the kernel parses AGDI table and listens for the command. diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index 66acbe77f46e..7b9e4045659d 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_ACPI_AGDI) += agdi.o obj-$(CONFIG_ACPI_IORT) += iort.o obj-$(CONFIG_ACPI_GTDT) += gtdt.o obj-y += dma.o diff --git a/drivers/acpi/arm64/agdi.c b/drivers/acpi/arm64/agdi.c new file mode 100644 index 000000000000..4df337d545b7 --- /dev/null +++ b/drivers/acpi/arm64/agdi.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This file implements handling of + * Arm Generic Diagnostic Dump and Reset Interface table (AGDI) + * + * Copyright (c) 2022, Ampere Computing LLC + */ + +#define pr_fmt(fmt) "ACPI: AGDI: " fmt + +#include <linux/acpi.h> +#include <linux/arm_sdei.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> + +struct agdi_data { + int sdei_event; +}; + +static int agdi_sdei_handler(u32 sdei_event, struct pt_regs *regs, void *arg) +{ + nmi_panic(regs, "Arm Generic Diagnostic Dump and Reset SDEI event issued"); + return 0; +} + +static int agdi_sdei_probe(struct platform_device *pdev, + struct agdi_data *adata) +{ + int err; + + err = sdei_event_register(adata->sdei_event, agdi_sdei_handler, pdev); + if (err) { + dev_err(&pdev->dev, "Failed to register for SDEI event %d", + adata->sdei_event); + return err; + } + + err = sdei_event_enable(adata->sdei_event); + if (err) { + sdei_event_unregister(adata->sdei_event); + dev_err(&pdev->dev, "Failed to enable event %d\n", + adata->sdei_event); + return err; + } + + return 0; +} + +static int agdi_probe(struct platform_device *pdev) +{ + struct agdi_data *adata = dev_get_platdata(&pdev->dev); + + if (!adata) + return -EINVAL; + + return agdi_sdei_probe(pdev, adata); +} + +static int agdi_remove(struct platform_device *pdev) +{ + struct agdi_data *adata = dev_get_platdata(&pdev->dev); + int err, i; + + err = sdei_event_disable(adata->sdei_event); + if (err) + return err; + + for (i = 0; i < 3; i++) { + err = sdei_event_unregister(adata->sdei_event); + if (err != -EINPROGRESS) + break; + + schedule(); + } + + return err; +} + +static struct platform_driver agdi_driver = { + .driver = { + .name = "agdi", + }, + .probe = agdi_probe, + .remove = agdi_remove, +}; + +void __init acpi_agdi_init(void) +{ + struct acpi_table_agdi *agdi_table; + struct agdi_data pdata; + struct platform_device *pdev; + acpi_status status; + + status = acpi_get_table(ACPI_SIG_AGDI, 0, + (struct acpi_table_header **) &agdi_table); + if (ACPI_FAILURE(status)) + return; + + if (agdi_table->flags & ACPI_AGDI_SIGNALING_MODE) { + pr_warn("Interrupt signaling is not supported"); + goto err_put_table; + } + + pdata.sdei_event = agdi_table->sdei_event; + + pdev = platform_device_register_data(NULL, "agdi", 0, &pdata, sizeof(pdata)); + if (IS_ERR(pdev)) + goto err_put_table; + + if (platform_driver_register(&agdi_driver)) + platform_device_unregister(pdev); + +err_put_table: + acpi_put_table((struct acpi_table_header *)agdi_table); +} diff --git 
a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 3b23fb775ac4..f2f8f05662de 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1361,9 +1361,17 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res, res[0].start = pmcg->page0_base_address; res[0].end = pmcg->page0_base_address + SZ_4K - 1; res[0].flags = IORESOURCE_MEM; - res[1].start = pmcg->page1_base_address; - res[1].end = pmcg->page1_base_address + SZ_4K - 1; - res[1].flags = IORESOURCE_MEM; + /* + * The initial version in DEN0049C lacked a way to describe register + * page 1, which makes it broken for most PMCG implementations; in + * that case, just let the driver fail gracefully if it expects to + * find a second memory resource. + */ + if (node->revision > 0) { + res[1].start = pmcg->page1_base_address; + res[1].end = pmcg->page1_base_address + SZ_4K - 1; + res[1].flags = IORESOURCE_MEM; + } if (pmcg->overflow_gsiv) acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow", diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index ea31ae01458b..dc208f5f5a1f 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -59,6 +59,10 @@ MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); static const struct acpi_device_id battery_device_ids[] = { {"PNP0C0A", 0}, + + /* Microsoft Surface Go 3 */ + {"MSHW0146", 0}, + {"", 0}, }; @@ -1148,6 +1152,14 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad"), }, }, + { + /* Microsoft Surface Go 3 */ + .callback = battery_notification_delay_quirk, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"), + }, + }, {}, }; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 07f604832fd6..3e58b613a2c4 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -26,6 +26,7 @@ #include <asm/mpspec.h> #include <linux/dmi.h> #endif +#include <linux/acpi_agdi.h> #include <linux/acpi_iort.h> #include <linux/acpi_viot.h> #include <linux/pci.h> @@ -283,6 +284,8 @@ EXPORT_SYMBOL_GPL(osc_pc_lpi_support_confirmed); bool osc_sb_native_usb4_support_confirmed; EXPORT_SYMBOL_GPL(osc_sb_native_usb4_support_confirmed); +bool osc_sb_cppc_not_supported; + static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48"; static void acpi_bus_osc_negotiate_platform_control(void) { @@ -332,21 +335,38 @@ static void acpi_bus_osc_negotiate_platform_control(void) if (ACPI_FAILURE(acpi_run_osc(handle, &context))) return; - kfree(context.ret.pointer); + capbuf_ret = context.ret.pointer; + if (context.ret.length <= OSC_SUPPORT_DWORD) { + kfree(context.ret.pointer); + return; + } + +#ifdef CONFIG_X86 + if (boot_cpu_has(X86_FEATURE_HWP)) + osc_sb_cppc_not_supported = !(capbuf_ret[OSC_SUPPORT_DWORD] & + (OSC_SB_CPC_SUPPORT | OSC_SB_CPCV2_SUPPORT)); +#endif - /* Now run _OSC again with query flag clear */ + /* + * Now run _OSC again with query flag clear and with the caps + * supported by both the OS and the platform. 
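+ * In outline, the negotiation this function now performs is (sketch
+ * using the local variables above):
+ *
+ *	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;	/* pass 1: query */
+ *	acpi_run_osc(handle, &context);
+ *	capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
+ *	capbuf[OSC_QUERY_DWORD] = 0;			/* pass 2: commit */
+ *	acpi_run_osc(handle, &context);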
+ */ capbuf[OSC_QUERY_DWORD] = 0; + capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD]; + kfree(context.ret.pointer); if (ACPI_FAILURE(acpi_run_osc(handle, &context))) return; capbuf_ret = context.ret.pointer; - osc_sb_apei_support_acked = - capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT; - osc_pc_lpi_support_confirmed = - capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT; - osc_sb_native_usb4_support_confirmed = - capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT; + if (context.ret.length > OSC_SUPPORT_DWORD) { + osc_sb_apei_support_acked = + capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT; + osc_pc_lpi_support_confirmed = + capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT; + osc_sb_native_usb4_support_confirmed = + capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT; + } kfree(context.ret.pointer); } @@ -1043,7 +1063,12 @@ struct bus_type acpi_bus_type = { .remove = acpi_device_remove, .uevent = acpi_device_uevent, }; -EXPORT_SYMBOL_GPL(acpi_bus_type); + +int acpi_bus_for_each_dev(int (*fn)(struct device *, void *), void *data) +{ + return bus_for_each_dev(&acpi_bus_type, NULL, data, fn); +} +EXPORT_SYMBOL_GPL(acpi_bus_for_each_dev); /* -------------------------------------------------------------------------- Initialization/Cleanup @@ -1331,6 +1356,8 @@ static int __init acpi_init(void) pci_mmcfg_late_init(); acpi_iort_init(); + acpi_hest_init(); + acpi_ghes_init(); acpi_scan_init(); acpi_ec_init(); acpi_debugfs_init(); @@ -1339,6 +1366,7 @@ static int __init acpi_init(void) acpi_debugger_init(); acpi_setup_sb_notify_handler(); acpi_viot_init(); + acpi_agdi_init(); return 0; } diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 866560cbb082..d418449194ee 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -633,8 +633,8 @@ static bool is_cppc_supported(int revision, int num_ent) * ) */ -#ifndef init_freq_invariance_cppc -static inline void init_freq_invariance_cppc(void) { } +#ifndef arch_init_invariance_cppc +static inline void arch_init_invariance_cppc(void) { } #endif /** @@ -656,6 +656,9 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) acpi_status status; int ret = -EFAULT; + if (osc_sb_cppc_not_supported) + return -ENODEV; + /* Parse the ACPI _CPC table for this CPU. */ status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output, ACPI_TYPE_PACKAGE); @@ -816,7 +819,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) goto out_free; } - init_freq_invariance_cppc(); + arch_init_invariance_cppc(); kfree(output.pointer); return 0; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 0077d2c85df8..a1b871a418f8 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -168,7 +168,7 @@ struct acpi_ec_query { }; static int acpi_ec_submit_query(struct acpi_ec *ec); -static bool advance_transaction(struct acpi_ec *ec, bool interrupt); +static void advance_transaction(struct acpi_ec *ec, bool interrupt); static void acpi_ec_event_handler(struct work_struct *work); struct acpi_ec *first_ec; @@ -441,36 +441,35 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec) return true; } -static bool acpi_ec_submit_event(struct acpi_ec *ec) +static void acpi_ec_submit_event(struct acpi_ec *ec) { + /* + * It is safe to mask the events here, because acpi_ec_close_event() + * will run at least once after this. 
+ */ acpi_ec_mask_events(ec); if (!acpi_ec_event_enabled(ec)) - return false; + return; - if (ec->event_state == EC_EVENT_READY) { - ec_dbg_evt("Command(%s) submitted/blocked", - acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); + if (ec->event_state != EC_EVENT_READY) + return; - ec->event_state = EC_EVENT_IN_PROGRESS; - /* - * If events_to_process is greqter than 0 at this point, the - * while () loop in acpi_ec_event_handler() is still running - * and incrementing events_to_process will cause it to invoke - * acpi_ec_submit_query() once more, so it is not necessary to - * queue up the event work to start the same loop again. - */ - if (ec->events_to_process++ > 0) - return true; - - ec->events_in_progress++; - return queue_work(ec_wq, &ec->work); - } + ec_dbg_evt("Command(%s) submitted/blocked", + acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); + ec->event_state = EC_EVENT_IN_PROGRESS; /* - * The event handling work has not been completed yet, so it needs to be - * flushed. + * If events_to_process is greater than 0 at this point, the while () + * loop in acpi_ec_event_handler() is still running and incrementing + * events_to_process will cause it to invoke acpi_ec_submit_query() once + * more, so it is not necessary to queue up the event work to start the + * same loop again. */ - return true; + if (ec->events_to_process++ > 0) + return; + + ec->events_in_progress++; + queue_work(ec_wq, &ec->work); } static void acpi_ec_complete_event(struct acpi_ec *ec) @@ -655,11 +654,10 @@ static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t acpi_ec_mask_events(ec); } -static bool advance_transaction(struct acpi_ec *ec, bool interrupt) +static void advance_transaction(struct acpi_ec *ec, bool interrupt) { struct transaction *t = ec->curr; bool wakeup = false; - bool ret = false; u8 status; ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id()); @@ -724,12 +722,10 @@ static bool advance_transaction(struct acpi_ec *ec, bool interrupt) out: if (status & ACPI_EC_FLAG_SCI) - ret = acpi_ec_submit_event(ec); + acpi_ec_submit_event(ec); if (wakeup && interrupt) wake_up(&ec->wait); - - return ret; } static void start_transaction(struct acpi_ec *ec) @@ -1242,6 +1238,7 @@ static void acpi_ec_event_handler(struct work_struct *work) acpi_ec_submit_query(ec); spin_lock_irq(&ec->lock); + ec->events_to_process--; } @@ -1250,27 +1247,30 @@ static void acpi_ec_event_handler(struct work_struct *work) * event handling work again regardless of whether or not the query * queued up above is processed successfully. */ - if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) + if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) { + bool guard_timeout; + acpi_ec_complete_event(ec); - else - acpi_ec_close_event(ec); - spin_unlock_irq(&ec->lock); + ec_dbg_evt("Event stopped"); - ec_dbg_evt("Event stopped"); + spin_unlock_irq(&ec->lock); + + guard_timeout = !!ec_guard(ec); - if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && ec_guard(ec)) { spin_lock_irq(&ec->lock); /* Take care of SCI_EVT unless someone else is doing that. 
*/ - if (!ec->curr) + if (guard_timeout && !ec->curr) advance_transaction(ec, false); + } else { + acpi_ec_close_event(ec); - spin_unlock_irq(&ec->lock); + ec_dbg_evt("Event stopped"); } - spin_lock_irq(&ec->lock); ec->events_in_progress--; + spin_unlock_irq(&ec->lock); } @@ -2051,6 +2051,11 @@ void acpi_ec_set_gpe_wake_mask(u8 action) acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action); } +static bool acpi_ec_work_in_progress(struct acpi_ec *ec) +{ + return ec->events_in_progress + ec->queries_in_progress > 0; +} + bool acpi_ec_dispatch_gpe(void) { bool work_in_progress = false; @@ -2066,13 +2071,27 @@ bool acpi_ec_dispatch_gpe(void) return true; /* + * Cancel the SCI wakeup and process all pending events in case there + * are any wakeup ones in there. + * + * Note that if any non-EC GPEs are active at this point, the SCI will + * retrigger after the rearming in acpi_s2idle_wake(), so no events + * should be missed by canceling the wakeup here. + */ + pm_system_cancel_wakeup(); + + /* * Dispatch the EC GPE in-band, but do not report wakeup in any case * to allow the caller to process events properly after that. */ spin_lock_irq(&first_ec->lock); - if (acpi_ec_gpe_status_set(first_ec)) - work_in_progress = advance_transaction(first_ec, false); + if (acpi_ec_gpe_status_set(first_ec)) { + pm_pr_dbg("ACPI EC GPE status set\n"); + + advance_transaction(first_ec, false); + work_in_progress = acpi_ec_work_in_progress(first_ec); + } spin_unlock_irq(&first_ec->lock); @@ -2089,8 +2108,7 @@ bool acpi_ec_dispatch_gpe(void) spin_lock_irq(&first_ec->lock); - work_in_progress = first_ec->events_in_progress + - first_ec->queries_in_progress > 0; + work_in_progress = acpi_ec_work_in_progress(first_ec); spin_unlock_irq(&first_ec->lock); } while (work_in_progress && !pm_wakeup_pending()); diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h index dd9bb8ca2244..44728529a5b6 100644 --- a/drivers/acpi/fan.h +++ b/drivers/acpi/fan.h @@ -6,9 +6,53 @@ * * Add new device IDs before the generic ACPI fan one. 
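 *
 * A consumer is expected to expand the macro into its own match table
 * and add the terminator itself, roughly (sketch):
 *
 *	static const struct acpi_device_id fan_device_ids[] = {
 *		ACPI_FAN_DEVICE_IDS,
 *		{"", 0},
 *	};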
*/ + +#ifndef _ACPI_FAN_H_ +#define _ACPI_FAN_H_ + #define ACPI_FAN_DEVICE_IDS \ {"INT3404", }, /* Fan */ \ {"INTC1044", }, /* Fan for Tiger Lake generation */ \ {"INTC1048", }, /* Fan for Alder Lake generation */ \ {"INTC10A2", }, /* Fan for Raptor Lake generation */ \ {"PNP0C0B", } /* Generic ACPI fan */ + +#define ACPI_FPS_NAME_LEN 20 + +struct acpi_fan_fps { + u64 control; + u64 trip_point; + u64 speed; + u64 noise_level; + u64 power; + char name[ACPI_FPS_NAME_LEN]; + struct device_attribute dev_attr; +}; + +struct acpi_fan_fif { + u8 revision; + u8 fine_grain_ctrl; + u8 step_size; + u8 low_speed_notification; +}; + +struct acpi_fan_fst { + u64 revision; + u64 control; + u64 speed; +}; + +struct acpi_fan { + bool acpi4; + struct acpi_fan_fif fif; + struct acpi_fan_fps *fps; + int fps_count; + struct thermal_cooling_device *cdev; + struct device_attribute fst_speed; + struct device_attribute fine_grain_control; +}; + +int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst); +int acpi_fan_create_attributes(struct acpi_device *device); +void acpi_fan_delete_attributes(struct acpi_device *device); +#endif diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c new file mode 100644 index 000000000000..f15157d40713 --- /dev/null +++ b/drivers/acpi/fan_attr.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * fan_attr.c - Create extra attributes for ACPI Fan driver + * + * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> + * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * Copyright (C) 2022 Intel Corporation. All rights reserved. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/acpi.h> + +#include "fan.h" + +MODULE_LICENSE("GPL"); + +static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr); + int count; + + if (fps->control == 0xFFFFFFFF || fps->control > 100) + count = scnprintf(buf, PAGE_SIZE, "not-defined:"); + else + count = scnprintf(buf, PAGE_SIZE, "%lld:", fps->control); + + if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9) + count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:"); + else + count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->trip_point); + + if (fps->speed == 0xFFFFFFFF) + count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:"); + else + count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->speed); + + if (fps->noise_level == 0xFFFFFFFF) + count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:"); + else + count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->noise_level * 100); + + if (fps->power == 0xFFFFFFFF) + count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined\n"); + else + count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld\n", fps->power); + + return count; +} + +static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev); + struct acpi_fan_fst fst; + int status; + + status = acpi_fan_get_fst(acpi_dev, &fst); + if (status) + return status; + + return sprintf(buf, "%lld\n", fst.speed); +} + +static ssize_t show_fine_grain_control(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev); + struct acpi_fan *fan = 
acpi_driver_data(acpi_dev); + + return sprintf(buf, "%d\n", fan->fif.fine_grain_ctrl); +} + +int acpi_fan_create_attributes(struct acpi_device *device) +{ + struct acpi_fan *fan = acpi_driver_data(device); + int i, status; + + sysfs_attr_init(&fan->fine_grain_control.attr); + fan->fine_grain_control.show = show_fine_grain_control; + fan->fine_grain_control.store = NULL; + fan->fine_grain_control.attr.name = "fine_grain_control"; + fan->fine_grain_control.attr.mode = 0444; + status = sysfs_create_file(&device->dev.kobj, &fan->fine_grain_control.attr); + if (status) + return status; + + /* _FST is present if we are here */ + sysfs_attr_init(&fan->fst_speed.attr); + fan->fst_speed.show = show_fan_speed; + fan->fst_speed.store = NULL; + fan->fst_speed.attr.name = "fan_speed_rpm"; + fan->fst_speed.attr.mode = 0444; + status = sysfs_create_file(&device->dev.kobj, &fan->fst_speed.attr); + if (status) + goto rem_fine_grain_attr; + + for (i = 0; i < fan->fps_count; ++i) { + struct acpi_fan_fps *fps = &fan->fps[i]; + + snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i); + sysfs_attr_init(&fps->dev_attr.attr); + fps->dev_attr.show = show_state; + fps->dev_attr.store = NULL; + fps->dev_attr.attr.name = fps->name; + fps->dev_attr.attr.mode = 0444; + status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr); + if (status) { + int j; + + for (j = 0; j < i; ++j) + sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr); + goto rem_fst_attr; + } + } + + return 0; + +rem_fst_attr: + sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr); + +rem_fine_grain_attr: + sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr); + + return status; +} + +void acpi_fan_delete_attributes(struct acpi_device *device) +{ + struct acpi_fan *fan = acpi_driver_data(device); + int i; + + for (i = 0; i < fan->fps_count; ++i) + sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr); + + sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr); + sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr); +} diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan_core.c index 5cd0ceb50bc8..b9a9a59ddcc1 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan_core.c @@ -1,9 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * acpi_fan.c - ACPI Fan Driver ($Revision: 29 $) + * fan_core.c - ACPI Fan core Driver * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * Copyright (C) 2022 Intel Corporation. All rights reserved. 
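 *
 * With the ACPI 4.0 extensions, the driver also exposes the _FPS table
 * through per-state sysfs attributes; a stateN file prints
 * "control:trip_point:speed:noise_level*100:power", substituting
 * "not-defined" for any field the firmware left at 0xFFFFFFFF. For
 * example (hypothetical values):
 *
 *	$ cat /sys/bus/acpi/devices/PNP0C0B:00/state2
 *	60:2:3200:300:2500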
*/ #include <linux/kernel.h> @@ -45,33 +46,6 @@ static const struct dev_pm_ops acpi_fan_pm = { #define FAN_PM_OPS_PTR NULL #endif -#define ACPI_FPS_NAME_LEN 20 - -struct acpi_fan_fps { - u64 control; - u64 trip_point; - u64 speed; - u64 noise_level; - u64 power; - char name[ACPI_FPS_NAME_LEN]; - struct device_attribute dev_attr; -}; - -struct acpi_fan_fif { - u64 revision; - u64 fine_grain_ctrl; - u64 step_size; - u64 low_speed_notification; -}; - -struct acpi_fan { - bool acpi4; - struct acpi_fan_fif fif; - struct acpi_fan_fps *fps; - int fps_count; - struct thermal_cooling_device *cdev; -}; - static struct platform_driver acpi_fan_driver = { .probe = acpi_fan_probe, .remove = acpi_fan_remove, @@ -89,25 +63,29 @@ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long struct acpi_device *device = cdev->devdata; struct acpi_fan *fan = acpi_driver_data(device); - if (fan->acpi4) - *state = fan->fps_count - 1; - else + if (fan->acpi4) { + if (fan->fif.fine_grain_ctrl) + *state = 100 / fan->fif.step_size; + else + *state = fan->fps_count - 1; + } else { *state = 1; + } + return 0; } -static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state) +int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - struct acpi_fan *fan = acpi_driver_data(device); union acpi_object *obj; acpi_status status; - int control, i; + int ret = 0; status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer); if (ACPI_FAILURE(status)) { dev_err(&device->dev, "Get fan state failed\n"); - return status; + return -ENODEV; } obj = buffer.pointer; @@ -115,35 +93,52 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state) obj->package.count != 3 || obj->package.elements[1].type != ACPI_TYPE_INTEGER) { dev_err(&device->dev, "Invalid _FST data\n"); - status = -EINVAL; + ret = -EINVAL; goto err; } - control = obj->package.elements[1].integer.value; + fst->revision = obj->package.elements[0].integer.value; + fst->control = obj->package.elements[1].integer.value; + fst->speed = obj->package.elements[2].integer.value; + +err: + kfree(obj); + return ret; +} + +static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state) +{ + struct acpi_fan *fan = acpi_driver_data(device); + struct acpi_fan_fst fst; + int status, i; + + status = acpi_fan_get_fst(device, &fst); + if (status) + return status; + + if (fan->fif.fine_grain_ctrl) { + /* Per the spec, this control value should match what we set via _FSL */ + if (fst.control > 100) { + dev_dbg(&device->dev, "Invalid control value returned\n"); + goto match_fps; + } + + *state = (int) fst.control / fan->fif.step_size; + return 0; + } + +match_fps: for (i = 0; i < fan->fps_count; i++) { - /* - * When Fine Grain Control is set, return the state - * corresponding to maximum fan->fps[i].control - * value compared to the current speed. Here the - * fan->fps[] is sorted array with increasing speed. - */ - if (fan->fif.fine_grain_ctrl && control < fan->fps[i].control) { - i = (i > 0) ? 
i - 1 : 0; + if (fst.control == fan->fps[i].control) break; - } else if (control == fan->fps[i].control) { - break; - } } if (i == fan->fps_count) { dev_dbg(&device->dev, "Invalid control value returned\n"); - status = -EINVAL; - goto err; + return -EINVAL; } *state = i; -err: - kfree(obj); return status; } @@ -187,15 +182,30 @@ static int fan_set_state_acpi4(struct acpi_device *device, unsigned long state) { struct acpi_fan *fan = acpi_driver_data(device); acpi_status status; + u64 value = state; + int max_state; - if (state >= fan->fps_count) + if (fan->fif.fine_grain_ctrl) + max_state = 100 / fan->fif.step_size; + else + max_state = fan->fps_count - 1; + + if (state > max_state) return -EINVAL; - status = acpi_execute_simple_method(device->handle, "_FSL", - fan->fps[state].control); + if (fan->fif.fine_grain_ctrl) { + value *= fan->fif.step_size; + /* The spec only allows compensating for the last step */ + if (value + fan->fif.step_size > 100) + value = 100; + } else { + value = fan->fps[state].control; + } + + status = acpi_execute_simple_method(device->handle, "_FSL", value); if (ACPI_FAILURE(status)) { dev_dbg(&device->dev, "Failed to set state by _FSL\n"); - return status; + return -ENODEV; } return 0; @@ -237,7 +247,8 @@ static int acpi_fan_get_fif(struct acpi_device *device) struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_fan *fan = acpi_driver_data(device); struct acpi_buffer format = { sizeof("NNNN"), "NNNN" }; - struct acpi_buffer fif = { sizeof(fan->fif), &fan->fif }; + u64 fields[4]; + struct acpi_buffer fif = { sizeof(fields), fields }; union acpi_object *obj; acpi_status status; @@ -258,6 +269,17 @@ static int acpi_fan_get_fif(struct acpi_device *device) status = -EINVAL; } + fan->fif.revision = fields[0]; + fan->fif.fine_grain_ctrl = fields[1]; + fan->fif.step_size = fields[2]; + fan->fif.low_speed_notification = fields[3]; + + /* If a firmware bug reports a step size of 0, use 1 instead */ + if (!fan->fif.step_size) + fan->fif.step_size = 1; + /* If the step size is > 9, clamp it to 9 (the spec allows values 1-9) */ + else if (fan->fif.step_size > 9) + fan->fif.step_size = 9; err: kfree(obj); return status; @@ -270,39 +292,6 @@ static int acpi_fan_speed_cmp(const void *a, const void *b) return fps1->speed - fps2->speed; } -static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr); - int count; - - if (fps->control == 0xFFFFFFFF || fps->control > 100) - count = scnprintf(buf, PAGE_SIZE, "not-defined:"); - else - count = scnprintf(buf, PAGE_SIZE, "%lld:", fps->control); - - if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9) - count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:"); - else - count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->trip_point); - - if (fps->speed == 0xFFFFFFFF) - count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:"); - else - count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->speed); - - if (fps->noise_level == 0xFFFFFFFF) - count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined:"); - else - count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->noise_level * 100); - - if (fps->power == 0xFFFFFFFF) - count += scnprintf(&buf[count], PAGE_SIZE - count, "not-defined\n"); - else - count += scnprintf(&buf[count], PAGE_SIZE - count, "%lld\n", fps->power); - - return count; -} - static int acpi_fan_get_fps(struct acpi_device *device) { struct acpi_fan *fan = 
acpi_driver_data(device); @@ -347,25 +336,6 @@ static int acpi_fan_get_fps(struct acpi_device *device) sort(fan->fps, fan->fps_count, sizeof(*fan->fps), acpi_fan_speed_cmp, NULL); - for (i = 0; i < fan->fps_count; ++i) { - struct acpi_fan_fps *fps = &fan->fps[i]; - - snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i); - sysfs_attr_init(&fps->dev_attr.attr); - fps->dev_attr.show = show_state; - fps->dev_attr.store = NULL; - fps->dev_attr.attr.name = fps->name; - fps->dev_attr.attr.mode = 0444; - status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr); - if (status) { - int j; - - for (j = 0; j < i; ++j) - sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr); - break; - } - } - err: kfree(obj); return status; @@ -396,6 +366,10 @@ static int acpi_fan_probe(struct platform_device *pdev) if (result) return result; + result = acpi_fan_create_attributes(device); + if (result) + return result; + fan->acpi4 = true; } else { result = acpi_device_update_power(device, NULL); @@ -437,12 +411,8 @@ static int acpi_fan_probe(struct platform_device *pdev) return 0; err_end: - if (fan->acpi4) { - int i; - - for (i = 0; i < fan->fps_count; ++i) - sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr); - } + if (fan->acpi4) + acpi_fan_delete_attributes(device); return result; } @@ -453,10 +423,8 @@ static int acpi_fan_remove(struct platform_device *pdev) if (fan->acpi4) { struct acpi_device *device = ACPI_COMPANION(&pdev->dev); - int i; - for (i = 0; i < fan->fps_count; ++i) - sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr); + acpi_fan_delete_attributes(device); } sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling"); sysfs_remove_link(&fan->cdev->device.kobj, "device"); diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 457e11d851b8..628bf8f18130 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -96,8 +96,6 @@ void acpi_scan_table_notify(void); extern struct list_head acpi_bus_id_list; -#define ACPI_MAX_DEVICE_INSTANCES 4096 - struct acpi_device_bus_id { const char *bus_id; struct ida instance_ida; diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 45c5c0e45e33..7a70c4bfc23c 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -642,22 +642,24 @@ u64 acpi_os_get_timer(void) (ACPI_100NSEC_PER_SEC / HZ); } -acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width) +acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width) { u32 dummy; - if (!value) + if (value) + *value = 0; + else value = &dummy; - *value = 0; if (width <= 8) { - *(u8 *) value = inb(port); + *value = inb(port); } else if (width <= 16) { - *(u16 *) value = inw(port); + *value = inw(port); } else if (width <= 32) { - *(u32 *) value = inl(port); + *value = inl(port); } else { - BUG(); + pr_debug("%s: Access width %d not supported\n", __func__, width); + return AE_BAD_PARAMETER; } return AE_OK; @@ -674,7 +676,8 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width) } else if (width <= 32) { outl(value, port); } else { - BUG(); + pr_debug("%s: Access width %d not supported\n", __func__, width); + return AE_BAD_PARAMETER; } return AE_OK; diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index d54fb8e54671..58647051c948 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -185,7 +185,7 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource, if (!p || !p->interrupt_count) { /* * IRQ descriptors may have no IRQ# bits set, - * 
particularly those those w/ _STA disabled + * particularly those w/ _STA disabled */ pr_debug("Blank _CRS IRQ resource\n"); return AE_OK; diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index b76db99cced3..6f9e75d14808 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -22,8 +22,6 @@ #include <linux/slab.h> #include <linux/dmi.h> #include <linux/platform_data/x86/apple.h> -#include <acpi/apei.h> /* for acpi_hest_init() */ - #include "internal.h" #define ACPI_PCI_ROOT_CLASS "pci_bridge" @@ -943,7 +941,6 @@ out_release_info: void __init acpi_pci_root_init(void) { - acpi_hest_init(); if (acpi_pci_disabled) return; diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 86560a28751b..32b20efff5f8 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -96,6 +96,11 @@ static const struct dmi_system_id processor_power_dmi_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, (void *)1}, + /* T40 can not handle C3 idle state */ + { set_max_cstate, "IBM ThinkPad T40", { + DMI_MATCH(DMI_SYS_VENDOR, "IBM"), + DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")}, + (void *)2}, {}, }; @@ -1075,6 +1080,11 @@ static int flatten_lpi_states(struct acpi_processor *pr, return 0; } +int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) +{ + return -EOPNOTSUPP; +} + static int acpi_processor_get_lpi_info(struct acpi_processor *pr) { int ret, i; @@ -1083,6 +1093,11 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) struct acpi_device *d = NULL; struct acpi_lpi_states_array info[2], *tmp, *prev, *curr; + /* make sure our architecture has support */ + ret = acpi_processor_ffh_lpi_probe(pr->id); + if (ret == -EOPNOTSUPP) + return ret; + if (!osc_pc_lpi_support_confirmed) return -EOPNOTSUPP; @@ -1134,11 +1149,6 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) return 0; } -int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) -{ - return -ENODEV; -} - int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) { return -ENODEV; diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index d0986bda2964..12bbfe833609 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -541,7 +541,8 @@ acpi_device_data_of_node(const struct fwnode_handle *fwnode) if (is_acpi_device_node(fwnode)) { const struct acpi_device *adev = to_acpi_device_node(fwnode); return &adev->data; - } else if (is_acpi_data_node(fwnode)) { + } + if (is_acpi_data_node(fwnode)) { const struct acpi_data_node *dn = to_acpi_data_node(fwnode); return &dn->data; } @@ -685,7 +686,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, */ if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) { if (index) - return -EINVAL; + return -ENOENT; device = acpi_fetch_acpi_dev(obj->reference.handle); if (!device) @@ -739,14 +740,19 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, return -EINVAL; } - /* assume following integer elements are all args */ + /* + * Assume the following integer elements are all args. + * Stop counting on the first reference or end of the + * package arguments. In case of neither reference, + * nor integer, return an error, we can't parse it. 
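+ * E.g. in a _DSD entry such as (ASL sketch):
+ *
+ *	Package () { "endpoints", Package () { ^DEV0, 0, 1, ^DEV1, 2 } }
+ *
+ * index 0 resolves to ^DEV0 with args {0, 1} and index 1 resolves to
+ * ^DEV1 with args {2}, because counting stops at the next reference.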
+ */ for (i = 0; element + i < end && i < num_args; i++) { int type = element[i].type; + if (type == ACPI_TYPE_LOCAL_REFERENCE) + break; if (type == ACPI_TYPE_INTEGER) nargs++; - else if (type == ACPI_TYPE_LOCAL_REFERENCE) - break; else return -EINVAL; } @@ -950,7 +956,7 @@ static int acpi_data_prop_read(const struct acpi_device_data *data, if (proptype != DEV_PROP_STRING && nval > obj->package.count) return -EOVERFLOW; - else if (nval <= 0) + if (nval == 0) return -EINVAL; items = obj->package.elements; @@ -1012,14 +1018,10 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, const struct list_head *head; struct list_head *next; - if (!child || is_acpi_device_node(child)) { + if ((!child || is_acpi_device_node(child)) && adev) { struct acpi_device *child_adev; - if (adev) - head = &adev->children; - else - goto nondev; - + head = &adev->children; if (list_empty(head)) goto nondev; @@ -1089,7 +1091,8 @@ acpi_node_get_parent(const struct fwnode_handle *fwnode) if (is_acpi_data_node(fwnode)) { /* All data nodes have parent pointer so just return that */ return to_acpi_data_node(fwnode)->parent; - } else if (is_acpi_device_node(fwnode)) { + } + if (is_acpi_device_node(fwnode)) { struct device *dev = to_acpi_device_node(fwnode)->dev.parent; if (dev) diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 1331756d4cfc..5ffd87ac42b3 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -477,7 +477,8 @@ static void acpi_device_del(struct acpi_device *device) list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) if (!strcmp(acpi_device_bus_id->bus_id, acpi_device_hid(device))) { - ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no); + ida_free(&acpi_device_bus_id->instance_ida, + device->pnp.instance_no); if (ida_is_empty(&acpi_device_bus_id->instance_ida)) { list_del(&acpi_device_bus_id->node); kfree_const(acpi_device_bus_id->bus_id); @@ -642,7 +643,7 @@ static int acpi_device_set_name(struct acpi_device *device, struct ida *instance_ida = &acpi_device_bus_id->instance_ida; int result; - result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL); + result = ida_alloc(instance_ida, GFP_KERNEL); if (result < 0) return result; @@ -1377,11 +1378,11 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp, if (info->valid & ACPI_VALID_HID) { acpi_add_id(pnp, info->hardware_id.string); pnp->type.platform_id = 1; - if (info->valid & ACPI_VALID_CID) { - cid_list = &info->compatible_id_list; - for (i = 0; i < cid_list->count; i++) - acpi_add_id(pnp, cid_list->ids[i].string); - } + } + if (info->valid & ACPI_VALID_CID) { + cid_list = &info->compatible_id_list; + for (i = 0; i < cid_list->count; i++) + acpi_add_id(pnp, cid_list->ids[i].string); } if (info->valid & ACPI_VALID_ADR) { pnp->bus_address = info->address; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index a60ff5dfed3a..c992e57b2c79 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -736,21 +736,15 @@ bool acpi_s2idle_wake(void) return true; } - /* Check non-EC GPE wakeups and dispatch the EC GPE. */ + /* + * Check non-EC GPE wakeups and if there are none, cancel the + * SCI-related wakeup and dispatch the EC GPE. + */ if (acpi_ec_dispatch_gpe()) { pm_pr_dbg("ACPI non-EC GPE wakeup\n"); return true; } - /* - * Cancel the SCI wakeup and process all pending events in case - * there are any wakeup ones in there. 
- * - * Note that if any non-EC GPEs are active at this point, the - * SCI will retrigger after the rearming below, so no events - * should be missed by canceling the wakeup here. - */ - pm_system_cancel_wakeup(); acpi_os_wait_events_complete(); /* @@ -764,6 +758,9 @@ bool acpi_s2idle_wake(void) return true; } + pm_pr_dbg("Rearming ACPI SCI for wakeup\n"); + + pm_wakeup_clear(acpi_sci_irq); rearm_wake_irq(acpi_sci_irq); } @@ -874,12 +871,7 @@ static inline void acpi_sleep_syscore_init(void) {} #ifdef CONFIG_HIBERNATION static unsigned long s4_hardware_signature; static struct acpi_table_facs *facs; -static int sigcheck = -1; /* Default behaviour is just to warn */ - -void __init acpi_check_s4_hw_signature(int check) -{ - sigcheck = check; -} +int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */ static int acpi_hibernation_begin(pm_message_t stage) { @@ -1004,7 +996,7 @@ static void acpi_sleep_hibernate_setup(void) hibernation_set_ops(old_suspend_ordering ? &acpi_hibernation_ops_old : &acpi_hibernation_ops); sleep_states[ACPI_STATE_S4] = 1; - if (!sigcheck) + if (!acpi_check_s4_hw_signature) return; acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs); @@ -1016,7 +1008,7 @@ static void acpi_sleep_hibernate_setup(void) */ s4_hardware_signature = facs->hardware_signature; - if (sigcheck > 0) { + if (acpi_check_s4_hw_signature > 0) { /* * If we're actually obeying the ACPI specification * then the signature is written out as part of the diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 0741a4933f62..ceee808f7f2a 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -400,7 +400,7 @@ int __init_or_acpilib acpi_table_parse_entries_array( acpi_get_table(id, instance, &table_header); if (!table_header) { - pr_warn("%4.4s not present\n", id); + pr_debug("%4.4s not present\n", id); return -ENODEV; } @@ -545,7 +545,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = { ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT, - ACPI_SIG_NHLT, ACPI_SIG_AEST }; + ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI }; #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header) diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 4f64713e9917..becc198e4c22 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -415,6 +415,81 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "GA503"), }, }, + /* + * Clevo NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2 have both a + * working native and video interface. However the default detection + * mechanism first registers the video interface before unregistering + * it again and switching to the native interface during boot. This + * results in a dangling SBIOS request for backlight change for some + * reason, causing the backlight to switch to ~2% once per boot on the + * first power cord connect or disconnect event. Setting the native + * interface explicitly circumvents this buggy behaviour, by avoiding + * the unregistering process. 
+ */ + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "AURA1501"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xNU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xNU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xNU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + }, + }, /* * Desktops which falsely report a backlight and which our heuristics diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c index abc06e7f89d8..ed889f827f53 100644 --- a/drivers/acpi/x86/s2idle.c +++ b/drivers/acpi/x86/s2idle.c @@ -424,15 +424,11 @@ static int lps0_device_attach(struct acpi_device *adev, mem_sleep_current = PM_SUSPEND_TO_IDLE; /* - * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't - * use intel-hid or intel-vbtn but require the EC GPE to be enabled while - * suspended for certain wakeup devices to work, so mark it as wakeup-capable. - * - * Only enable on !AMD as enabling this universally causes problems for a number - * of AMD based systems. + * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the + * EC GPE to be enabled while suspended for certain wakeup devices to + * work, so mark it as wakeup-capable. 
*/ - if (!acpi_s2idle_vendor_amd()) - acpi_ec_mark_gpe_for_wake(); + acpi_ec_mark_gpe_for_wake(); return 0; } diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index ffdeed5334d6..664070fc8349 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -285,6 +285,27 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), }, { + /* Lenovo Yoga Tablet 1050F/L */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."), + DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"), + DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"), + /* Partial match on beginning of BIOS version */ + DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + }, + { + /* Nextbook Ares 8 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + }, + { /* Whitelabel (sold as various brands) TM800A550L */ .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index e1a5eca3ae3c..d3bd14aaabf6 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -370,6 +370,7 @@ int amba_driver_register(struct amba_driver *drv) return driver_register(&drv->drv); } +EXPORT_SYMBOL(amba_driver_register); /** * amba_driver_unregister - remove an AMBA device driver @@ -383,7 +384,7 @@ void amba_driver_unregister(struct amba_driver *drv) { driver_unregister(&drv->drv); } - +EXPORT_SYMBOL(amba_driver_unregister); static void amba_device_release(struct device *dev) { @@ -642,6 +643,7 @@ int amba_device_register(struct amba_device *dev, struct resource *parent) return amba_device_add(dev, parent); } +EXPORT_SYMBOL(amba_device_register); /** * amba_device_put - put an AMBA device @@ -668,66 +670,7 @@ void amba_device_unregister(struct amba_device *dev) { device_unregister(&dev->dev); } - - -struct find_data { - struct amba_device *dev; - struct device *parent; - const char *busid; - unsigned int id; - unsigned int mask; -}; - -static int amba_find_match(struct device *dev, void *data) -{ - struct find_data *d = data; - struct amba_device *pcdev = to_amba_device(dev); - int r; - - r = (pcdev->periphid & d->mask) == d->id; - if (d->parent) - r &= d->parent == dev->parent; - if (d->busid) - r &= strcmp(dev_name(dev), d->busid) == 0; - - if (r) { - get_device(dev); - d->dev = pcdev; - } - - return r; -} - -/** - * amba_find_device - locate an AMBA device given a bus id - * @busid: bus id for device (or NULL) - * @parent: parent device (or NULL) - * @id: peripheral ID (or 0) - * @mask: peripheral ID mask (or 0) - * - * Return the AMBA device corresponding to the supplied parameters. - * If no device matches, returns NULL. - * - * NOTE: When a valid device is found, its refcount is - * incremented, and must be decremented before the returned - * reference. 
- */ -struct amba_device * -amba_find_device(const char *busid, struct device *parent, unsigned int id, - unsigned int mask) -{ - struct find_data data; - - data.dev = NULL; - data.parent = parent; - data.busid = busid; - data.id = id; - data.mask = mask; - - bus_for_each_dev(&amba_bustype, NULL, &data, amba_find_match); - - return data.dev; -} +EXPORT_SYMBOL(amba_device_unregister); /** * amba_request_regions - request all mem regions associated with device @@ -749,6 +692,7 @@ int amba_request_regions(struct amba_device *dev, const char *name) return ret; } +EXPORT_SYMBOL(amba_request_regions); /** * amba_release_regions - release mem regions associated with device @@ -763,11 +707,4 @@ void amba_release_regions(struct amba_device *dev) size = resource_size(&dev->res); release_mem_region(dev->res.start, size); } - -EXPORT_SYMBOL(amba_driver_register); -EXPORT_SYMBOL(amba_driver_unregister); -EXPORT_SYMBOL(amba_device_register); -EXPORT_SYMBOL(amba_device_unregister); -EXPORT_SYMBOL(amba_find_device); -EXPORT_SYMBOL(amba_request_regions); EXPORT_SYMBOL(amba_release_regions); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 67f88027680a..0c854aebfe0b 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2007,6 +2007,9 @@ static bool ata_log_supported(struct ata_device *dev, u8 log) { struct ata_port *ap = dev->link->ap; + if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR) + return false; + if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1)) return false; return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false; @@ -2445,23 +2448,21 @@ static void ata_dev_config_cpr(struct ata_device *dev) struct ata_cpr_log *cpr_log = NULL; u8 *desc, *buf = NULL; - if (!ata_identify_page_supported(dev, - ATA_LOG_CONCURRENT_POSITIONING_RANGES)) + if (ata_id_major_version(dev->id) < 11 || + !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES)) goto out; /* - * Read IDENTIFY DEVICE data log, page 0x47 - * (concurrent positioning ranges). We can have at most 255 32B range - * descriptors plus a 64B header. + * Read the concurrent positioning ranges log (0x47). We can have at + * most 255 32B range descriptors plus a 64B header. */ buf_len = (64 + 255 * 32 + 511) & ~511; buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) goto out; - err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, - ATA_LOG_CONCURRENT_POSITIONING_RANGES, - buf, buf_len >> 9); + err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES, + 0, buf, buf_len >> 9); if (err_mask) goto out; @@ -4028,6 +4029,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* devices that don't properly handle TRIM commands */ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, + { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, }, /* * As defined, the DRAT (Deterministic Read After Trim) and RZAT @@ -4073,6 +4075,13 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + /* + * This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY + * log page is accessed. Ensure we never ask for this log page with + * these devices. 
+ */ + { "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR }, + /* End Marker */ { } }; diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 7abc7e04f656..6fa4a2faf49c 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -920,6 +920,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) pci_write_config_byte(dev, 0x5a, irqmask); /* + * HPT371 chips physically have only one channel, the secondary one, + * but the primary channel registers do exist! Go figure... + * So, we manually disable the non-existing channel here + * (if the BIOS hasn't done this already). + */ + if (dev->device == PCI_DEVICE_ID_TTI_HPT371) { + u8 mcr1; + + pci_read_config_byte(dev, 0x50, &mcr1); + mcr1 &= ~0x04; + pci_write_config_byte(dev, 0x50, mcr1); + } + + /* * default to pci clock. make sure MA15/16 are set to output * to prevent drives having problems with 40-pin cables. Needed * for some drives such as IBM-DTLA which will not enter ready @@ -950,14 +964,14 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) if ((freq >> 12) != 0xABCDE) { int i; - u8 sr; + u16 sr; u32 total = 0; dev_warn(&dev->dev, "BIOS has not set timing clocks\n"); /* This is the process the HPT371 BIOS is reported to use */ for (i = 0; i < 128; i++) { - pci_read_config_byte(dev, 0x78, &sr); + pci_read_config_word(dev, 0x78, &sr); total += sr & 0x1FF; udelay(15); } diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index da0152116d9f..556034a15430 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -322,7 +322,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host, static ssize_t fsl_sata_intr_coalescing_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sysfs_emit(buf, "%d %d\n", + return sysfs_emit(buf, "%u %u\n", intr_coalescing_count, intr_coalescing_ticks); } @@ -332,10 +332,8 @@ static ssize_t fsl_sata_intr_coalescing_store(struct device *dev, { unsigned int coalescing_count, coalescing_ticks; - if (sscanf(buf, "%d%d", - &coalescing_count, - &coalescing_ticks) != 2) { - printk(KERN_ERR "fsl-sata: wrong parameter format.\n"); + if (sscanf(buf, "%u%u", &coalescing_count, &coalescing_ticks) != 2) { + dev_err(dev, "fsl-sata: wrong parameter format.\n"); return -EINVAL; } @@ -359,7 +357,7 @@ static ssize_t fsl_sata_rx_watermark_show(struct device *dev, rx_watermark &= 0x1f; spin_unlock_irqrestore(&host->lock, flags); - return sysfs_emit(buf, "%d\n", rx_watermark); + return sysfs_emit(buf, "%u\n", rx_watermark); } static ssize_t fsl_sata_rx_watermark_store(struct device *dev, @@ -373,8 +371,8 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev, void __iomem *csr_base = host_priv->csr_base; u32 temp; - if (sscanf(buf, "%d", &rx_watermark) != 1) { - printk(KERN_ERR "fsl-sata: wrong parameter format.\n"); + if (kstrtouint(buf, 10, &rx_watermark) < 0) { + dev_err(dev, "fsl-sata: wrong parameter format.\n"); return -EINVAL; } @@ -382,8 +380,8 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev, temp = ioread32(csr_base + TRANSCFG); temp &= 0xffffffe0; iowrite32(temp | rx_watermark, csr_base + TRANSCFG); - spin_unlock_irqrestore(&host->lock, flags); + return strlen(buf); } diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 422753d52244..a31ffe16e626 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -1112,6 +1112,8 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags); skb_data3 = skb->data[3]; paddr = 
dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&eni_dev->pci_dev->dev, paddr)) + return enq_next; ENI_PRV_PADDR(skb) = paddr; /* prepare DMA queue entries */ j = 0; diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 3bc3c314a467..4f67404fe64c 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1676,6 +1676,8 @@ static int fs_init(struct fs_dev *dev) dev->hw_base = pci_resource_start(pci_dev, 0); dev->base = ioremap(dev->hw_base, 0x1000); + if (!dev->base) + return 1; reset_chip (dev); diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c index 38ba08628ccb..2578b2d45439 100644 --- a/drivers/auxdisplay/lcd2s.c +++ b/drivers/auxdisplay/lcd2s.c @@ -238,7 +238,7 @@ static int lcd2s_redefine_char(struct charlcd *lcd, char *esc) if (buf[1] > 7) return 1; - i = 0; + i = 2; shift = 0; value = 0; while (*esc && i < LCD2S_CHARACTER_SIZE + 2) { @@ -298,6 +298,10 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c, I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)) return -EIO; + lcd2s = devm_kzalloc(&i2c->dev, sizeof(*lcd2s), GFP_KERNEL); + if (!lcd2s) + return -ENOMEM; + /* Test, if the display is responding */ err = lcd2s_i2c_smbus_write_byte(i2c, LCD2S_CMD_DISPLAY_OFF); if (err < 0) @@ -307,12 +311,6 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c, if (!lcd) return -ENOMEM; - lcd2s = kzalloc(sizeof(struct lcd2s_data), GFP_KERNEL); - if (!lcd2s) { - err = -ENOMEM; - goto fail1; - } - lcd->drvdata = lcd2s; lcd2s->i2c = i2c; lcd2s->charlcd = lcd; @@ -321,26 +319,24 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c, err = device_property_read_u32(&i2c->dev, "display-height-chars", &lcd->height); if (err) - goto fail2; + goto fail1; err = device_property_read_u32(&i2c->dev, "display-width-chars", &lcd->width); if (err) - goto fail2; + goto fail1; lcd->ops = &lcd2s_ops; err = charlcd_register(lcd2s->charlcd); if (err) - goto fail2; + goto fail1; i2c_set_clientdata(i2c, lcd2s); return 0; -fail2: - kfree(lcd2s); fail1: - kfree(lcd); + charlcd_free(lcd2s->charlcd); return err; } @@ -349,7 +345,7 @@ static int lcd2s_i2c_remove(struct i2c_client *i2c) struct lcd2s_data *lcd2s = i2c_get_clientdata(i2c); charlcd_unregister(lcd2s->charlcd); - kfree(lcd2s->charlcd); + charlcd_free(lcd2s->charlcd); return 0; } diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 976154140f0b..1d6636ebaac5 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -339,6 +339,46 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) return !ret; } +#ifdef CONFIG_ACPI_CPPC_LIB +#include <acpi/cppc_acpi.h> + +void topology_init_cpu_capacity_cppc(void) +{ + struct cppc_perf_caps perf_caps; + int cpu; + + if (likely(acpi_disabled || !acpi_cpc_valid())) + return; + + raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity), + GFP_KERNEL); + if (!raw_capacity) + return; + + for_each_possible_cpu(cpu) { + if (!cppc_get_perf_caps(cpu, &perf_caps) && + (perf_caps.highest_perf >= perf_caps.nominal_perf) && + (perf_caps.highest_perf >= perf_caps.lowest_perf)) { + raw_capacity[cpu] = perf_caps.highest_perf; + pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n", + cpu, raw_capacity[cpu]); + continue; + } + + pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu); + pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n"); + goto exit; + } + + topology_normalize_cpu_scale(); + schedule_work(&update_topology_flags_work); + 
pr_debug("cpu_capacity: cpu_capacity initialization done\n"); + +exit: + free_raw_capacity(); +} +#endif + #ifdef CONFIG_CPU_FREQ static cpumask_var_t cpus_to_visit; static void parsing_done_workfn(struct work_struct *work); @@ -387,9 +427,8 @@ static int __init register_cpufreq_notifier(void) int ret; /* - * on ACPI-based systems we need to use the default cpu capacity - * until we have the necessary code to parse the cpu capacity, so - * skip registering cpufreq notifier. + * On ACPI-based systems skip registering cpufreq notifier as cpufreq + * information is not needed for cpu capacity initialization. */ if (!acpi_disabled || !raw_capacity) return -EINVAL; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 9eaaff2f556c..f47cab21430f 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -629,6 +629,9 @@ re_probe: drv->remove(dev); devres_release_all(dev); + arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; driver_sysfs_remove(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); @@ -1209,6 +1212,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) devres_release_all(dev); arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; dev->driver = NULL; dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 5db704f02e71..1ee878d126fd 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -636,6 +636,18 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, atomic_read(&genpd->sd_count) > 0) return -EBUSY; + /* + * The children must be in their deepest (powered-off) states to allow + * the parent to be powered off. Note that, there's no need for + * additional locking, as powering on a child, requires the parent's + * lock to be acquired first. + */ + list_for_each_entry(link, &genpd->parent_links, parent_node) { + struct generic_pm_domain *child = link->child; + if (child->state_idx < child->state_count - 1) + return -EBUSY; + } + list_for_each_entry(pdd, &genpd->dev_list, list_node) { enum pm_qos_flags_status stat; @@ -1073,6 +1085,13 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock, || atomic_read(&genpd->sd_count) > 0) return; + /* Check that the children are in their deepest (powered-off) state. 
*/ + list_for_each_entry(link, &genpd->parent_links, parent_node) { + struct generic_pm_domain *child = link->child; + if (child->state_idx < child->state_count - 1) + return; + } + /* Choose the deepest state when suspending */ genpd->state_idx = genpd->state_count - 1; if (_genpd_power_off(genpd, false)) @@ -2058,9 +2077,9 @@ static int genpd_remove(struct generic_pm_domain *genpd) kfree(link); } - genpd_debug_remove(genpd); list_del(&genpd->gpd_list_node); genpd_unlock(genpd); + genpd_debug_remove(genpd); cancel_work_sync(&genpd->power_off_work); if (genpd_is_cpu_domain(genpd)) free_cpumask_var(genpd->cpus); @@ -2248,12 +2267,8 @@ int of_genpd_add_provider_simple(struct device_node *np, /* Parse genpd OPP table */ if (genpd->set_performance_state) { ret = dev_pm_opp_of_add_table(&genpd->dev); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(&genpd->dev, "Failed to add OPP table: %d\n", - ret); - return ret; - } + if (ret) + return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n"); /* * Save table for faster processing while setting performance @@ -2312,9 +2327,8 @@ int of_genpd_add_provider_onecell(struct device_node *np, if (genpd->set_performance_state) { ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i); if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n", - i, ret); + dev_err_probe(&genpd->dev, ret, + "Failed to add OPP table for index %d\n", i); goto error; } @@ -2672,12 +2686,8 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev, ret = genpd_add_device(pd, dev, base_dev); mutex_unlock(&gpd_list_lock); - if (ret < 0) { - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to add to PM domain %s: %d", - pd->name, ret); - return ret; - } + if (ret < 0) + return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name); dev->pm_domain->detach = genpd_dev_pm_detach; dev->pm_domain->sync = genpd_dev_pm_sync; diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 04ea92cbd9cf..c50139207794 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -485,7 +485,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev, trace_device_pm_callback_start(dev, info, state.event); error = cb(dev); trace_device_pm_callback_end(dev, error); - suspend_report_result(cb, error); + suspend_report_result(dev, cb, error); initcall_debug_report(dev, calltime, cb, error); @@ -1568,7 +1568,7 @@ static int legacy_suspend(struct device *dev, pm_message_t state, trace_device_pm_callback_start(dev, info, state.event); error = cb(dev, state); trace_device_pm_callback_end(dev, error); - suspend_report_result(cb, error); + suspend_report_result(dev, cb, error); initcall_debug_report(dev, calltime, cb, error); @@ -1855,7 +1855,7 @@ unlock: device_unlock(dev); if (ret < 0) { - suspend_report_result(callback, ret); + suspend_report_result(dev, callback, ret); pm_runtime_put(dev); return ret; } @@ -1960,10 +1960,10 @@ int dpm_suspend_start(pm_message_t state) } EXPORT_SYMBOL_GPL(dpm_suspend_start); -void __suspend_report_result(const char *function, void *fn, int ret) +void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret) { if (ret) - pr_err("%s(): %pS returns %d\n", function, fn, ret); + dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret); } EXPORT_SYMBOL_GPL(__suspend_report_result); @@ -2018,7 +2018,9 @@ static bool pm_ops_is_empty(const struct dev_pm_ops *ops) void device_pm_check_callbacks(struct device *dev) { - 
spin_lock_irq(&dev->power.lock); + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); dev->power.no_pm_callbacks = (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && !dev->bus->suspend && !dev->bus->resume)) && @@ -2027,7 +2029,7 @@ void device_pm_check_callbacks(struct device *dev) (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && !dev->driver->suspend && !dev->driver->resume)); - spin_unlock_irq(&dev->power.lock); + spin_unlock_irqrestore(&dev->power.lock, flags); } bool dev_pm_skip_suspend(struct device *dev) diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 2f3cce17219b..d4059e6ffeae 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1476,11 +1476,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_enable); static void pm_runtime_disable_action(void *data) { + pm_runtime_dont_use_autosuspend(data); pm_runtime_disable(data); } /** * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable. + * + * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for + * you at driver exit time if needed. + * * @dev: Device to handle. */ int devm_pm_runtime_enable(struct device *dev) diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index 0004db4a9d3b..d487a6bac630 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -289,7 +289,7 @@ EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq); * * Enables wakeirq conditionally. We need to enable wake-up interrupt * lazily on the first rpm_suspend(). This is needed as the consumer device - * starts in RPM_SUSPENDED state, and the the first pm_runtime_get() would + * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would * otherwise try to disable already disabled wakeirq. The wake-up interrupt * starts disabled with IRQ_NOAUTOEN set. * diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 99bda0da23a8..a57d469676ca 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -34,7 +34,8 @@ suspend_state_t pm_suspend_target_state; bool events_check_enabled __read_mostly; /* First wakeup IRQ seen by the kernel in the last cycle. */ -unsigned int pm_wakeup_irq __read_mostly; +static unsigned int wakeup_irq[2] __read_mostly; +static DEFINE_RAW_SPINLOCK(wakeup_irq_lock); /* If greater than 0 and the system is suspending, terminate the suspend. */ static atomic_t pm_abort_suspend __read_mostly; @@ -586,7 +587,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws) * @ws: Wakeup source to handle. * * Update the @ws' statistics and, if @ws has just been activated, notify the PM - * core of the event by incrementing the counter of of wakeup events being + * core of the event by incrementing the counter of the wakeup events being * processed. */ static void wakeup_source_activate(struct wakeup_source *ws) @@ -732,7 +733,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws) /* * Increment the counter of registered wakeup events and decrement the - * couter of wakeup events in progress simultaneously. + * counter of wakeup events in progress simultaneously. 
*/ cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count); trace_wakeup_source_deactivate(ws->name, cec); @@ -942,19 +943,45 @@ void pm_system_cancel_wakeup(void) atomic_dec_if_positive(&pm_abort_suspend); } -void pm_wakeup_clear(bool reset) +void pm_wakeup_clear(unsigned int irq_number) { - pm_wakeup_irq = 0; - if (reset) + raw_spin_lock_irq(&wakeup_irq_lock); + + if (irq_number && wakeup_irq[0] == irq_number) + wakeup_irq[0] = wakeup_irq[1]; + else + wakeup_irq[0] = 0; + + wakeup_irq[1] = 0; + + raw_spin_unlock_irq(&wakeup_irq_lock); + + if (!irq_number) atomic_set(&pm_abort_suspend, 0); } void pm_system_irq_wakeup(unsigned int irq_number) { - if (pm_wakeup_irq == 0) { - pm_wakeup_irq = irq_number; + unsigned long flags; + + raw_spin_lock_irqsave(&wakeup_irq_lock, flags); + + if (wakeup_irq[0] == 0) + wakeup_irq[0] = irq_number; + else if (wakeup_irq[1] == 0) + wakeup_irq[1] = irq_number; + else + irq_number = 0; + + raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags); + + if (irq_number) pm_system_wakeup(); - } +} + +unsigned int pm_wakeup_irq(void) +{ + return wakeup_irq[0]; } /** diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index d2656581a608..4a446259a184 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -189,11 +189,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data) ret = regmap_write(map, reg, d->mask_buf[i]); if (d->chip->clear_ack) { if (d->chip->ack_invert && !ret) - ret = regmap_write(map, reg, - d->mask_buf[i]); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~d->mask_buf[i]); + ret = regmap_write(map, reg, 0); } if (ret != 0) dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", @@ -556,11 +554,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) data->status_buf[i]); if (chip->clear_ack) { if (chip->ack_invert && !ret) - ret = regmap_write(map, reg, - data->status_buf[i]); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~data->status_buf[i]); + ret = regmap_write(map, reg, 0); } if (ret != 0) dev_err(map->dev, "Failed to ack 0x%x: %d\n", @@ -817,13 +813,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, d->status_buf[i] & d->mask_buf[i]); if (chip->clear_ack) { if (chip->ack_invert && !ret) - ret = regmap_write(map, reg, - (d->status_buf[i] & - d->mask_buf[i])); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~(d->status_buf[i] & - d->mask_buf[i])); + ret = regmap_write(map, reg, 0); } if (ret != 0) { dev_err(map->dev, "Failed to ack 0x%x: %d\n", diff --git a/drivers/base/topology.c b/drivers/base/topology.c index fc24e89f9592..e9d1efcda89b 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -14,11 +14,11 @@ #include <linux/hardirq.h> #include <linux/topology.h> -#define define_id_show_func(name) \ +#define define_id_show_func(name, fmt) \ static ssize_t name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ - return sysfs_emit(buf, "%d\n", topology_##name(dev->id)); \ + return sysfs_emit(buf, fmt "\n", topology_##name(dev->id)); \ } #define define_siblings_read_func(name, mask) \ @@ -42,22 +42,25 @@ static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \ off, count); \ } -define_id_show_func(physical_package_id); +define_id_show_func(physical_package_id, "%d"); static DEVICE_ATTR_RO(physical_package_id); #ifdef TOPOLOGY_DIE_SYSFS -define_id_show_func(die_id); 
+define_id_show_func(die_id, "%d"); static DEVICE_ATTR_RO(die_id); #endif #ifdef TOPOLOGY_CLUSTER_SYSFS -define_id_show_func(cluster_id); +define_id_show_func(cluster_id, "%d"); static DEVICE_ATTR_RO(cluster_id); #endif -define_id_show_func(core_id); +define_id_show_func(core_id, "%d"); static DEVICE_ATTR_RO(core_id); +define_id_show_func(ppin, "0x%llx"); +static DEVICE_ATTR_ADMIN_RO(ppin); + define_siblings_read_func(thread_siblings, sibling_cpumask); static BIN_ATTR_RO(thread_siblings, 0); static BIN_ATTR_RO(thread_siblings_list, 0); @@ -87,7 +90,7 @@ static BIN_ATTR_RO(package_cpus, 0); static BIN_ATTR_RO(package_cpus_list, 0); #ifdef TOPOLOGY_BOOK_SYSFS -define_id_show_func(book_id); +define_id_show_func(book_id, "%d"); static DEVICE_ATTR_RO(book_id); define_siblings_read_func(book_siblings, book_cpumask); static BIN_ATTR_RO(book_siblings, 0); @@ -95,7 +98,7 @@ static BIN_ATTR_RO(book_siblings_list, 0); #endif #ifdef TOPOLOGY_DRAWER_SYSFS -define_id_show_func(drawer_id); +define_id_show_func(drawer_id, "%d"); static DEVICE_ATTR_RO(drawer_id); define_siblings_read_func(drawer_siblings, drawer_cpumask); static BIN_ATTR_RO(drawer_siblings, 0); @@ -145,6 +148,7 @@ static struct attribute *default_attrs[] = { #ifdef TOPOLOGY_DRAWER_SYSFS &dev_attr_drawer_id.attr, #endif + &dev_attr_ppin.attr, NULL }; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 01cbbfc4e9e2..19fe19eaa50e 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -79,6 +79,7 @@ #include <linux/ioprio.h> #include <linux/blk-cgroup.h> #include <linux/sched/mm.h> +#include <linux/statfs.h> #include "loop.h" @@ -774,8 +775,13 @@ static void loop_config_discard(struct loop_device *lo) granularity = 0; } else { + struct kstatfs sbuf; + max_discard_sectors = UINT_MAX >> 9; - granularity = inode->i_sb->s_blocksize; + if (!vfs_statfs(&file->f_path, &sbuf)) + granularity = sbuf.f_bsize; + else + max_discard_sectors = 0; } if (max_discard_sectors) { @@ -1082,7 +1088,7 @@ out_putf: return error; } -static void __loop_clr_fd(struct loop_device *lo) +static void __loop_clr_fd(struct loop_device *lo, bool release) { struct file *filp; gfp_t gfp = lo->old_gfp_mask; @@ -1144,6 +1150,8 @@ static void __loop_clr_fd(struct loop_device *lo) /* let user-space know about this change */ kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); mapping_set_gfp_mask(filp->f_mapping, gfp); + /* This is safe: open() is still holding a reference. */ + module_put(THIS_MODULE); blk_mq_unfreeze_queue(lo->lo_queue); disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE); @@ -1151,52 +1159,44 @@ static void __loop_clr_fd(struct loop_device *lo) if (lo->lo_flags & LO_FLAGS_PARTSCAN) { int err; - mutex_lock(&lo->lo_disk->open_mutex); + /* + * open_mutex has been held already in release path, so don't + * acquire it if this function is called in such case. + * + * If the reread partition isn't from release path, lo_refcnt + * must be at least one and it can only become zero when the + * current holder is released. + */ + if (!release) + mutex_lock(&lo->lo_disk->open_mutex); err = bdev_disk_changed(lo->lo_disk, false); - mutex_unlock(&lo->lo_disk->open_mutex); + if (!release) + mutex_unlock(&lo->lo_disk->open_mutex); if (err) pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", __func__, lo->lo_number, err); /* Device is gone, no point in returning error */ } + /* + * lo->lo_state is set to Lo_unbound here after above partscan has + * finished. 
There cannot be anybody else entering __loop_clr_fd() as + * Lo_rundown state protects us from all the other places trying to + * change the 'lo' device. + */ lo->lo_flags = 0; if (!part_shift) lo->lo_disk->flags |= GENHD_FL_NO_PART; - - fput(filp); -} - -static void loop_rundown_completed(struct loop_device *lo) -{ mutex_lock(&lo->lo_mutex); lo->lo_state = Lo_unbound; mutex_unlock(&lo->lo_mutex); - module_put(THIS_MODULE); -} - -static void loop_rundown_workfn(struct work_struct *work) -{ - struct loop_device *lo = container_of(work, struct loop_device, - rundown_work); - struct block_device *bdev = lo->lo_device; - struct gendisk *disk = lo->lo_disk; - - __loop_clr_fd(lo); - kobject_put(&bdev->bd_device.kobj); - module_put(disk->fops->owner); - loop_rundown_completed(lo); -} - -static void loop_schedule_rundown(struct loop_device *lo) -{ - struct block_device *bdev = lo->lo_device; - struct gendisk *disk = lo->lo_disk; - __module_get(disk->fops->owner); - kobject_get(&bdev->bd_device.kobj); - INIT_WORK(&lo->rundown_work, loop_rundown_workfn); - queue_work(system_long_wq, &lo->rundown_work); + /* + * Need not hold lo_mutex to fput backing file. Calling fput holding + * lo_mutex triggers a circular lock dependency possibility warning as + * fput can take open_mutex which is usually taken before lo_mutex. + */ + fput(filp); } static int loop_clr_fd(struct loop_device *lo) @@ -1228,8 +1228,7 @@ static int loop_clr_fd(struct loop_device *lo) lo->lo_state = Lo_rundown; mutex_unlock(&lo->lo_mutex); - __loop_clr_fd(lo); - loop_rundown_completed(lo); + __loop_clr_fd(lo, false); return 0; } @@ -1754,7 +1753,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode) * In autoclear mode, stop the loop thread * and remove configuration after last close. */ - loop_schedule_rundown(lo); + __loop_clr_fd(lo, true); return; } else if (lo->lo_state == Lo_bound) { /* diff --git a/drivers/block/loop.h b/drivers/block/loop.h index 918a7a2dc025..082d4b6bfc6a 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h @@ -56,7 +56,6 @@ struct loop_device { struct gendisk *lo_disk; struct mutex lo_mutex; bool idr_visible; - struct work_struct rundown_work; }; struct loop_cmd { diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 2d43ab5b5cc0..4fbaf0b4958b 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -4109,7 +4109,7 @@ static void mtip_pci_remove(struct pci_dev *pdev) "Completion workers still active!\n"); } - blk_set_queue_dying(dd->queue); + blk_mark_disk_dead(dd->disk); set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); /* Clean up the block layer. */ diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4203cdab8abf..b844432bad20 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -7185,7 +7185,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus, * IO to complete/fail. */ blk_mq_freeze_queue(rbd_dev->disk->queue); - blk_set_queue_dying(rbd_dev->disk->queue); + blk_mark_disk_dead(rbd_dev->disk); } del_gendisk(rbd_dev->disk); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 5c636ca7f1a7..35399a9eae0e 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -69,9 +69,6 @@ struct virtio_blk { /* Process context for config space updates */ struct work_struct config_work; - /* What host tells us, plus 2 for header & tailer. */ - unsigned int sg_elems; - /* Ida index - used to track minor number allocations. 
*/ int index; @@ -315,8 +312,6 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, blk_status_t status; int err; - BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); - status = virtblk_setup_cmd(vblk->vdev, req, vbr); if (unlikely(status)) return status; @@ -747,8 +742,6 @@ static int virtblk_probe(struct virtio_device *vdev) /* Prevent integer overflows and honor max vq size */ sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2); - /* We need extra sg elements at head and tail. */ - sg_elems += 2; vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); if (!vblk) { err = -ENOMEM; @@ -758,7 +751,6 @@ static int virtblk_probe(struct virtio_device *vdev) mutex_init(&vblk->vdev_mutex); vblk->vdev = vdev; - vblk->sg_elems = sg_elems; INIT_WORK(&vblk->config_work, virtblk_config_changed_work); @@ -815,7 +807,7 @@ static int virtblk_probe(struct virtio_device *vdev) set_disk_ro(vblk->disk, 1); /* We can handle whatever the host told us to handle. */ - blk_queue_max_segments(q, vblk->sg_elems-2); + blk_queue_max_segments(q, sg_elems); /* No real sector limit. */ blk_queue_max_hw_sectors(q, -1U); @@ -887,9 +879,15 @@ static int virtblk_probe(struct virtio_device *vdev) virtio_cread(vdev, struct virtio_blk_config, max_discard_seg, &v); + + /* + * max_discard_seg == 0 is out of spec but we always + * handled it. + */ + if (!v) + v = sg_elems; blk_queue_max_discard_segments(q, - min_not_zero(v, - MAX_DISCARD_SEGMENTS)); + min(v, MAX_DISCARD_SEGMENTS)); blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index ccd0dd0c6b83..03b5fb341e58 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1288,7 +1288,8 @@ free_shadow: rinfo->ring_ref[i] = GRANT_INVALID_REF; } } - free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE)); + free_pages_exact(rinfo->ring.sring, + info->nr_ring_pages * XEN_PAGE_SIZE); rinfo->ring.sring = NULL; if (rinfo->irq) @@ -1372,9 +1373,15 @@ static int blkif_get_final_status(enum blk_req_status s1, return BLKIF_RSP_OKAY; } -static bool blkif_completion(unsigned long *id, - struct blkfront_ring_info *rinfo, - struct blkif_response *bret) +/* + * Return values: + * 1 response processed. + * 0 missing further responses. + * -1 error while processing. + */ +static int blkif_completion(unsigned long *id, + struct blkfront_ring_info *rinfo, + struct blkif_response *bret) { int i = 0; struct scatterlist *sg; @@ -1397,7 +1404,7 @@ static bool blkif_completion(unsigned long *id, /* Wait the second response if not yet here. */ if (s2->status < REQ_DONE) - return false; + return 0; bret->status = blkif_get_final_status(s->status, s2->status); @@ -1448,42 +1455,43 @@ static bool blkif_completion(unsigned long *id, } /* Add the persistent grant into the list of free grants */ for (i = 0; i < num_grant; i++) { - if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { + if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) { /* * If the grant is still mapped by the backend (the * backend has chosen to make this grant persistent) * we add it at the head of the list, so it will be * reused first. 
*/ - if (!info->feature_persistent) - pr_alert_ratelimited("backed has not unmapped grant: %u\n", - s->grants_used[i]->gref); + if (!info->feature_persistent) { + pr_alert("backend has not unmapped grant: %u\n", + s->grants_used[i]->gref); + return -1; + } list_add(&s->grants_used[i]->node, &rinfo->grants); rinfo->persistent_gnts_c++; } else { /* - * If the grant is not mapped by the backend we end the - * foreign access and add it to the tail of the list, - * so it will not be picked again unless we run out of - * persistent grants. + * If the grant is not mapped by the backend we add it + * to the tail of the list, so it will not be picked + * again unless we run out of persistent grants. */ - gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL); s->grants_used[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->grants_used[i]->node, &rinfo->grants); } } if (s->req.operation == BLKIF_OP_INDIRECT) { for (i = 0; i < INDIRECT_GREFS(num_grant); i++) { - if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { - if (!info->feature_persistent) - pr_alert_ratelimited("backed has not unmapped grant: %u\n", - s->indirect_grants[i]->gref); + if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) { + if (!info->feature_persistent) { + pr_alert("backend has not unmapped grant: %u\n", + s->indirect_grants[i]->gref); + return -1; + } list_add(&s->indirect_grants[i]->node, &rinfo->grants); rinfo->persistent_gnts_c++; } else { struct page *indirect_page; - gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL); /* * Add the used indirect page back to the list of * available pages for indirect grefs. @@ -1498,7 +1506,7 @@ static bool blkif_completion(unsigned long *id, } } - return true; + return 1; } static irqreturn_t blkif_interrupt(int irq, void *dev_id) @@ -1564,12 +1572,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) } if (bret.operation != BLKIF_OP_DISCARD) { + int ret; + /* * We may need to wait for an extra response if the * I/O request is split in 2 */ - if (!blkif_completion(&id, rinfo, &bret)) + ret = blkif_completion(&id, rinfo, &bret); + if (!ret) continue; + if (unlikely(ret < 0)) + goto err; } if (add_id_to_freelist(rinfo, id)) { @@ -1676,8 +1689,7 @@ static int setup_blkring(struct xenbus_device *dev, for (i = 0; i < info->nr_ring_pages; i++) rinfo->ring_ref[i] = GRANT_INVALID_REF; - sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH, - get_order(ring_size)); + sring = alloc_pages_exact(ring_size, GFP_NOIO); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } @@ -1687,7 +1699,7 @@ err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref); if (err < 0) { - free_pages((unsigned long)sring, get_order(ring_size)); + free_pages_exact(sring, ring_size); rinfo->ring.sring = NULL; goto fail; } @@ -2126,7 +2138,7 @@ static void blkfront_closing(struct blkfront_info *info) /* No more blkif_request(). 
*/ blk_mq_stop_hw_queues(info->rq); - blk_set_queue_dying(info->rq); + blk_mark_disk_dead(info->gd); set_capacity(info->gd, 0); for_each_rinfo(info, rinfo, i) { @@ -2532,11 +2544,10 @@ static void purge_persistent_grants(struct blkfront_info *info) list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, node) { if (gnt_list_entry->gref == GRANT_INVALID_REF || - gnttab_query_foreign_access(gnt_list_entry->gref)) + !gnttab_try_end_foreign_access(gnt_list_entry->gref)) continue; list_del(&gnt_list_entry->node); - gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); rinfo->persistent_gnts_c--; gnt_list_entry->gref = GRANT_INVALID_REF; list_add_tail(&gnt_list_entry->node, &rinfo->grants); diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c index 3a258a677df8..b79895810c52 100644 --- a/drivers/bus/mhi/pci_generic.c +++ b/drivers/bus/mhi/pci_generic.c @@ -366,6 +366,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, + .mru_default = 32768, .sideband_wake = false, }; @@ -401,6 +402,7 @@ static const struct mhi_pci_dev_info mhi_mv31_info = { .config = &modem_mv31_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, + .mru_default = 32768, }; static const struct mhi_channel_config mhi_sierra_em919x_channels[] = { diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 9704963f9d50..a087156a5818 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -401,7 +401,7 @@ config HW_RANDOM_MESON config HW_RANDOM_CAVIUM tristate "Cavium ThunderX Random Number Generator support" - depends on HW_RANDOM && PCI && ARM64 + depends on HW_RANDOM && PCI && ARCH_THUNDER default HW_RANDOM help This driver provides kernel-side support for the Random Number diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index ecb71c4317a5..b8effe77d80f 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c @@ -13,13 +13,16 @@ #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/hw_random.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #define TRNG_CR 0x00 #define TRNG_MR 0x04 #define TRNG_ISR 0x1c +#define TRNG_ISR_DATRDY BIT(0) #define TRNG_ODATA 0x50 #define TRNG_KEY 0x524e4700 /* RNG */ @@ -34,37 +37,79 @@ struct atmel_trng { struct clk *clk; void __iomem *base; struct hwrng rng; + bool has_half_rate; }; +static bool atmel_trng_wait_ready(struct atmel_trng *trng, bool wait) +{ + int ready; + + ready = readl(trng->base + TRNG_ISR) & TRNG_ISR_DATRDY; + if (!ready && wait) + readl_poll_timeout(trng->base + TRNG_ISR, ready, + ready & TRNG_ISR_DATRDY, 1000, 20000); + + return !!ready; +} + static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct atmel_trng *trng = container_of(rng, struct atmel_trng, rng); u32 *data = buf; + int ret; - /* data ready? 
*/ - if (readl(trng->base + TRNG_ISR) & 1) { - *data = readl(trng->base + TRNG_ODATA); - /* - ensure data ready is only set again AFTER the next data - word is ready in case it got set between checking ISR - and reading ODATA, so we don't risk re-reading the - same word - */ - readl(trng->base + TRNG_ISR); - return 4; - } else - return 0; + ret = pm_runtime_get_sync((struct device *)trng->rng.priv); + if (ret < 0) { + pm_runtime_put_sync((struct device *)trng->rng.priv); + return ret; + } + + ret = atmel_trng_wait_ready(trng, wait); + if (!ret) + goto out; + + *data = readl(trng->base + TRNG_ODATA); + /* + * ensure data ready is only set again AFTER the next data word is ready + * in case it got set between checking ISR and reading ODATA, so we + * don't risk re-reading the same word + */ + readl(trng->base + TRNG_ISR); + ret = 4; + +out: + pm_runtime_mark_last_busy((struct device *)trng->rng.priv); + pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv); + return ret; } -static void atmel_trng_enable(struct atmel_trng *trng) +static int atmel_trng_init(struct atmel_trng *trng) { + unsigned long rate; + int ret; + + ret = clk_prepare_enable(trng->clk); + if (ret) + return ret; + + if (trng->has_half_rate) { + rate = clk_get_rate(trng->clk); + + /* if peripheral clk is above 100MHz, set HALFR */ + if (rate > 100000000) + writel(TRNG_HALFR, trng->base + TRNG_MR); + } + writel(TRNG_KEY | 1, trng->base + TRNG_CR); + + return 0; } -static void atmel_trng_disable(struct atmel_trng *trng) +static void atmel_trng_cleanup(struct atmel_trng *trng) { writel(TRNG_KEY, trng->base + TRNG_CR); + clk_disable_unprepare(trng->clk); } static int atmel_trng_probe(struct platform_device *pdev) @@ -88,32 +133,31 @@ static int atmel_trng_probe(struct platform_device *pdev) if (!data) return -ENODEV; - if (data->has_half_rate) { - unsigned long rate = clk_get_rate(trng->clk); - - /* if peripheral clk is above 100MHz, set HALFR */ - if (rate > 100000000) - writel(TRNG_HALFR, trng->base + TRNG_MR); - } - - ret = clk_prepare_enable(trng->clk); - if (ret) - return ret; - - atmel_trng_enable(trng); + trng->has_half_rate = data->has_half_rate; trng->rng.name = pdev->name; trng->rng.read = atmel_trng_read; + trng->rng.priv = (unsigned long)&pdev->dev; + platform_set_drvdata(pdev, trng); - ret = devm_hwrng_register(&pdev->dev, &trng->rng); +#ifndef CONFIG_PM + ret = atmel_trng_init(trng); if (ret) - goto err_register; + return ret; +#endif - platform_set_drvdata(pdev, trng); + pm_runtime_set_autosuspend_delay(&pdev->dev, 100); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); - return 0; + ret = devm_hwrng_register(&pdev->dev, &trng->rng); + if (ret) { + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); +#ifndef CONFIG_PM + atmel_trng_cleanup(trng); +#endif + } -err_register: - clk_disable_unprepare(trng->clk); return ret; } @@ -121,43 +165,35 @@ static int atmel_trng_remove(struct platform_device *pdev) { struct atmel_trng *trng = platform_get_drvdata(pdev); - - atmel_trng_disable(trng); - clk_disable_unprepare(trng->clk); + atmel_trng_cleanup(trng); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); return 0; } -#ifdef CONFIG_PM -static int atmel_trng_suspend(struct device *dev) +static int __maybe_unused atmel_trng_runtime_suspend(struct device *dev) { struct atmel_trng *trng = dev_get_drvdata(dev); - atmel_trng_disable(trng); - clk_disable_unprepare(trng->clk); + atmel_trng_cleanup(trng); return 0; } -static int atmel_trng_resume(struct 
device *dev) +static int __maybe_unused atmel_trng_runtime_resume(struct device *dev) { struct atmel_trng *trng = dev_get_drvdata(dev); - int ret; - ret = clk_prepare_enable(trng->clk); - if (ret) - return ret; - - atmel_trng_enable(trng); - - return 0; + return atmel_trng_init(trng); } -static const struct dev_pm_ops atmel_trng_pm_ops = { - .suspend = atmel_trng_suspend, - .resume = atmel_trng_resume, +static const struct dev_pm_ops __maybe_unused atmel_trng_pm_ops = { + SET_RUNTIME_PM_OPS(atmel_trng_runtime_suspend, + atmel_trng_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; -#endif /* CONFIG_PM */ static const struct atmel_trng_data at91sam9g45_config = { .has_half_rate = false, @@ -185,9 +221,7 @@ static struct platform_driver atmel_trng_driver = { .remove = atmel_trng_remove, .driver = { .name = "atmel-trng", -#ifdef CONFIG_PM - .pm = &atmel_trng_pm_ops, -#endif /* CONFIG_PM */ + .pm = pm_ptr(&atmel_trng_pm_ops), .of_match_table = atmel_trng_dt_ids, }, }; diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c index 6f66919652bf..7c55f4cf4a8b 100644 --- a/drivers/char/hw_random/cavium-rng-vf.c +++ b/drivers/char/hw_random/cavium-rng-vf.c @@ -179,7 +179,7 @@ static int cavium_map_pf_regs(struct cavium_rng *rng) pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CAVIUM_RNG_PF, NULL); if (!pdev) { - dev_err(&pdev->dev, "Cannot find RNG PF device\n"); + pr_err("Cannot find RNG PF device\n"); return -EIO; } diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index a3db27916256..16f227b995e8 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -15,6 +15,7 @@ #include <linux/err.h> #include <linux/fs.h> #include <linux/hw_random.h> +#include <linux/random.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/sched/signal.h> @@ -31,7 +32,7 @@ static struct hwrng *current_rng; /* the current rng has been explicitly chosen by user via sysfs */ static int cur_rng_set_by_user; static struct task_struct *hwrng_fill; -/* list of registered rngs, sorted decending by quality */ +/* list of registered rngs */ static LIST_HEAD(rng_list); /* Protects rng_list and current_rng */ static DEFINE_MUTEX(rng_mutex); @@ -44,14 +45,14 @@ static unsigned short default_quality; /* = 0; default to "off" */ module_param(current_quality, ushort, 0644); MODULE_PARM_DESC(current_quality, - "current hwrng entropy estimation per 1024 bits of input"); + "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead"); module_param(default_quality, ushort, 0644); MODULE_PARM_DESC(default_quality, "default entropy content of hwrng per 1024 bits of input"); static void drop_current_rng(void); static int hwrng_init(struct hwrng *rng); -static void start_khwrngd(void); +static void hwrng_manage_rngd(struct hwrng *rng); static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, int wait); @@ -64,13 +65,12 @@ static size_t rng_buffer_size(void) static void add_early_randomness(struct hwrng *rng) { int bytes_read; - size_t size = min_t(size_t, 16, rng_buffer_size()); mutex_lock(&reading_mutex); - bytes_read = rng_get_data(rng, rng_buffer, size, 0); + bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0); mutex_unlock(&reading_mutex); if (bytes_read > 0) - add_device_randomness(rng_buffer, bytes_read); + add_device_randomness(rng_fillbuf, bytes_read); } static inline void cleanup_rng(struct kref *kref) @@ -161,14 +161,13 @@ 
static int hwrng_init(struct hwrng *rng) reinit_completion(&rng->cleanup_done); skip_init: - current_quality = rng->quality ? : default_quality; - if (current_quality > 1024) - current_quality = 1024; + if (!rng->quality) + rng->quality = default_quality; + if (rng->quality > 1024) + rng->quality = 1024; + current_quality = rng->quality; /* obsolete */ - if (current_quality == 0 && hwrng_fill) - kthread_stop(hwrng_fill); - if (current_quality > 0 && !hwrng_fill) - start_khwrngd(); + hwrng_manage_rngd(rng); return 0; } @@ -298,24 +297,28 @@ static struct miscdevice rng_miscdev = { static int enable_best_rng(void) { + struct hwrng *rng, *new_rng = NULL; int ret = -ENODEV; BUG_ON(!mutex_is_locked(&rng_mutex)); - /* rng_list is sorted by quality, use the best (=first) one */ - if (!list_empty(&rng_list)) { - struct hwrng *new_rng; - - new_rng = list_entry(rng_list.next, struct hwrng, list); - ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng)); - if (!ret) - cur_rng_set_by_user = 0; - } else { + /* no rng to use? */ + if (list_empty(&rng_list)) { drop_current_rng(); cur_rng_set_by_user = 0; - ret = 0; + return 0; + } + + /* use the rng which offers the best quality */ + list_for_each_entry(rng, &rng_list, list) { + if (!new_rng || rng->quality > new_rng->quality) + new_rng = rng; } + ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng)); + if (!ret) + cur_rng_set_by_user = 0; + return ret; } @@ -336,8 +339,9 @@ static ssize_t rng_current_store(struct device *dev, } else { list_for_each_entry(rng, &rng_list, list) { if (sysfs_streq(rng->name, buf)) { - cur_rng_set_by_user = 1; err = set_current_rng(rng); + if (!err) + cur_rng_set_by_user = 1; break; } } @@ -399,14 +403,76 @@ static ssize_t rng_selected_show(struct device *dev, return sysfs_emit(buf, "%d\n", cur_rng_set_by_user); } +static ssize_t rng_quality_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + ssize_t ret; + struct hwrng *rng; + + rng = get_current_rng(); + if (IS_ERR(rng)) + return PTR_ERR(rng); + + if (!rng) /* no need to put_rng */ + return -ENODEV; + + ret = sysfs_emit(buf, "%hu\n", rng->quality); + put_rng(rng); + + return ret; +} + +static ssize_t rng_quality_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + u16 quality; + int ret = -EINVAL; + + if (len < 2) + return -EINVAL; + + ret = mutex_lock_interruptible(&rng_mutex); + if (ret) + return -ERESTARTSYS; + + ret = kstrtou16(buf, 0, &quality); + if (ret || quality > 1024) { + ret = -EINVAL; + goto out; + } + + if (!current_rng) { + ret = -ENODEV; + goto out; + } + + current_rng->quality = quality; + current_quality = quality; /* obsolete */ + + /* the best available RNG may have changed */ + ret = enable_best_rng(); + + /* start/stop rngd if necessary */ + if (current_rng) + hwrng_manage_rngd(current_rng); + +out: + mutex_unlock(&rng_mutex); + return ret ? 
ret : len; +} + static DEVICE_ATTR_RW(rng_current); static DEVICE_ATTR_RO(rng_available); static DEVICE_ATTR_RO(rng_selected); +static DEVICE_ATTR_RW(rng_quality); static struct attribute *rng_dev_attrs[] = { &dev_attr_rng_current.attr, &dev_attr_rng_available.attr, &dev_attr_rng_selected.attr, + &dev_attr_rng_quality.attr, NULL }; @@ -424,9 +490,11 @@ static int __init register_miscdev(void) static int hwrng_fillfn(void *unused) { + size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */ long rc; while (!kthread_should_stop()) { + unsigned short quality; struct hwrng *rng; rng = get_current_rng(); @@ -435,27 +503,49 @@ static int hwrng_fillfn(void *unused) mutex_lock(&reading_mutex); rc = rng_get_data(rng, rng_fillbuf, rng_buffer_size(), 1); + if (current_quality != rng->quality) + rng->quality = current_quality; /* obsolete */ + quality = rng->quality; mutex_unlock(&reading_mutex); put_rng(rng); + + if (!quality) + break; + if (rc <= 0) { pr_warn("hwrng: no data available\n"); msleep_interruptible(10000); continue; } + + /* If we cannot credit at least one bit of entropy, + * keep track of the remainder for the next iteration + */ + entropy = rc * quality * 8 + entropy_credit; + if ((entropy >> 10) == 0) + entropy_credit = entropy; + /* Outside lock, sure, but y'know: randomness. */ add_hwgenerator_randomness((void *)rng_fillbuf, rc, - rc * current_quality * 8 >> 10); + entropy >> 10); } hwrng_fill = NULL; return 0; } -static void start_khwrngd(void) +static void hwrng_manage_rngd(struct hwrng *rng) { - hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); - if (IS_ERR(hwrng_fill)) { - pr_err("hwrng_fill thread creation failed\n"); - hwrng_fill = NULL; + if (WARN_ON(!mutex_is_locked(&rng_mutex))) + return; + + if (rng->quality == 0 && hwrng_fill) + kthread_stop(hwrng_fill); + if (rng->quality > 0 && !hwrng_fill) { + hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); + if (IS_ERR(hwrng_fill)) { + pr_err("hwrng_fill thread creation failed\n"); + hwrng_fill = NULL; + } } } @@ -463,7 +553,6 @@ int hwrng_register(struct hwrng *rng) { int err = -EINVAL; struct hwrng *tmp; - struct list_head *rng_list_ptr; bool is_new_current = false; if (!rng->name || (!rng->data_read && !rng->read)) @@ -477,18 +566,11 @@ int hwrng_register(struct hwrng *rng) if (strcmp(tmp->name, rng->name) == 0) goto out_unlock; } + list_add_tail(&rng->list, &rng_list); init_completion(&rng->cleanup_done); complete(&rng->cleanup_done); - /* rng_list is sorted by decreasing quality */ - list_for_each(rng_list_ptr, &rng_list) { - tmp = list_entry(rng_list_ptr, struct hwrng, list); - if (tmp->quality < rng->quality) - break; - } - list_add_tail(&rng->list, rng_list_ptr); - if (!current_rng || (!cur_rng_set_by_user && rng->quality > current_rng->quality)) { /* @@ -638,7 +720,7 @@ static void __exit hwrng_modexit(void) unregister_miscdev(); } -module_init(hwrng_modinit); +fs_initcall(hwrng_modinit); /* depends on misc_register() */ module_exit(hwrng_modexit); MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index 67947a19aa22..e8f9621e7954 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -65,14 +65,14 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) out_release: amba_release_regions(dev); out_clk: - clk_disable(rng_clk); + clk_disable_unprepare(rng_clk); return ret; } static void nmk_rng_remove(struct amba_device *dev) { 
amba_release_regions(dev); - clk_disable(rng_clk); + clk_disable_unprepare(rng_clk); } static const struct amba_id nmk_rng_ids[] = { diff --git a/drivers/char/mem.c b/drivers/char/mem.c index cc296f0823bd..9f586025dbe6 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -707,7 +707,7 @@ static const struct memdev { [5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT }, [7] = { "full", 0666, &full_fops, 0 }, [8] = { "random", 0666, &random_fops, 0 }, - [9] = { "urandom", 0666, &urandom_fops, 0 }, + [9] = { "urandom", 0666, &random_fops, 0 }, #ifdef CONFIG_PRINTK [11] = { "kmsg", 0644, &kmsg_fops, 0 }, #endif diff --git a/drivers/char/random.c b/drivers/char/random.c index f206c87c6202..85f4cca6e707 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1,320 +1,28 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* - * random.c -- A strong random number generator - * * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. - * * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005 - * - * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All - * rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, and the entire permission notice in its entirety, - * including the disclaimer of warranties. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * ALTERNATIVELY, this product may be distributed under the terms of - * the GNU General Public License, in which case the provisions of the GPL are - * required INSTEAD OF the above restrictions. (This clause is - * necessary due to a potential bad interaction between the GPL and - * the restrictions contained in a BSD-style copyright.) - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF - * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT - * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH - * DAMAGE. - */ - -/* - * (now, with legal B.S. out of the way.....) - * - * This routine gathers environmental noise from device drivers, etc., - * and returns good random numbers, suitable for cryptographic use. - * Besides the obvious cryptographic uses, these numbers are also good - * for seeding TCP sequence numbers, and other places where it is - * desirable to have numbers which are not only random, but hard to - * predict by an attacker. - * - * Theory of operation - * =================== - * - * Computers are very predictable devices. 
Hence it is extremely hard - * to produce truly random numbers on a computer --- as opposed to - * pseudo-random numbers, which can easily generated by using a - * algorithm. Unfortunately, it is very easy for attackers to guess - * the sequence of pseudo-random number generators, and for some - * applications this is not acceptable. So instead, we must try to - * gather "environmental noise" from the computer's environment, which - * must be hard for outside attackers to observe, and use that to - * generate random numbers. In a Unix environment, this is best done - * from inside the kernel. - * - * Sources of randomness from the environment include inter-keyboard - * timings, inter-interrupt timings from some interrupts, and other - * events which are both (a) non-deterministic and (b) hard for an - * outside observer to measure. Randomness from these sources are - * added to an "entropy pool", which is mixed using a CRC-like function. - * This is not cryptographically strong, but it is adequate assuming - * the randomness is not chosen maliciously, and it is fast enough that - * the overhead of doing it on every interrupt is very reasonable. - * As random bytes are mixed into the entropy pool, the routines keep - * an *estimate* of how many bits of randomness have been stored into - * the random number generator's internal state. - * - * When random bytes are desired, they are obtained by taking the BLAKE2s - * hash of the contents of the "entropy pool". The BLAKE2s hash avoids - * exposing the internal state of the entropy pool. It is believed to - * be computationally infeasible to derive any useful information - * about the input of BLAKE2s from its output. Even if it is possible to - * analyze BLAKE2s in some clever way, as long as the amount of data - * returned from the generator is less than the inherent entropy in - * the pool, the output data is totally unpredictable. For this - * reason, the routine decreases its internal estimate of how many - * bits of "true randomness" are contained in the entropy pool as it - * outputs random numbers. - * - * If this estimate goes to zero, the routine can still generate - * random numbers; however, an attacker may (at least in theory) be - * able to infer the future output of the generator from prior - * outputs. This requires successful cryptanalysis of BLAKE2s, which is - * not believed to be feasible, but there is a remote possibility. - * Nonetheless, these numbers should be useful for the vast majority - * of purposes. - * - * Exported interfaces ---- output - * =============================== - * - * There are four exported interfaces; two for use within the kernel, - * and two for use from userspace. - * - * Exported interfaces ---- userspace output - * ----------------------------------------- - * - * The userspace interfaces are two character devices /dev/random and - * /dev/urandom. /dev/random is suitable for use when very high - * quality randomness is desired (for example, for key generation or - * one-time pads), as it will only return a maximum of the number of - * bits of randomness (as estimated by the random number generator) - * contained in the entropy pool. - * - * The /dev/urandom device does not have this limit, and will return - * as many bytes as are requested. As more and more random bytes are - * requested without giving time for the entropy pool to recharge, - * this will result in random numbers that are merely cryptographically - * strong. For many applications, however, this is acceptable. 
- * - * Exported interfaces ---- kernel output - * -------------------------------------- - * - * The primary kernel interface is - * - * void get_random_bytes(void *buf, int nbytes); - * - * This interface will return the requested number of random bytes, - * and place it in the requested buffer. This is equivalent to a - * read from /dev/urandom. - * - * For less critical applications, there are the functions: - * - * u32 get_random_u32() - * u64 get_random_u64() - * unsigned int get_random_int() - * unsigned long get_random_long() - * - * These are produced by a cryptographic RNG seeded from get_random_bytes, - * and so do not deplete the entropy pool as much. These are recommended - * for most in-kernel operations *if the result is going to be stored in - * the kernel*. - * - * Specifically, the get_random_int() family do not attempt to do - * "anti-backtracking". If you capture the state of the kernel (e.g. - * by snapshotting the VM), you can figure out previous get_random_int() - * return values. But if the value is stored in the kernel anyway, - * this is not a problem. - * - * It *is* safe to expose get_random_int() output to attackers (e.g. as - * network cookies); given outputs 1..n, it's not feasible to predict - * outputs 0 or n+1. The only concern is an attacker who breaks into - * the kernel later; the get_random_int() engine is not reseeded as - * often as the get_random_bytes() one. - * - * get_random_bytes() is needed for keys that need to stay secret after - * they are erased from the kernel. For example, any key that will - * be wrapped and stored encrypted. And session encryption keys: we'd - * like to know that after the session is closed and the keys erased, - * the plaintext is unrecoverable to someone who recorded the ciphertext. - * - * But for network ports/cookies, stack canaries, PRNG seeds, address - * space layout randomization, session *authentication* keys, or other - * applications where the sensitive data is stored in the kernel in - * plaintext for as long as it's sensitive, the get_random_int() family - * is just fine. - * - * Consider ASLR. We want to keep the address space secret from an - * outside attacker while the process is running, but once the address - * space is torn down, it's of no use to an attacker any more. And it's - * stored in kernel data structures as long as it's alive, so worrying - * about an attacker's ability to extrapolate it from the get_random_int() - * CRNG is silly. - * - * Even some cryptographic keys are safe to generate with get_random_int(). - * In particular, keys for SipHash are generally fine. Here, knowledge - * of the key authorizes you to do something to a kernel object (inject - * packets to a network connection, or flood a hash table), and the - * key is stored with the object being protected. Once it goes away, - * we no longer care if anyone knows the key. - * - * prandom_u32() - * ------------- - * - * For even weaker applications, see the pseudorandom generator - * prandom_u32(), prandom_max(), and prandom_bytes(). If the random - * numbers aren't security-critical at all, these are *far* cheaper. - * Useful for self-tests, random error simulation, randomized backoffs, - * and any other application where you trust that nobody is trying to - * maliciously mess with you by guessing the "random" numbers. 
- * - * Exported interfaces ---- input - * ============================== - * - * The current exported interfaces for gathering environmental noise - * from the devices are: - * - * void add_device_randomness(const void *buf, unsigned int size); - * void add_input_randomness(unsigned int type, unsigned int code, - * unsigned int value); - * void add_interrupt_randomness(int irq); - * void add_disk_randomness(struct gendisk *disk); - * void add_hwgenerator_randomness(const char *buffer, size_t count, - * size_t entropy); - * void add_bootloader_randomness(const void *buf, unsigned int size); - * - * add_device_randomness() is for adding data to the random pool that - * is likely to differ between two devices (or possibly even per boot). - * This would be things like MAC addresses or serial numbers, or the - * read-out of the RTC. This does *not* add any actual entropy to the - * pool, but it initializes the pool to different values for devices - * that might otherwise be identical and have very little entropy - * available to them (particularly common in the embedded world). - * - * add_input_randomness() uses the input layer interrupt timing, as well as - * the event type information from the hardware. - * - * add_interrupt_randomness() uses the interrupt timing as random - * inputs to the entropy pool. Using the cycle counters and the irq source - * as inputs, it feeds the randomness roughly once a second. - * - * add_disk_randomness() uses what amounts to the seek time of block - * layer request events, on a per-disk_devt basis, as input to the - * entropy pool. Note that high-speed solid state drives with very low - * seek times do not make for good sources of entropy, as their seek - * times are usually fairly consistent. - * - * All of these routines try to estimate how many bits of randomness a - * particular randomness source. They do this by keeping track of the - * first and second order deltas of the event timings. - * - * add_hwgenerator_randomness() is for true hardware RNGs, and will credit - * entropy as specified by the caller. If the entropy pool is full it will - * block until more entropy is needed. - * - * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or - * add_device_randomness(), depending on whether or not the configuration - * option CONFIG_RANDOM_TRUST_BOOTLOADER is set. - * - * Ensuring unpredictability at system startup - * ============================================ - * - * When any operating system starts up, it will go through a sequence - * of actions that are fairly predictable by an adversary, especially - * if the start-up does not involve interaction with a human operator. - * This reduces the actual number of bits of unpredictability in the - * entropy pool below the value in entropy_count. In order to - * counteract this effect, it helps to carry information in the - * entropy pool across shut-downs and start-ups. To do this, put the - * following lines an appropriate script which is run during the boot - * sequence: - * - * echo "Initializing random number generator..." 
- * random_seed=/var/run/random-seed - * # Carry a random seed from start-up to start-up - * # Load and then save the whole entropy pool - * if [ -f $random_seed ]; then - * cat $random_seed >/dev/urandom - * else - * touch $random_seed - * fi - * chmod 600 $random_seed - * dd if=/dev/urandom of=$random_seed count=1 bs=512 - * - * and the following lines in an appropriate script which is run as - * the system is shutdown: - * - * # Carry a random seed from shut-down to start-up - * # Save the whole entropy pool - * echo "Saving random seed..." - * random_seed=/var/run/random-seed - * touch $random_seed - * chmod 600 $random_seed - * dd if=/dev/urandom of=$random_seed count=1 bs=512 - * - * For example, on most modern systems using the System V init - * scripts, such code fragments would be found in - * /etc/rc.d/init.d/random. On older Linux systems, the correct script - * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0. - * - * Effectively, these commands cause the contents of the entropy pool - * to be saved at shut-down time and reloaded into the entropy pool at - * start-up. (The 'dd' in the addition to the bootup script is to - * make sure that /etc/random-seed is different for every start-up, - * even if the system crashes without executing rc.0.) Even with - * complete knowledge of the start-up activities, predicting the state - * of the entropy pool requires knowledge of the previous history of - * the system. - * - * Configuring the /dev/random driver under Linux - * ============================================== - * - * The /dev/random driver under Linux uses minor numbers 8 and 9 of - * the /dev/mem major number (#1). So if your system does not have - * /dev/random and /dev/urandom created already, they can be created - * by using the commands: - * - * mknod /dev/random c 1 8 - * mknod /dev/urandom c 1 9 - * - * Acknowledgements: - * ================= - * - * Ideas for constructing this random number generator were derived - * from Pretty Good Privacy's random number generator, and from private - * discussions with Phil Karn. Colin Plumb provided a faster random - * number generator, which speed up the mixing function of the entropy - * pool, taken from PGPfone. Dale Worley has also contributed many - * useful ideas and suggestions to improve this driver. - * - * Any flaws in the design are solely my responsibility, and should - * not be attributed to the Phil, Colin, or any of authors of PGP. - * - * Further background information on this topic may be obtained from - * RFC 1750, "Randomness Recommendations for Security", by Donald - * Eastlake, Steve Crocker, and Jeff Schiller. + * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved. + * + * This driver produces cryptographically secure pseudorandom data. It is divided + * into roughly six sections, each with a section header: + * + * - Initialization and readiness waiting. + * - Fast key erasure RNG, the "crng". + * - Entropy accumulation and extraction routines. + * - Entropy collection routines. + * - Userspace reader/writer interfaces. + * - Sysctl interface. + * + * The high level overview is that there is one input pool, into which + * various pieces of data are hashed. Some of that data is then "credited" as + * having a certain number of bits of entropy. When enough bits of entropy are + * available, the hash is finalized and handed as a key to a stream cipher that + * expands it indefinitely for various consumers. 
This key is periodically + * refreshed as the various entropy collectors, described below, add data to the + * input pool and credit it. There is currently no Fortuna-like scheduler + * involved, which can lead to malicious entropy sources causing a premature + * reseed, and the entropy estimates are, at best, conservative guesses. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -344,744 +52,937 @@ #include <linux/syscalls.h> #include <linux/completion.h> #include <linux/uuid.h> +#include <linux/uaccess.h> #include <crypto/chacha.h> #include <crypto/blake2s.h> - #include <asm/processor.h> -#include <linux/uaccess.h> #include <asm/irq.h> #include <asm/irq_regs.h> #include <asm/io.h> -#define CREATE_TRACE_POINTS -#include <trace/events/random.h> - -/* #define ADD_INTERRUPT_BENCH */ - -/* - * If the entropy count falls under this number of bits, then we - * should wake up processes which are selecting or polling on write - * access to /dev/random. - */ -static int random_write_wakeup_bits = 28 * (1 << 5); - -/* - * Originally, we used a primitive polynomial of degree .poolwords - * over GF(2). The taps for various sizes are defined below. They - * were chosen to be evenly spaced except for the last tap, which is 1 - * to get the twisting happening as fast as possible. - * - * For the purposes of better mixing, we use the CRC-32 polynomial as - * well to make a (modified) twisted Generalized Feedback Shift - * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR - * generators. ACM Transactions on Modeling and Computer Simulation - * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted - * GFSR generators II. ACM Transactions on Modeling and Computer - * Simulation 4:254-266) +/********************************************************************* * - * Thanks to Colin Plumb for suggesting this. + * Initialization and readiness waiting. * - * The mixing operation is much less sensitive than the output hash, - * where we use BLAKE2s. All that we want of mixing operation is that - * it be a good non-cryptographic hash; i.e. it not produce collisions - * when fed "random" data of the sort we expect to see. As long as - * the pool state differs for different inputs, we have preserved the - * input entropy and done a good job. The fact that an intelligent - * attacker can construct inputs that will produce controlled - * alterations to the pool's state is not important because we don't - * consider such inputs to contribute any randomness. The only - * property we need with respect to them is that the attacker can't - * increase his/her knowledge of the pool's state. Since all - * additions are reversible (knowing the final state and the input, - * you can reconstruct the initial state), if an attacker has any - * uncertainty about the initial state, he/she can only shuffle that - * uncertainty about, but never cause any collisions (which would - * decrease the uncertainty). + * Much of the RNG infrastructure is devoted to various dependencies + * being able to wait until the RNG has collected enough entropy and + * is ready for safe consumption. * - * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and - * Videau in their paper, "The Linux Pseudorandom Number Generator - * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their - * paper, they point out that we are not using a true Twisted GFSR, - * since Matsumoto & Kurita used a trinomial feedback polynomial (that - * is, with only three taps, instead of the six that we are using). 
- * As a result, the resulting polynomial is neither primitive nor - * irreducible, and hence does not have a maximal period over - * GF(2**32). They suggest a slight change to the generator - * polynomial which improves the resulting TGFSR polynomial to be - * irreducible, which we have made here. - */ -enum poolinfo { - POOL_WORDS = 128, - POOL_WORDMASK = POOL_WORDS - 1, - POOL_BYTES = POOL_WORDS * sizeof(u32), - POOL_BITS = POOL_BYTES * 8, - POOL_BITSHIFT = ilog2(POOL_BITS), - - /* To allow fractional bits to be tracked, the entropy_count field is - * denominated in units of 1/8th bits. */ - POOL_ENTROPY_SHIFT = 3, -#define POOL_ENTROPY_BITS() (input_pool.entropy_count >> POOL_ENTROPY_SHIFT) - POOL_FRACBITS = POOL_BITS << POOL_ENTROPY_SHIFT, - - /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */ - POOL_TAP1 = 104, - POOL_TAP2 = 76, - POOL_TAP3 = 51, - POOL_TAP4 = 25, - POOL_TAP5 = 1, - - EXTRACT_SIZE = BLAKE2S_HASH_SIZE / 2 -}; - -/* - * Static global variables - */ -static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); -static struct fasync_struct *fasync; - -static DEFINE_SPINLOCK(random_ready_list_lock); -static LIST_HEAD(random_ready_list); - -struct crng_state { - u32 state[16]; - unsigned long init_time; - spinlock_t lock; -}; - -static struct crng_state primary_crng = { - .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock), - .state[0] = CHACHA_CONSTANT_EXPA, - .state[1] = CHACHA_CONSTANT_ND_3, - .state[2] = CHACHA_CONSTANT_2_BY, - .state[3] = CHACHA_CONSTANT_TE_K, -}; + *********************************************************************/ /* * crng_init = 0 --> Uninitialized * 1 --> Initialized * 2 --> Initialized from input_pool * - * crng_init is protected by primary_crng->lock, and only increases + * crng_init is protected by base_crng->lock, and only increases * its value (from 0->1->2). */ static int crng_init = 0; -static bool crng_need_final_init = false; #define crng_ready() (likely(crng_init > 1)) -static int crng_init_cnt = 0; -static unsigned long crng_global_init_time = 0; -#define CRNG_INIT_CNT_THRESH (2 * CHACHA_KEY_SIZE) -static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE]); -static void _crng_backtrack_protect(struct crng_state *crng, - u8 tmp[CHACHA_BLOCK_SIZE], int used); -static void process_random_ready_list(void); -static void _get_random_bytes(void *buf, int nbytes); +/* Various types of waiters for crng_init->2 transition. */ +static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); +static struct fasync_struct *fasync; +static DEFINE_SPINLOCK(random_ready_chain_lock); +static RAW_NOTIFIER_HEAD(random_ready_chain); +/* Control how we warn userspace. */ static struct ratelimit_state unseeded_warning = RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3); -static struct ratelimit_state urandom_warning = - RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); - static int ratelimit_disable __read_mostly; - module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); -/********************************************************************** - * - * OS independent entropy store. Here are the functions which handle - * storing entropy in an entropy pool. +/* + * Returns whether or not the input pool has been seeded and thus guaranteed + * to supply cryptographically secure random numbers. This applies to + * get_random_bytes() and get_random_{u32,u64,int,long}(). 
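+ *
+ * A minimal caller sketch (illustrative only, not code in this file): a
+ * driver that must have a strong key would first block on
+ * wait_for_random_bytes(), defined below, and only then draw the key:
+ *
+ *	if (wait_for_random_bytes() == 0)
+ *		get_random_bytes(key, sizeof(key));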
* - **********************************************************************/ - -static u32 input_pool_data[POOL_WORDS] __latent_entropy; - -static struct { - spinlock_t lock; - u16 add_ptr; - u16 input_rotate; - int entropy_count; -} input_pool = { - .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), -}; + * Returns: true if the input pool has been seeded. + * false if the input pool has not been seeded. + */ +bool rng_is_initialized(void) +{ + return crng_ready(); +} +EXPORT_SYMBOL(rng_is_initialized); -static ssize_t extract_entropy(void *buf, size_t nbytes, int min); -static ssize_t _extract_entropy(void *buf, size_t nbytes); +/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */ +static void try_to_generate_entropy(void); -static void crng_reseed(struct crng_state *crng, bool use_input_pool); +/* + * Wait for the input pool to be seeded and thus guaranteed to supply + * cryptographically secure random numbers. This applies to + * get_random_bytes() and get_random_{u32,u64,int,long}(). Using any + * of these functions without first calling this function means that + * the returned numbers might not be cryptographically secure. + * + * Returns: 0 if the input pool has been seeded. + * -ERESTARTSYS if the function was interrupted by a signal. + */ +int wait_for_random_bytes(void) +{ + while (!crng_ready()) { + int ret; -static const u32 twist_table[8] = { - 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, - 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; + try_to_generate_entropy(); + ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ); + if (ret) + return ret > 0 ? 0 : ret; + } + return 0; +} +EXPORT_SYMBOL(wait_for_random_bytes); /* - * This function adds bytes into the entropy "pool". It does not - * update the entropy estimate. The caller should call - * credit_entropy_bits if this is appropriate. + * Add a callback function that will be invoked when the input + * pool is initialised. * - * The pool is stirred with a primitive polynomial of the appropriate - * degree, and then twisted. We twist by three bits at a time because - * it's cheap to do so and helps slightly in the expected case where - * the entropy is concentrated in the low-order bits. + * returns: 0 if callback is successfully added + * -EALREADY if pool is already initialised (callback not called) */ -static void _mix_pool_bytes(const void *in, int nbytes) +int register_random_ready_notifier(struct notifier_block *nb) { - unsigned long i; - int input_rotate; - const u8 *bytes = in; - u32 w; - - input_rotate = input_pool.input_rotate; - i = input_pool.add_ptr; - - /* mix one byte at a time to simplify size handling and churn faster */ - while (nbytes--) { - w = rol32(*bytes++, input_rotate); - i = (i - 1) & POOL_WORDMASK; - - /* XOR in the various taps */ - w ^= input_pool_data[i]; - w ^= input_pool_data[(i + POOL_TAP1) & POOL_WORDMASK]; - w ^= input_pool_data[(i + POOL_TAP2) & POOL_WORDMASK]; - w ^= input_pool_data[(i + POOL_TAP3) & POOL_WORDMASK]; - w ^= input_pool_data[(i + POOL_TAP4) & POOL_WORDMASK]; - w ^= input_pool_data[(i + POOL_TAP5) & POOL_WORDMASK]; - - /* Mix the result back in with a twist */ - input_pool_data[i] = (w >> 3) ^ twist_table[w & 7]; + unsigned long flags; + int ret = -EALREADY; - /* - * Normally, we add 7 bits of rotation to the pool. - * At the beginning of the pool, add an extra 7 bits - * rotation, so that successive passes spread the - * input bits across the pool evenly. - */ - input_rotate = (input_rotate + (i ? 
7 : 14)) & 31; - } + if (crng_ready()) + return ret; - input_pool.input_rotate = input_rotate; - input_pool.add_ptr = i; + spin_lock_irqsave(&random_ready_chain_lock, flags); + if (!crng_ready()) + ret = raw_notifier_chain_register(&random_ready_chain, nb); + spin_unlock_irqrestore(&random_ready_chain_lock, flags); + return ret; } -static void __mix_pool_bytes(const void *in, int nbytes) +/* + * Delete a previously registered readiness callback function. + */ +int unregister_random_ready_notifier(struct notifier_block *nb) { - trace_mix_pool_bytes_nolock(nbytes, _RET_IP_); - _mix_pool_bytes(in, nbytes); + unsigned long flags; + int ret; + + spin_lock_irqsave(&random_ready_chain_lock, flags); + ret = raw_notifier_chain_unregister(&random_ready_chain, nb); + spin_unlock_irqrestore(&random_ready_chain_lock, flags); + return ret; } -static void mix_pool_bytes(const void *in, int nbytes) +static void process_random_ready_list(void) { unsigned long flags; - trace_mix_pool_bytes(nbytes, _RET_IP_); - spin_lock_irqsave(&input_pool.lock, flags); - _mix_pool_bytes(in, nbytes); - spin_unlock_irqrestore(&input_pool.lock, flags); + spin_lock_irqsave(&random_ready_chain_lock, flags); + raw_notifier_call_chain(&random_ready_chain, 0, NULL); + spin_unlock_irqrestore(&random_ready_chain_lock, flags); } -struct fast_pool { - u32 pool[4]; - unsigned long last; - u16 reg_idx; - u8 count; -}; +#define warn_unseeded_randomness(previous) \ + _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous)) -/* - * This is a fast mixing routine used by the interrupt randomness - * collector. It's hardcoded for an 128 bit pool and assumes that any - * locks that might be needed are taken by the caller. - */ -static void fast_mix(struct fast_pool *f) +static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous) { - u32 a = f->pool[0], b = f->pool[1]; - u32 c = f->pool[2], d = f->pool[3]; +#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM + const bool print_once = false; +#else + static bool print_once __read_mostly; +#endif + + if (print_once || crng_ready() || + (previous && (caller == READ_ONCE(*previous)))) + return; + WRITE_ONCE(*previous, caller); +#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM + print_once = true; +#endif + if (__ratelimit(&unseeded_warning)) + printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", + func_name, caller, crng_init); +} - a += b; c += d; - b = rol32(b, 6); d = rol32(d, 27); - d ^= a; b ^= c; - a += b; c += d; - b = rol32(b, 16); d = rol32(d, 14); - d ^= a; b ^= c; +/********************************************************************* + * + * Fast key erasure RNG, the "crng". + * + * These functions expand entropy from the entropy extractor into + * long streams for external consumption using the "fast key erasure" + * RNG described at <https://blog.cr.yp.to/20170723-random.html>. + * + * There are a few exported interfaces for use by other drivers: + * + * void get_random_bytes(void *buf, size_t nbytes) + * u32 get_random_u32() + * u64 get_random_u64() + * unsigned int get_random_int() + * unsigned long get_random_long() + * + * These interfaces will return the requested number of random bytes + * into the given buffer or as a return value. The returned numbers are + * the same as those of getrandom(0). The integer family of functions may + * be higher performance for one-off random integers, because they do a + * bit of buffering and do not invoke reseeding. 
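+ *
+ * As a hedged illustration of choosing among them (hypothetical callers,
+ * not code in this file): a one-off value that need not stay secret after
+ * use can come from the cheap integer family, while arbitrary-length
+ * buffers use get_random_bytes():
+ *
+ *	u32 seq = get_random_u32();		(one-off, batched integer)
+ *	get_random_bytes(key, sizeof(key));	(arbitrary-length buffer)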
+ *
+ *********************************************************************/

-	a += b;			c += d;
-	b = rol32(b, 6);	d = rol32(d, 27);
-	d ^= a;			b ^= c;

+enum {
+	CRNG_RESEED_INTERVAL = 300 * HZ,
+	CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE
+};

-	a += b;			c += d;
-	b = rol32(b, 16);	d = rol32(d, 14);
-	d ^= a;			b ^= c;

+static struct {
+	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
+	unsigned long birth;
+	unsigned long generation;
+	spinlock_t lock;
+} base_crng = {
+	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
+};

-	f->pool[0] = a;  f->pool[1] = b;
-	f->pool[2] = c;  f->pool[3] = d;
-	f->count++;
-}

+struct crng {
+	u8 key[CHACHA_KEY_SIZE];
+	unsigned long generation;
+	local_lock_t lock;
+};

-static void process_random_ready_list(void)
+static DEFINE_PER_CPU(struct crng, crngs) = {
+	.generation = ULONG_MAX,
+	.lock = INIT_LOCAL_LOCK(crngs.lock),
+};
+
+/* Used by crng_reseed() to extract a new seed from the input pool. */
+static bool drain_entropy(void *buf, size_t nbytes, bool force);
+
+/*
+ * This extracts a new crng key from the input pool, but only if there is a
+ * sufficient amount of entropy available or force is true, in order to
+ * mitigate bruteforcing of newly added bits.
+ */
+static void crng_reseed(bool force)
 {
 	unsigned long flags;
-	struct random_ready_callback *rdy, *tmp;
+	unsigned long next_gen;
+	u8 key[CHACHA_KEY_SIZE];
+	bool finalize_init = false;

-	spin_lock_irqsave(&random_ready_list_lock, flags);
-	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
-		struct module *owner = rdy->owner;
+	/* Only reseed if we can, to prevent brute forcing a small amount of new bits. */
+	if (!drain_entropy(key, sizeof(key), force))
+		return;

-		list_del_init(&rdy->list);
-		rdy->func(rdy);
-		module_put(owner);
+	/*
+	 * We copy the new key into the base_crng, overwriting the old one,
+	 * and update the generation counter. We avoid hitting ULONG_MAX,
+	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
+	 * forces new CPUs that come online to always initialize.
+	 */
+	spin_lock_irqsave(&base_crng.lock, flags);
+	memcpy(base_crng.key, key, sizeof(base_crng.key));
+	next_gen = base_crng.generation + 1;
+	if (next_gen == ULONG_MAX)
+		++next_gen;
+	WRITE_ONCE(base_crng.generation, next_gen);
+	WRITE_ONCE(base_crng.birth, jiffies);
+	if (!crng_ready()) {
+		crng_init = 2;
+		finalize_init = true;
+	}
+	spin_unlock_irqrestore(&base_crng.lock, flags);
+	memzero_explicit(key, sizeof(key));
+	if (finalize_init) {
+		process_random_ready_list();
+		wake_up_interruptible(&crng_init_wait);
+		kill_fasync(&fasync, SIGIO, POLL_IN);
+		pr_notice("crng init done\n");
+		if (unseeded_warning.missed) {
+			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
+				  unseeded_warning.missed);
+			unseeded_warning.missed = 0;
+		}
 	}
-	spin_unlock_irqrestore(&random_ready_list_lock, flags);
 }

 /*
- * Credit (or debit) the entropy store with n bits of entropy.
- * Use credit_entropy_bits_safe() if the value comes from userspace
- * or otherwise should be checked for extreme values.
+ * This generates a ChaCha block using the provided key, and then
+ * immediately overwrites that key with half the block. It returns
+ * the resultant ChaCha state to the user, along with the second
+ * half of the block containing 32 bytes of random data that may
+ * be used; random_data_len may not be greater than 32.
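+ *
+ * Schematically, mirroring the code below (no additional API implied):
+ *
+ *	block = ChaCha(key, counter = 0)	(one 64-byte block)
+ *	key <- block[0..31]			(the old key is erased)
+ *	random_data <- block[32..]		(up to 32 bytes for the caller)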
*/ -static void credit_entropy_bits(int nbits) +static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], + u32 chacha_state[CHACHA_STATE_WORDS], + u8 *random_data, size_t random_data_len) { - int entropy_count, entropy_bits, orig; - int nfrac = nbits << POOL_ENTROPY_SHIFT; - - /* Ensure that the multiplication can avoid being 64 bits wide. */ - BUILD_BUG_ON(2 * (POOL_ENTROPY_SHIFT + POOL_BITSHIFT) > 31); - - if (!nbits) - return; + u8 first_block[CHACHA_BLOCK_SIZE]; -retry: - entropy_count = orig = READ_ONCE(input_pool.entropy_count); - if (nfrac < 0) { - /* Debit */ - entropy_count += nfrac; - } else { - /* - * Credit: we have to account for the possibility of - * overwriting already present entropy. Even in the - * ideal case of pure Shannon entropy, new contributions - * approach the full value asymptotically: - * - * entropy <- entropy + (pool_size - entropy) * - * (1 - exp(-add_entropy/pool_size)) - * - * For add_entropy <= pool_size/2 then - * (1 - exp(-add_entropy/pool_size)) >= - * (add_entropy/pool_size)*0.7869... - * so we can approximate the exponential with - * 3/4*add_entropy/pool_size and still be on the - * safe side by adding at most pool_size/2 at a time. - * - * The use of pool_size-2 in the while statement is to - * prevent rounding artifacts from making the loop - * arbitrarily long; this limits the loop to log2(pool_size)*2 - * turns no matter how large nbits is. - */ - int pnfrac = nfrac; - const int s = POOL_BITSHIFT + POOL_ENTROPY_SHIFT + 2; - /* The +2 corresponds to the /4 in the denominator */ - - do { - unsigned int anfrac = min(pnfrac, POOL_FRACBITS / 2); - unsigned int add = - ((POOL_FRACBITS - entropy_count) * anfrac * 3) >> s; - - entropy_count += add; - pnfrac -= anfrac; - } while (unlikely(entropy_count < POOL_FRACBITS - 2 && pnfrac)); - } + BUG_ON(random_data_len > 32); - if (WARN_ON(entropy_count < 0)) { - pr_warn("negative entropy/overflow: count %d\n", entropy_count); - entropy_count = 0; - } else if (entropy_count > POOL_FRACBITS) - entropy_count = POOL_FRACBITS; - if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig) - goto retry; + chacha_init_consts(chacha_state); + memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE); + memset(&chacha_state[12], 0, sizeof(u32) * 4); + chacha20_block(chacha_state, first_block); - trace_credit_entropy_bits(nbits, entropy_count >> POOL_ENTROPY_SHIFT, _RET_IP_); + memcpy(key, first_block, CHACHA_KEY_SIZE); + memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len); + memzero_explicit(first_block, sizeof(first_block)); +} - entropy_bits = entropy_count >> POOL_ENTROPY_SHIFT; - if (crng_init < 2 && entropy_bits >= 128) - crng_reseed(&primary_crng, true); +/* + * Return whether the crng seed is considered to be sufficiently + * old that a reseeding might be attempted. This happens if the last + * reseeding was CRNG_RESEED_INTERVAL ago, or during early boot, at + * an interval proportional to the uptime. + */ +static bool crng_has_old_seed(void) +{ + static bool early_boot = true; + unsigned long interval = CRNG_RESEED_INTERVAL; + + if (unlikely(READ_ONCE(early_boot))) { + time64_t uptime = ktime_get_seconds(); + if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2) + WRITE_ONCE(early_boot, false); + else + interval = max_t(unsigned int, 5 * HZ, + (unsigned int)uptime / 2 * HZ); + } + return time_after(jiffies, READ_ONCE(base_crng.birth) + interval); } -static int credit_entropy_bits_safe(int nbits) +/* + * This function returns a ChaCha state that you may use for generating + * random data. 
It also returns up to 32 bytes on its own of random data + * that may be used; random_data_len may not be greater than 32. + */ +static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], + u8 *random_data, size_t random_data_len) { - if (nbits < 0) - return -EINVAL; + unsigned long flags; + struct crng *crng; - /* Cap the value to avoid overflows */ - nbits = min(nbits, POOL_BITS); + BUG_ON(random_data_len > 32); - credit_entropy_bits(nbits); - return 0; -} + /* + * For the fast path, we check whether we're ready, unlocked first, and + * then re-check once locked later. In the case where we're really not + * ready, we do fast key erasure with the base_crng directly, because + * this is what crng_pre_init_inject() mutates during early init. + */ + if (!crng_ready()) { + bool ready; + + spin_lock_irqsave(&base_crng.lock, flags); + ready = crng_ready(); + if (!ready) + crng_fast_key_erasure(base_crng.key, chacha_state, + random_data, random_data_len); + spin_unlock_irqrestore(&base_crng.lock, flags); + if (!ready) + return; + } -/********************************************************************* - * - * CRNG using CHACHA20 - * - *********************************************************************/ + /* + * If the base_crng is old enough, we try to reseed, which in turn + * bumps the generation counter that we check below. + */ + if (unlikely(crng_has_old_seed())) + crng_reseed(false); -#define CRNG_RESEED_INTERVAL (300 * HZ) + local_lock_irqsave(&crngs.lock, flags); + crng = raw_cpu_ptr(&crngs); -static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); + /* + * If our per-cpu crng is older than the base_crng, then it means + * somebody reseeded the base_crng. In that case, we do fast key + * erasure on the base_crng, and use its output as the new key + * for our per-cpu crng. This brings us up to date with base_crng. + */ + if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) { + spin_lock(&base_crng.lock); + crng_fast_key_erasure(base_crng.key, chacha_state, + crng->key, sizeof(crng->key)); + crng->generation = base_crng.generation; + spin_unlock(&base_crng.lock); + } + + /* + * Finally, when we've made it this far, our per-cpu crng has an up + * to date key, and we can do fast key erasure with it to produce + * some random data and a ChaCha state for the caller. All other + * branches of this function are "unlikely", so most of the time we + * should wind up here immediately. + */ + crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len); + local_unlock_irqrestore(&crngs.lock, flags); +} /* - * Hack to deal with crazy userspace progams when they are all trying - * to access /dev/urandom in parallel. The programs are almost - * certainly doing something terribly wrong, but we'll work around - * their brain damage. + * This function is for crng_init == 0 only. It loads entropy directly + * into the crng's key, without going through the input pool. It is, + * generally speaking, not very safe, but we use this only at early + * boot time when it's better to have something there rather than + * nothing. + * + * If account is set, then the crng_init_cnt counter is incremented. + * This shouldn't be set by functions like add_device_randomness(), + * where we can't trust the buffer passed to it is guaranteed to be + * unpredictable (so it might not have any entropy at all). + * + * Returns the number of bytes processed from input, which is bounded + * by CRNG_INIT_CNT_THRESH if account is true. 
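+ *
+ * For instance, add_device_randomness() below passes account=false, since
+ * its input may hold no entropy at all, while the pre-init path of
+ * add_hwgenerator_randomness() passes account=true.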
*/ -static struct crng_state **crng_node_pool __read_mostly; +static size_t crng_pre_init_inject(const void *input, size_t len, bool account) +{ + static int crng_init_cnt = 0; + struct blake2s_state hash; + unsigned long flags; -static void invalidate_batched_entropy(void); -static void numa_crng_init(void); + blake2s_init(&hash, sizeof(base_crng.key)); -static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); -static int __init parse_trust_cpu(char *arg) -{ - return kstrtobool(arg, &trust_cpu); -} -early_param("random.trust_cpu", parse_trust_cpu); + spin_lock_irqsave(&base_crng.lock, flags); + if (crng_init != 0) { + spin_unlock_irqrestore(&base_crng.lock, flags); + return 0; + } -static bool crng_init_try_arch(struct crng_state *crng) -{ - int i; - bool arch_init = true; - unsigned long rv; + if (account) + len = min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt); - for (i = 4; i < 16; i++) { - if (!arch_get_random_seed_long(&rv) && - !arch_get_random_long(&rv)) { - rv = random_get_entropy(); - arch_init = false; + blake2s_update(&hash, base_crng.key, sizeof(base_crng.key)); + blake2s_update(&hash, input, len); + blake2s_final(&hash, base_crng.key); + + if (account) { + crng_init_cnt += len; + if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { + ++base_crng.generation; + crng_init = 1; } - crng->state[i] ^= rv; } - return arch_init; + spin_unlock_irqrestore(&base_crng.lock, flags); + + if (crng_init == 1) + pr_notice("fast init done\n"); + + return len; } -static bool __init crng_init_try_arch_early(struct crng_state *crng) +static void _get_random_bytes(void *buf, size_t nbytes) { - int i; - bool arch_init = true; - unsigned long rv; + u32 chacha_state[CHACHA_STATE_WORDS]; + u8 tmp[CHACHA_BLOCK_SIZE]; + size_t len; - for (i = 4; i < 16; i++) { - if (!arch_get_random_seed_long_early(&rv) && - !arch_get_random_long_early(&rv)) { - rv = random_get_entropy(); - arch_init = false; + if (!nbytes) + return; + + len = min_t(size_t, 32, nbytes); + crng_make_state(chacha_state, buf, len); + nbytes -= len; + buf += len; + + while (nbytes) { + if (nbytes < CHACHA_BLOCK_SIZE) { + chacha20_block(chacha_state, tmp); + memcpy(buf, tmp, nbytes); + memzero_explicit(tmp, sizeof(tmp)); + break; } - crng->state[i] ^= rv; + + chacha20_block(chacha_state, buf); + if (unlikely(chacha_state[12] == 0)) + ++chacha_state[13]; + nbytes -= CHACHA_BLOCK_SIZE; + buf += CHACHA_BLOCK_SIZE; } - return arch_init; + memzero_explicit(chacha_state, sizeof(chacha_state)); } -static void crng_initialize_secondary(struct crng_state *crng) +/* + * This function is the exported kernel interface. It returns some + * number of good random numbers, suitable for key generation, seeding + * TCP sequence numbers, etc. It does not rely on the hardware random + * number generator. For random bytes direct from the hardware RNG + * (when available), use get_random_bytes_arch(). In order to ensure + * that the randomness provided by this function is okay, the function + * wait_for_random_bytes() should be called and return 0 at least once + * at any point prior. 
+ */ +void get_random_bytes(void *buf, size_t nbytes) { - chacha_init_consts(crng->state); - _get_random_bytes(&crng->state[4], sizeof(u32) * 12); - crng_init_try_arch(crng); - crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; -} + static void *previous; -static void __init crng_initialize_primary(struct crng_state *crng) -{ - _extract_entropy(&crng->state[4], sizeof(u32) * 12); - if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) { - invalidate_batched_entropy(); - numa_crng_init(); - crng_init = 2; - pr_notice("crng init done (trusting CPU's manufacturer)\n"); - } - crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; + warn_unseeded_randomness(&previous); + _get_random_bytes(buf, nbytes); } +EXPORT_SYMBOL(get_random_bytes); -static void crng_finalize_init(struct crng_state *crng) +static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes) { - if (crng != &primary_crng || crng_init >= 2) - return; - if (!system_wq) { - /* We can't call numa_crng_init until we have workqueues, - * so mark this for processing later. */ - crng_need_final_init = true; - return; - } + bool large_request = nbytes > 256; + ssize_t ret = 0; + size_t len; + u32 chacha_state[CHACHA_STATE_WORDS]; + u8 output[CHACHA_BLOCK_SIZE]; - invalidate_batched_entropy(); - numa_crng_init(); - crng_init = 2; - process_random_ready_list(); - wake_up_interruptible(&crng_init_wait); - kill_fasync(&fasync, SIGIO, POLL_IN); - pr_notice("crng init done\n"); - if (unseeded_warning.missed) { - pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n", - unseeded_warning.missed); - unseeded_warning.missed = 0; - } - if (urandom_warning.missed) { - pr_notice("%d urandom warning(s) missed due to ratelimiting\n", - urandom_warning.missed); - urandom_warning.missed = 0; - } -} + if (!nbytes) + return 0; -static void do_numa_crng_init(struct work_struct *work) -{ - int i; - struct crng_state *crng; - struct crng_state **pool; - - pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL | __GFP_NOFAIL); - for_each_online_node(i) { - crng = kmalloc_node(sizeof(struct crng_state), - GFP_KERNEL | __GFP_NOFAIL, i); - spin_lock_init(&crng->lock); - crng_initialize_secondary(crng); - pool[i] = crng; - } - /* pairs with READ_ONCE() in select_crng() */ - if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) { - for_each_node(i) - kfree(pool[i]); - kfree(pool); - } -} + len = min_t(size_t, 32, nbytes); + crng_make_state(chacha_state, output, len); -static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init); + if (copy_to_user(buf, output, len)) + return -EFAULT; + nbytes -= len; + buf += len; + ret += len; -static void numa_crng_init(void) -{ - if (IS_ENABLED(CONFIG_NUMA)) - schedule_work(&numa_crng_init_work); -} + while (nbytes) { + if (large_request && need_resched()) { + if (signal_pending(current)) + break; + schedule(); + } -static struct crng_state *select_crng(void) -{ - if (IS_ENABLED(CONFIG_NUMA)) { - struct crng_state **pool; - int nid = numa_node_id(); - - /* pairs with cmpxchg_release() in do_numa_crng_init() */ - pool = READ_ONCE(crng_node_pool); - if (pool && pool[nid]) - return pool[nid]; + chacha20_block(chacha_state, output); + if (unlikely(chacha_state[12] == 0)) + ++chacha_state[13]; + + len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE); + if (copy_to_user(buf, output, len)) { + ret = -EFAULT; + break; + } + + nbytes -= len; + buf += len; + ret += len; } - return &primary_crng; + memzero_explicit(chacha_state, sizeof(chacha_state)); + memzero_explicit(output, sizeof(output)); + return ret; } 
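+
+/*
+ * Note on the loops above: output is produced one ChaCha block at a time,
+ * and because chacha20_block() only advances the low 32-bit counter word
+ * (state word 12), the carry into word 13 is propagated by hand.
+ */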
 /*
- * crng_fast_load() can be called by code in the interrupt service
- * path. So we can't afford to dilly-dally. Returns the number of
- * bytes processed from cp.
+ * Batched entropy returns random integers. The quality of the random
+ * number is as good as that of /dev/urandom. In order to ensure that the
+ * randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once at
+ * any point prior.
  */
-static size_t crng_fast_load(const u8 *cp, size_t len)
+struct batched_entropy {
+	union {
+		/*
+		 * We make this 1.5x a ChaCha block, so that we get the
+		 * remaining 32 bytes from fast key erasure, plus one full
+		 * block from the detached ChaCha state. We can increase
+		 * the size of this later if needed so long as we keep the
+		 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
+		 */
+		u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
+		u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
+	};
+	local_lock_t lock;
+	unsigned long generation;
+	unsigned int position;
+};
+
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+	.lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
+	.position = UINT_MAX
+};
+
+u64 get_random_u64(void)
 {
+	u64 ret;
 	unsigned long flags;
-	u8 *p;
-	size_t ret = 0;
+	struct batched_entropy *batch;
+	static void *previous;
+	unsigned long next_gen;

-	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
-		return 0;
-	if (crng_init != 0) {
-		spin_unlock_irqrestore(&primary_crng.lock, flags);
-		return 0;
-	}
-	p = (u8 *)&primary_crng.state[4];
-	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
-		p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
-		cp++; crng_init_cnt++; len--; ret++;
-	}
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
-	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
-		invalidate_batched_entropy();
-		crng_init = 1;
-		pr_notice("fast init done\n");
+	warn_unseeded_randomness(&previous);
+
+	local_lock_irqsave(&batched_entropy_u64.lock, flags);
+	batch = raw_cpu_ptr(&batched_entropy_u64);
+
+	next_gen = READ_ONCE(base_crng.generation);
+	if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
+	    next_gen != batch->generation) {
+		_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
+		batch->position = 0;
+		batch->generation = next_gen;
 	}
+
+	ret = batch->entropy_u64[batch->position];
+	batch->entropy_u64[batch->position] = 0;
+	++batch->position;
+	local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL(get_random_u64);

-/*
- * crng_slow_load() is called by add_device_randomness, which has two
- * attributes. (1) We can't trust the buffer passed to it is
- * guaranteed to be unpredictable (so it might not have any entropy at
- * all), and (2) it doesn't have the performance constraints of
- * crng_fast_load().
- *
- * So we do something more comprehensive which is guaranteed to touch
- * all of the primary_crng's state, and which uses a LFSR with a
- * period of 255 as part of the mixing algorithm. Finally, we do
- * *not* advance crng_init_cnt since buffer we may get may be something
- * like a fixed DMI table (for example), which might very well be
- * unique to the machine, but is otherwise unvarying.
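+
+/*
+ * Layout note for the batch above (following the union's own comment):
+ * 3/2 * CHACHA_BLOCK_SIZE = 96 bytes per batch, i.e. twelve u64s or
+ * twenty-four u32s between refills.
+ */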
- */ -static int crng_slow_load(const u8 *cp, size_t len) +static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = { + .lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock), + .position = UINT_MAX +}; + +u32 get_random_u32(void) { + u32 ret; unsigned long flags; - static u8 lfsr = 1; - u8 tmp; - unsigned int i, max = CHACHA_KEY_SIZE; - const u8 *src_buf = cp; - u8 *dest_buf = (u8 *)&primary_crng.state[4]; + struct batched_entropy *batch; + static void *previous; + unsigned long next_gen; - if (!spin_trylock_irqsave(&primary_crng.lock, flags)) - return 0; - if (crng_init != 0) { - spin_unlock_irqrestore(&primary_crng.lock, flags); - return 0; - } - if (len > max) - max = len; - - for (i = 0; i < max; i++) { - tmp = lfsr; - lfsr >>= 1; - if (tmp & 1) - lfsr ^= 0xE1; - tmp = dest_buf[i % CHACHA_KEY_SIZE]; - dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr; - lfsr += (tmp << 3) | (tmp >> 5); + warn_unseeded_randomness(&previous); + + local_lock_irqsave(&batched_entropy_u32.lock, flags); + batch = raw_cpu_ptr(&batched_entropy_u32); + + next_gen = READ_ONCE(base_crng.generation); + if (batch->position >= ARRAY_SIZE(batch->entropy_u32) || + next_gen != batch->generation) { + _get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32)); + batch->position = 0; + batch->generation = next_gen; } - spin_unlock_irqrestore(&primary_crng.lock, flags); - return 1; + + ret = batch->entropy_u32[batch->position]; + batch->entropy_u32[batch->position] = 0; + ++batch->position; + local_unlock_irqrestore(&batched_entropy_u32.lock, flags); + return ret; } +EXPORT_SYMBOL(get_random_u32); -static void crng_reseed(struct crng_state *crng, bool use_input_pool) +#ifdef CONFIG_SMP +/* + * This function is called when the CPU is coming up, with entry + * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP. + */ +int random_prepare_cpu(unsigned int cpu) { - unsigned long flags; - int i, num; - union { - u8 block[CHACHA_BLOCK_SIZE]; - u32 key[8]; - } buf; + /* + * When the cpu comes back online, immediately invalidate both + * the per-cpu crng and all batches, so that we serve fresh + * randomness. + */ + per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX; + per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX; + per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX; + return 0; +} +#endif - if (use_input_pool) { - num = extract_entropy(&buf, 32, 16); - if (num == 0) - return; - } else { - _extract_crng(&primary_crng, buf.block); - _crng_backtrack_protect(&primary_crng, buf.block, - CHACHA_KEY_SIZE); - } - spin_lock_irqsave(&crng->lock, flags); - for (i = 0; i < 8; i++) { - unsigned long rv; - if (!arch_get_random_seed_long(&rv) && - !arch_get_random_long(&rv)) - rv = random_get_entropy(); - crng->state[i + 4] ^= buf.key[i] ^ rv; +/** + * randomize_page - Generate a random, page aligned address + * @start: The smallest acceptable address the caller will take. + * @range: The size of the area, starting at @start, within which the + * random address must fall. + * + * If @start + @range would overflow, @range is capped. + * + * NOTE: Historical use of randomize_range, which this replaces, presumed that + * @start was already page aligned. We now align it regardless. + * + * Return: A page aligned address within [start, start + range). On error, + * @start is returned. 
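+ *
+ * Illustrative use (hypothetical values): pick a page-aligned address
+ * anywhere in a 1 GiB window above a page-aligned base address:
+ *
+ *	addr = randomize_page(base, SZ_1G);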
+ */
+unsigned long randomize_page(unsigned long start, unsigned long range)
+{
+	if (!PAGE_ALIGNED(start)) {
+		range -= PAGE_ALIGN(start) - start;
+		start = PAGE_ALIGN(start);
 	}
-	memzero_explicit(&buf, sizeof(buf));
-	WRITE_ONCE(crng->init_time, jiffies);
-	spin_unlock_irqrestore(&crng->lock, flags);
-	crng_finalize_init(crng);
+
+	if (start > ULONG_MAX - range)
+		range = ULONG_MAX - start;
+
+	range >>= PAGE_SHIFT;
+
+	if (range == 0)
+		return start;
+
+	return start + (get_random_long() % range << PAGE_SHIFT);
 }

-static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE])
+/*
+ * This function will use the architecture-specific hardware random
+ * number generator if it is available. It is not recommended for
+ * use. Use get_random_bytes() instead. It returns the number of
+ * bytes filled in.
+ */
+size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
 {
-	unsigned long flags, init_time;
+	size_t left = nbytes;
+	u8 *p = buf;

-	if (crng_ready()) {
-		init_time = READ_ONCE(crng->init_time);
-		if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
-		    time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
-			crng_reseed(crng, crng == &primary_crng);
+	while (left) {
+		unsigned long v;
+		size_t chunk = min_t(size_t, left, sizeof(unsigned long));
+
+		if (!arch_get_random_long(&v))
+			break;
+
+		memcpy(p, &v, chunk);
+		p += chunk;
+		left -= chunk;
 	}
-	spin_lock_irqsave(&crng->lock, flags);
-	chacha20_block(&crng->state[0], out);
-	if (crng->state[12] == 0)
-		crng->state[13]++;
-	spin_unlock_irqrestore(&crng->lock, flags);
+
+	return nbytes - left;
 }
+EXPORT_SYMBOL(get_random_bytes_arch);
+
+
+/**********************************************************************
+ *
+ * Entropy accumulation and extraction routines.
+ *
+ * Callers may add entropy via:
+ *
+ *	static void mix_pool_bytes(const void *in, size_t nbytes)
+ *
+ * After which, if added entropy should be credited:
+ *
+ *	static void credit_entropy_bits(size_t nbits)
+ *
+ * Finally, extract entropy via these two, with the latter one
+ * setting the entropy count to zero and extracting only if there
+ * are at least POOL_MIN_BITS of entropy credited prior or force is true:
+ *
+ *	static void extract_entropy(void *buf, size_t nbytes)
+ *	static bool drain_entropy(void *buf, size_t nbytes, bool force)
+ *
+ **********************************************************************/
+
+enum {
+	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
+	POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
+};
+
+/* For notifying userspace when it should write into /dev/random. */
+static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+
+static struct {
+	struct blake2s_state hash;
+	spinlock_t lock;
+	unsigned int entropy_count;
+} input_pool = {
+	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
+		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
+		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
+	.hash.outlen = BLAKE2S_HASH_SIZE,
+	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+};

-static void extract_crng(u8 out[CHACHA_BLOCK_SIZE])
+static void _mix_pool_bytes(const void *in, size_t nbytes)
 {
-	_extract_crng(select_crng(), out);
+	blake2s_update(&input_pool.hash, in, nbytes);
 }

 /*
- * Use the leftover bytes from the CRNG block output (if there is
- * enough) to mutate the CRNG key to provide backtracking protection.
+ * This function adds bytes into the entropy "pool". It does not
+ * update the entropy estimate. The caller should call
+ * credit_entropy_bits if this is appropriate.
*/ -static void _crng_backtrack_protect(struct crng_state *crng, - u8 tmp[CHACHA_BLOCK_SIZE], int used) +static void mix_pool_bytes(const void *in, size_t nbytes) { unsigned long flags; - u32 *s, *d; - int i; - used = round_up(used, sizeof(u32)); - if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) { - extract_crng(tmp); - used = 0; - } - spin_lock_irqsave(&crng->lock, flags); - s = (u32 *)&tmp[used]; - d = &crng->state[4]; - for (i = 0; i < 8; i++) - *d++ ^= *s++; - spin_unlock_irqrestore(&crng->lock, flags); + spin_lock_irqsave(&input_pool.lock, flags); + _mix_pool_bytes(in, nbytes); + spin_unlock_irqrestore(&input_pool.lock, flags); } -static void crng_backtrack_protect(u8 tmp[CHACHA_BLOCK_SIZE], int used) +static void credit_entropy_bits(size_t nbits) { - _crng_backtrack_protect(select_crng(), tmp, used); + unsigned int entropy_count, orig, add; + + if (!nbits) + return; + + add = min_t(size_t, nbits, POOL_BITS); + + do { + orig = READ_ONCE(input_pool.entropy_count); + entropy_count = min_t(unsigned int, POOL_BITS, orig + add); + } while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig); + + if (!crng_ready() && entropy_count >= POOL_MIN_BITS) + crng_reseed(false); } -static ssize_t extract_crng_user(void __user *buf, size_t nbytes) +/* + * This is an HKDF-like construction for using the hashed collected entropy + * as a PRF key, that's then expanded block-by-block. + */ +static void extract_entropy(void *buf, size_t nbytes) { - ssize_t ret = 0, i = CHACHA_BLOCK_SIZE; - u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4); - int large_request = (nbytes > 256); + unsigned long flags; + u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE]; + struct { + unsigned long rdseed[32 / sizeof(long)]; + size_t counter; + } block; + size_t i; + + for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) { + if (!arch_get_random_seed_long(&block.rdseed[i]) && + !arch_get_random_long(&block.rdseed[i])) + block.rdseed[i] = random_get_entropy(); + } - while (nbytes) { - if (large_request && need_resched()) { - if (signal_pending(current)) { - if (ret == 0) - ret = -ERESTARTSYS; - break; - } - schedule(); - } + spin_lock_irqsave(&input_pool.lock, flags); - extract_crng(tmp); - i = min_t(int, nbytes, CHACHA_BLOCK_SIZE); - if (copy_to_user(buf, tmp, i)) { - ret = -EFAULT; - break; - } + /* seed = HASHPRF(last_key, entropy_input) */ + blake2s_final(&input_pool.hash, seed); + + /* next_key = HASHPRF(seed, RDSEED || 0) */ + block.counter = 0; + blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed)); + blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key)); + spin_unlock_irqrestore(&input_pool.lock, flags); + memzero_explicit(next_key, sizeof(next_key)); + + while (nbytes) { + i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE); + /* output = HASHPRF(seed, RDSEED || ++counter) */ + ++block.counter; + blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed)); nbytes -= i; buf += i; - ret += i; } - crng_backtrack_protect(tmp, i); - /* Wipe data just written to memory */ - memzero_explicit(tmp, sizeof(tmp)); + memzero_explicit(seed, sizeof(seed)); + memzero_explicit(&block, sizeof(block)); +} - return ret; +/* + * First we make sure we have POOL_MIN_BITS of entropy in the pool unless force + * is true, and then we set the entropy count to zero (but don't actually touch + * any data). Only then can we extract a new key with extract_entropy(). 
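+ *
+ * Schematically, the extraction that extract_entropy() above performs,
+ * following its inline comments:
+ *
+ *	seed = HASHPRF(last_key, entropy_input)
+ *	next_key = HASHPRF(seed, RDSEED || 0)
+ *	output_i = HASHPRF(seed, RDSEED || i)	for i = 1, 2, 3, ...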
+ */ +static bool drain_entropy(void *buf, size_t nbytes, bool force) +{ + unsigned int entropy_count; + do { + entropy_count = READ_ONCE(input_pool.entropy_count); + if (!force && entropy_count < POOL_MIN_BITS) + return false; + } while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count); + extract_entropy(buf, nbytes); + wake_up_interruptible(&random_write_wait); + kill_fasync(&fasync, SIGIO, POLL_OUT); + return true; } -/********************************************************************* + +/********************************************************************** * - * Entropy input management + * Entropy collection routines. * - *********************************************************************/ + * The following exported functions are used for pushing entropy into + * the above entropy accumulation routines: + * + * void add_device_randomness(const void *buf, size_t size); + * void add_input_randomness(unsigned int type, unsigned int code, + * unsigned int value); + * void add_disk_randomness(struct gendisk *disk); + * void add_hwgenerator_randomness(const void *buffer, size_t count, + * size_t entropy); + * void add_bootloader_randomness(const void *buf, size_t size); + * void add_vmfork_randomness(const void *unique_vm_id, size_t size); + * void add_interrupt_randomness(int irq); + * + * add_device_randomness() adds data to the input pool that + * is likely to differ between two devices (or possibly even per boot). + * This would be things like MAC addresses or serial numbers, or the + * read-out of the RTC. This does *not* credit any actual entropy to + * the pool, but it initializes the pool to different values for devices + * that might otherwise be identical and have very little entropy + * available to them (particularly common in the embedded world). + * + * add_input_randomness() uses the input layer interrupt timing, as well + * as the event type information from the hardware. + * + * add_disk_randomness() uses what amounts to the seek time of block + * layer request events, on a per-disk_devt basis, as input to the + * entropy pool. Note that high-speed solid state drives with very low + * seek times do not make for good sources of entropy, as their seek + * times are usually fairly consistent. + * + * The above two routines try to estimate how many bits of entropy + * to credit. They do this by keeping track of the first and second + * order deltas of the event timings. + * + * add_hwgenerator_randomness() is for true hardware RNGs, and will credit + * entropy as specified by the caller. If the entropy pool is full it will + * block until more entropy is needed. + * + * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or + * add_device_randomness(), depending on whether or not the configuration + * option CONFIG_RANDOM_TRUST_BOOTLOADER is set. + * + * add_vmfork_randomness() adds a unique (but not necessarily secret) ID + * representing the current instance of a VM to the pool, without crediting, + * and then force-reseeds the crng so that it takes effect immediately. + * + * add_interrupt_randomness() uses the interrupt timing as random + * inputs to the entropy pool. Using the cycle counters and the irq source + * as inputs, it feeds the input pool roughly once a second or after 64 + * interrupts, crediting 1 bit of entropy for whichever comes first. 
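+ *
+ * As an illustration (hypothetical driver, not code in this file), a
+ * network driver might seed per-device differences with:
+ *
+ *	add_device_randomness(netdev->dev_addr, ETH_ALEN);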
+ * + **********************************************************************/ -/* There is one of these per entropy source */ -struct timer_rand_state { - cycles_t last_time; - long last_delta, last_delta2; -}; +static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); +static int __init parse_trust_cpu(char *arg) +{ + return kstrtobool(arg, &trust_cpu); +} +early_param("random.trust_cpu", parse_trust_cpu); + +/* + * The first collection of entropy occurs at system boot while interrupts + * are still turned off. Here we push in RDSEED, a timestamp, and utsname(). + * Depending on the above configuration knob, RDSEED may be considered + * sufficient for initialization. Note that much earlier setup may already + * have pushed entropy into the input pool by the time we get here. + */ +int __init rand_initialize(void) +{ + size_t i; + ktime_t now = ktime_get_real(); + bool arch_init = true; + unsigned long rv; + + for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) { + if (!arch_get_random_seed_long_early(&rv) && + !arch_get_random_long_early(&rv)) { + rv = random_get_entropy(); + arch_init = false; + } + _mix_pool_bytes(&rv, sizeof(rv)); + } + _mix_pool_bytes(&now, sizeof(now)); + _mix_pool_bytes(utsname(), sizeof(*(utsname()))); -#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }; + extract_entropy(base_crng.key, sizeof(base_crng.key)); + ++base_crng.generation; + + if (arch_init && trust_cpu && !crng_ready()) { + crng_init = 2; + pr_notice("crng init done (trusting CPU's manufacturer)\n"); + } + + if (ratelimit_disable) + unseeded_warning.interval = 0; + return 0; +} /* * Add device- or boot-specific data to the input pool to help @@ -1091,23 +992,27 @@ struct timer_rand_state { * the entropy pool having similar initial state across largely * identical devices. */ -void add_device_randomness(const void *buf, unsigned int size) +void add_device_randomness(const void *buf, size_t size) { - unsigned long time = random_get_entropy() ^ jiffies; - unsigned long flags; + cycles_t cycles = random_get_entropy(); + unsigned long flags, now = jiffies; - if (!crng_ready() && size) - crng_slow_load(buf, size); + if (crng_init == 0 && size) + crng_pre_init_inject(buf, size, false); - trace_add_device_randomness(size, _RET_IP_); spin_lock_irqsave(&input_pool.lock, flags); + _mix_pool_bytes(&cycles, sizeof(cycles)); + _mix_pool_bytes(&now, sizeof(now)); _mix_pool_bytes(buf, size); - _mix_pool_bytes(&time, sizeof(time)); spin_unlock_irqrestore(&input_pool.lock, flags); } EXPORT_SYMBOL(add_device_randomness); -static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE; +/* There is one of these per entropy source */ +struct timer_rand_state { + unsigned long last_time; + long last_delta, last_delta2; +}; /* * This function adds entropy to the entropy "pool" by using timing @@ -1117,29 +1022,26 @@ static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE; * The number "num" is also added to the pool - it should somehow describe * the type of event which just happened. This is currently 0-255 for * keyboard scan codes, and 256 upwards for interrupts. 
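+ *
+ * A rough worked example of the estimator below (illustrative numbers):
+ * events at jiffies 1000, 1050, 1130 and 1260 yield, at the last event, a
+ * first-order delta of 130, a second-order delta of 50 and a third-order
+ * delta of 20; the smallest of these, 20, bounds the credit, giving
+ * fls(20 >> 1) = 4 bits, against an overall cap of 11 bits.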
- * */ -static void add_timer_randomness(struct timer_rand_state *state, unsigned num) +static void add_timer_randomness(struct timer_rand_state *state, unsigned int num) { - struct { - long jiffies; - unsigned int cycles; - unsigned int num; - } sample; + cycles_t cycles = random_get_entropy(); + unsigned long flags, now = jiffies; long delta, delta2, delta3; - sample.jiffies = jiffies; - sample.cycles = random_get_entropy(); - sample.num = num; - mix_pool_bytes(&sample, sizeof(sample)); + spin_lock_irqsave(&input_pool.lock, flags); + _mix_pool_bytes(&cycles, sizeof(cycles)); + _mix_pool_bytes(&now, sizeof(now)); + _mix_pool_bytes(&num, sizeof(num)); + spin_unlock_irqrestore(&input_pool.lock, flags); /* * Calculate number of bits of randomness we probably added. * We take into account the first, second and third-order deltas * in order to make our estimate. */ - delta = sample.jiffies - READ_ONCE(state->last_time); - WRITE_ONCE(state->last_time, sample.jiffies); + delta = now - READ_ONCE(state->last_time); + WRITE_ONCE(state->last_time, now); delta2 = delta - READ_ONCE(state->last_delta); WRITE_ONCE(state->last_delta, delta); @@ -1163,318 +1065,303 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) * Round down by 1 bit on general principles, * and limit entropy estimate to 12 bits. */ - credit_entropy_bits(min_t(int, fls(delta >> 1), 11)); + credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11)); } void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) { static unsigned char last_value; + static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES }; - /* ignore autorepeat and the like */ + /* Ignore autorepeat and the like. */ if (value == last_value) return; last_value = value; add_timer_randomness(&input_timer_state, (type << 4) ^ code ^ (code >> 4) ^ value); - trace_add_input_randomness(POOL_ENTROPY_BITS()); } EXPORT_SYMBOL_GPL(add_input_randomness); -static DEFINE_PER_CPU(struct fast_pool, irq_randomness); - -#ifdef ADD_INTERRUPT_BENCH -static unsigned long avg_cycles, avg_deviation; - -#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */ -#define FIXED_1_2 (1 << (AVG_SHIFT - 1)) - -static void add_interrupt_bench(cycles_t start) +#ifdef CONFIG_BLOCK +void add_disk_randomness(struct gendisk *disk) { - long delta = random_get_entropy() - start; - - /* Use a weighted moving average */ - delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT); - avg_cycles += delta; - /* And average deviation */ - delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT); - avg_deviation += delta; + if (!disk || !disk->random) + return; + /* First major is 1, so we get >= 0x200 here. */ + add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); } -#else -#define add_interrupt_bench(x) -#endif +EXPORT_SYMBOL_GPL(add_disk_randomness); -static u32 get_reg(struct fast_pool *f, struct pt_regs *regs) +void rand_initialize_disk(struct gendisk *disk) { - u32 *ptr = (u32 *)regs; - unsigned int idx; + struct timer_rand_state *state; - if (regs == NULL) - return 0; - idx = READ_ONCE(f->reg_idx); - if (idx >= sizeof(struct pt_regs) / sizeof(u32)) - idx = 0; - ptr += idx++; - WRITE_ONCE(f->reg_idx, idx); - return *ptr; + /* + * If kzalloc returns null, we just won't use that entropy + * source. 
+ */
+	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+	if (state) {
+		state->last_time = INITIAL_JIFFIES;
+		disk->random = state;
+	}
}
+#endif
-void add_interrupt_randomness(int irq)
+/*
+ * Interface for in-kernel drivers of true hardware RNGs.
+ * Those devices may produce endless random bits and will be throttled
+ * when our pool is full.
+ */
+void add_hwgenerator_randomness(const void *buffer, size_t count,
+				size_t entropy)
{
-	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
-	struct pt_regs *regs = get_irq_regs();
-	unsigned long now = jiffies;
-	cycles_t cycles = random_get_entropy();
-	u32 c_high, j_high;
-	u64 ip;
-
-	if (cycles == 0)
-		cycles = get_reg(fast_pool, regs);
-	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
-	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
-	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
-	fast_pool->pool[1] ^= now ^ c_high;
-	ip = regs ? instruction_pointer(regs) : _RET_IP_;
-	fast_pool->pool[2] ^= ip;
-	fast_pool->pool[3] ^=
-		(sizeof(ip) > 4) ? ip >> 32 : get_reg(fast_pool, regs);
-
-	fast_mix(fast_pool);
-	add_interrupt_bench(cycles);
-	if (unlikely(crng_init == 0)) {
-		if ((fast_pool->count >= 64) &&
-		    crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
-			fast_pool->count = 0;
-			fast_pool->last = now;
-		}
-		return;
+	if (unlikely(crng_init == 0)) {
+		size_t ret = crng_pre_init_inject(buffer, count, true);
+		mix_pool_bytes(buffer, ret);
+		count -= ret;
+		buffer += ret;
+		if (!count || crng_init == 0)
+			return;
	}
-	if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
-		return;
+	/*
+	 * Throttle writing if we're above the trickle threshold.
+	 * We'll be woken up again once below POOL_MIN_BITS, when
+	 * the calling thread is about to terminate, or once
+	 * CRNG_RESEED_INTERVAL has elapsed.
+	 */
+	wait_event_interruptible_timeout(random_write_wait,
+			!system_wq || kthread_should_stop() ||
+			input_pool.entropy_count < POOL_MIN_BITS,
+			CRNG_RESEED_INTERVAL);
+	mix_pool_bytes(buffer, count);
+	credit_entropy_bits(entropy);
+}
+EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
-	if (!spin_trylock(&input_pool.lock))
-		return;
+/*
+ * Handle a random seed passed in by the bootloader.
+ * If the seed is trustworthy, it is credited as entropy from a hardware
+ * RNG; otherwise it is treated as uncredited device data.
+ * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
+ */
+void add_bootloader_randomness(const void *buf, size_t size)
+{
+	if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
+		add_hwgenerator_randomness(buf, size, size * 8);
+	else
+		add_device_randomness(buf, size);
+}
+EXPORT_SYMBOL_GPL(add_bootloader_randomness);
-	fast_pool->last = now;
-	__mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
-	spin_unlock(&input_pool.lock);
+#if IS_ENABLED(CONFIG_VMGENID)
+static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
-	fast_pool->count = 0;
+/*
+ * Handle a new unique VM ID, which is unique but not secret, so we
+ * don't credit it, but we do immediately force a reseed after so
+ * that it's used by the crng posthaste.
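+ *
+ * Consumers that must react to a fork themselves (say, to throw away
+ * cached session keys) can hook the notifier chain below; a minimal
+ * sketch, with illustrative driver names:
+ *
+ *	static int mydrv_vmfork_cb(struct notifier_block *nb,
+ *				   unsigned long action, void *data)
+ *	{
+ *		mydrv_invalidate_session_keys();
+ *		return NOTIFY_DONE;
+ *	}
+ *
+ *	static struct notifier_block mydrv_vmfork_nb = {
+ *		.notifier_call = mydrv_vmfork_cb,
+ *	};
+ *
+ *	err = register_random_vmfork_notifier(&mydrv_vmfork_nb);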
+ */ +void add_vmfork_randomness(const void *unique_vm_id, size_t size) +{ + add_device_randomness(unique_vm_id, size); + if (crng_ready()) { + crng_reseed(true); + pr_notice("crng reseeded due to virtual machine fork\n"); + } + blocking_notifier_call_chain(&vmfork_chain, 0, NULL); +} +#if IS_MODULE(CONFIG_VMGENID) +EXPORT_SYMBOL_GPL(add_vmfork_randomness); +#endif - /* award one bit for the contents of the fast pool */ - credit_entropy_bits(1); +int register_random_vmfork_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&vmfork_chain, nb); } -EXPORT_SYMBOL_GPL(add_interrupt_randomness); +EXPORT_SYMBOL_GPL(register_random_vmfork_notifier); -#ifdef CONFIG_BLOCK -void add_disk_randomness(struct gendisk *disk) +int unregister_random_vmfork_notifier(struct notifier_block *nb) { - if (!disk || !disk->random) - return; - /* first major is 1, so we get >= 0x200 here */ - add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); - trace_add_disk_randomness(disk_devt(disk), POOL_ENTROPY_BITS()); + return blocking_notifier_chain_unregister(&vmfork_chain, nb); } -EXPORT_SYMBOL_GPL(add_disk_randomness); +EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier); #endif -/********************************************************************* - * - * Entropy extraction routines - * - *********************************************************************/ +struct fast_pool { + struct work_struct mix; + unsigned long pool[4]; + unsigned long last; + unsigned int count; + u16 reg_idx; +}; + +static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = { +#ifdef CONFIG_64BIT + /* SipHash constants */ + .pool = { 0x736f6d6570736575UL, 0x646f72616e646f6dUL, + 0x6c7967656e657261UL, 0x7465646279746573UL } +#else + /* HalfSipHash constants */ + .pool = { 0, 0, 0x6c796765U, 0x74656462U } +#endif +}; /* - * This function decides how many bytes to actually take from the - * given pool, and also debits the entropy count accordingly. + * This is [Half]SipHash-1-x, starting from an empty key. Because + * the key is fixed, it assumes that its inputs are non-malicious, + * and therefore this has no security on its own. s represents the + * 128 or 256-bit SipHash state, while v represents a 128-bit input. */ -static size_t account(size_t nbytes, int min) +static void fast_mix(unsigned long s[4], const unsigned long *v) { - int entropy_count, orig; - size_t ibytes, nfrac; - - BUG_ON(input_pool.entropy_count > POOL_FRACBITS); - - /* Can we pull enough? 
*/ -retry: - entropy_count = orig = READ_ONCE(input_pool.entropy_count); - if (WARN_ON(entropy_count < 0)) { - pr_warn("negative entropy count: count %d\n", entropy_count); - entropy_count = 0; - } + size_t i; - /* never pull more than available */ - ibytes = min_t(size_t, nbytes, entropy_count >> (POOL_ENTROPY_SHIFT + 3)); - if (ibytes < min) - ibytes = 0; - nfrac = ibytes << (POOL_ENTROPY_SHIFT + 3); - if ((size_t)entropy_count > nfrac) - entropy_count -= nfrac; - else - entropy_count = 0; - - if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig) - goto retry; - - trace_debit_entropy(8 * ibytes); - if (ibytes && POOL_ENTROPY_BITS() < random_write_wakeup_bits) { - wake_up_interruptible(&random_write_wait); - kill_fasync(&fasync, SIGIO, POLL_OUT); + for (i = 0; i < 16 / sizeof(long); ++i) { + s[3] ^= v[i]; +#ifdef CONFIG_64BIT + s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32); + s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2]; + s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0]; + s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32); +#else + s[0] += s[1]; s[1] = rol32(s[1], 5); s[1] ^= s[0]; s[0] = rol32(s[0], 16); + s[2] += s[3]; s[3] = rol32(s[3], 8); s[3] ^= s[2]; + s[0] += s[3]; s[3] = rol32(s[3], 7); s[3] ^= s[0]; + s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16); +#endif + s[0] ^= v[i]; } - - return ibytes; } +#ifdef CONFIG_SMP /* - * This function does the actual extraction for extract_entropy. - * - * Note: we assume that .poolwords is a multiple of 16 words. + * This function is called when the CPU has just come online, with + * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE. */ -static void extract_buf(u8 *out) +int random_online_cpu(unsigned int cpu) { - struct blake2s_state state __aligned(__alignof__(unsigned long)); - u8 hash[BLAKE2S_HASH_SIZE]; - unsigned long *salt; - unsigned long flags; - - blake2s_init(&state, sizeof(hash)); - /* - * If we have an architectural hardware random number - * generator, use it for BLAKE2's salt & personal fields. + * During CPU shutdown and before CPU onlining, add_interrupt_ + * randomness() may schedule mix_interrupt_randomness(), and + * set the MIX_INFLIGHT flag. However, because the worker can + * be scheduled on a different CPU during this period, that + * flag will never be cleared. For that reason, we zero out + * the flag here, which runs just after workqueues are onlined + * for the CPU again. This also has the effect of setting the + * irq randomness count to zero so that new accumulated irqs + * are fresh. */ - for (salt = (unsigned long *)&state.h[4]; - salt < (unsigned long *)&state.h[8]; ++salt) { - unsigned long v; - if (!arch_get_random_long(&v)) - break; - *salt ^= v; - } - - /* Generate a hash across the pool */ - spin_lock_irqsave(&input_pool.lock, flags); - blake2s_update(&state, (const u8 *)input_pool_data, POOL_BYTES); - blake2s_final(&state, hash); /* final zeros out state */ + per_cpu_ptr(&irq_randomness, cpu)->count = 0; + return 0; +} +#endif - /* - * We mix the hash back into the pool to prevent backtracking - * attacks (where the attacker knows the state of the pool - * plus the current outputs, and attempts to find previous - * outputs), unless the hash function can be inverted. By - * mixing at least a hash worth of hash data back, we make - * brute-forcing the feedback as hard as brute-forcing the - * hash. 
- */
-	__mix_pool_bytes(hash, sizeof(hash));
-	spin_unlock_irqrestore(&input_pool.lock, flags);
+static unsigned long get_reg(struct fast_pool *f, struct pt_regs *regs)
+{
+	unsigned long *ptr = (unsigned long *)regs;
+	unsigned int idx;
-	/* Note that EXTRACT_SIZE is half of hash size here, because above
-	 * we've dumped the full length back into mixer. By reducing the
-	 * amount that we emit, we retain a level of forward secrecy.
-	 */
-	memcpy(out, hash, EXTRACT_SIZE);
-	memzero_explicit(hash, sizeof(hash));
+	if (regs == NULL)
+		return 0;
+	idx = READ_ONCE(f->reg_idx);
+	if (idx >= sizeof(struct pt_regs) / sizeof(unsigned long))
+		idx = 0;
+	ptr += idx++;
+	WRITE_ONCE(f->reg_idx, idx);
+	return *ptr;
}
-static ssize_t _extract_entropy(void *buf, size_t nbytes)
+static void mix_interrupt_randomness(struct work_struct *work)
{
-	ssize_t ret = 0, i;
-	u8 tmp[EXTRACT_SIZE];
+	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
+	/*
+	 * The size of the copied stack pool is explicitly 16 bytes so that we
+	 * tax mix_pool_bytes()'s compression function the same amount on all
+	 * platforms. This means on 64-bit we copy half the pool into this,
+	 * while on 32-bit we copy all of it. The entropy is supposed to be
+	 * sufficiently dispersed between bits that in the sponge-like
+	 * half case, on average we don't wind up "losing" some.
+	 */
+	u8 pool[16];
-	while (nbytes) {
-		extract_buf(tmp);
-		i = min_t(int, nbytes, EXTRACT_SIZE);
-		memcpy(buf, tmp, i);
-		nbytes -= i;
-		buf += i;
-		ret += i;
+	/* Check to see if we're running on the wrong CPU due to hotplug. */
+	local_irq_disable();
+	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
+		local_irq_enable();
+		return;
	}
-	/* Wipe data just returned from memory */
-	memzero_explicit(tmp, sizeof(tmp));
+	/*
+	 * Copy the pool to the stack so that the mixer always has a
+	 * consistent view, before we re-enable irqs.
+	 */
+	memcpy(pool, fast_pool->pool, sizeof(pool));
+	fast_pool->count = 0;
+	fast_pool->last = jiffies;
+	local_irq_enable();
-	return ret;
-}
+	if (unlikely(crng_init == 0)) {
+		crng_pre_init_inject(pool, sizeof(pool), true);
+		mix_pool_bytes(pool, sizeof(pool));
+	} else {
+		mix_pool_bytes(pool, sizeof(pool));
+		credit_entropy_bits(1);
+	}
-/*
- * This function extracts randomness from the "entropy pool", and
- * returns it in a buffer.
- *
- * The min parameter specifies the minimum amount we can pull before
- * failing to avoid races that defeat catastrophic reseeding.
- */ -static ssize_t extract_entropy(void *buf, size_t nbytes, int min) -{ - trace_extract_entropy(nbytes, POOL_ENTROPY_BITS(), _RET_IP_); - nbytes = account(nbytes, min); - return _extract_entropy(buf, nbytes); + memzero_explicit(pool, sizeof(pool)); } -#define warn_unseeded_randomness(previous) \ - _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous)) - -static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous) +void add_interrupt_randomness(int irq) { -#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM - const bool print_once = false; -#else - static bool print_once __read_mostly; -#endif - - if (print_once || crng_ready() || - (previous && (caller == READ_ONCE(*previous)))) - return; - WRITE_ONCE(*previous, caller); -#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM - print_once = true; -#endif - if (__ratelimit(&unseeded_warning)) - printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", - func_name, caller, crng_init); -} + enum { MIX_INFLIGHT = 1U << 31 }; + cycles_t cycles = random_get_entropy(); + unsigned long now = jiffies; + struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); + struct pt_regs *regs = get_irq_regs(); + unsigned int new_count; + union { + u32 u32[4]; + u64 u64[2]; + unsigned long longs[16 / sizeof(long)]; + } irq_data; -/* - * This function is the exported kernel interface. It returns some - * number of good random numbers, suitable for key generation, seeding - * TCP sequence numbers, etc. It does not rely on the hardware random - * number generator. For random bytes direct from the hardware RNG - * (when available), use get_random_bytes_arch(). In order to ensure - * that the randomness provided by this function is okay, the function - * wait_for_random_bytes() should be called and return 0 at least once - * at any point prior. - */ -static void _get_random_bytes(void *buf, int nbytes) -{ - u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4); + if (cycles == 0) + cycles = get_reg(fast_pool, regs); - trace_get_random_bytes(nbytes, _RET_IP_); + if (sizeof(cycles) == 8) + irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq; + else { + irq_data.u32[0] = cycles ^ irq; + irq_data.u32[1] = now; + } - while (nbytes >= CHACHA_BLOCK_SIZE) { - extract_crng(buf); - buf += CHACHA_BLOCK_SIZE; - nbytes -= CHACHA_BLOCK_SIZE; + if (sizeof(unsigned long) == 8) + irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_; + else { + irq_data.u32[2] = regs ? 
instruction_pointer(regs) : _RET_IP_; + irq_data.u32[3] = get_reg(fast_pool, regs); } - if (nbytes > 0) { - extract_crng(tmp); - memcpy(buf, tmp, nbytes); - crng_backtrack_protect(tmp, nbytes); - } else - crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE); - memzero_explicit(tmp, sizeof(tmp)); -} + fast_mix(fast_pool->pool, irq_data.longs); + new_count = ++fast_pool->count; -void get_random_bytes(void *buf, int nbytes) -{ - static void *previous; + if (new_count & MIX_INFLIGHT) + return; - warn_unseeded_randomness(&previous); - _get_random_bytes(buf, nbytes); + if (new_count < 64 && (!time_after(now, fast_pool->last + HZ) || + unlikely(crng_init == 0))) + return; + + if (unlikely(!fast_pool->mix.func)) + INIT_WORK(&fast_pool->mix, mix_interrupt_randomness); + fast_pool->count |= MIX_INFLIGHT; + queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix); } -EXPORT_SYMBOL(get_random_bytes); +EXPORT_SYMBOL_GPL(add_interrupt_randomness); /* * Each time the timer fires, we expect that we got an unpredictable @@ -1501,264 +1388,81 @@ static void entropy_timer(struct timer_list *t) static void try_to_generate_entropy(void) { struct { - unsigned long now; + cycles_t cycles; struct timer_list timer; } stack; - stack.now = random_get_entropy(); + stack.cycles = random_get_entropy(); /* Slow counter - or none. Don't even bother */ - if (stack.now == random_get_entropy()) + if (stack.cycles == random_get_entropy()) return; timer_setup_on_stack(&stack.timer, entropy_timer, 0); - while (!crng_ready()) { + while (!crng_ready() && !signal_pending(current)) { if (!timer_pending(&stack.timer)) mod_timer(&stack.timer, jiffies + 1); - mix_pool_bytes(&stack.now, sizeof(stack.now)); + mix_pool_bytes(&stack.cycles, sizeof(stack.cycles)); schedule(); - stack.now = random_get_entropy(); + stack.cycles = random_get_entropy(); } del_timer_sync(&stack.timer); destroy_timer_on_stack(&stack.timer); - mix_pool_bytes(&stack.now, sizeof(stack.now)); + mix_pool_bytes(&stack.cycles, sizeof(stack.cycles)); } -/* - * Wait for the urandom pool to be seeded and thus guaranteed to supply - * cryptographically secure random numbers. This applies to: the /dev/urandom - * device, the get_random_bytes function, and the get_random_{u32,u64,int,long} - * family of functions. Using any of these functions without first calling - * this function forfeits the guarantee of security. - * - * Returns: 0 if the urandom pool has been seeded. - * -ERESTARTSYS if the function was interrupted by a signal. - */ -int wait_for_random_bytes(void) -{ - if (likely(crng_ready())) - return 0; - - do { - int ret; - ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ); - if (ret) - return ret > 0 ? 0 : ret; - - try_to_generate_entropy(); - } while (!crng_ready()); - return 0; -} -EXPORT_SYMBOL(wait_for_random_bytes); - -/* - * Returns whether or not the urandom pool has been seeded and thus guaranteed - * to supply cryptographically secure random numbers. This applies to: the - * /dev/urandom device, the get_random_bytes function, and the get_random_{u32, - * ,u64,int,long} family of functions. +/********************************************************************** * - * Returns: true if the urandom pool has been seeded. - * false if the urandom pool has not been seeded. - */ -bool rng_is_initialized(void) -{ - return crng_ready(); -} -EXPORT_SYMBOL(rng_is_initialized); - -/* - * Add a callback function that will be invoked when the nonblocking - * pool is initialised. + * Userspace reader/writer interfaces. 
* - * returns: 0 if callback is successfully added - * -EALREADY if pool is already initialised (callback not called) - * -ENOENT if module for callback is not alive - */ -int add_random_ready_callback(struct random_ready_callback *rdy) -{ - struct module *owner; - unsigned long flags; - int err = -EALREADY; - - if (crng_ready()) - return err; - - owner = rdy->owner; - if (!try_module_get(owner)) - return -ENOENT; - - spin_lock_irqsave(&random_ready_list_lock, flags); - if (crng_ready()) - goto out; - - owner = NULL; - - list_add(&rdy->list, &random_ready_list); - err = 0; - -out: - spin_unlock_irqrestore(&random_ready_list_lock, flags); - - module_put(owner); - - return err; -} -EXPORT_SYMBOL(add_random_ready_callback); - -/* - * Delete a previously registered readiness callback function. - */ -void del_random_ready_callback(struct random_ready_callback *rdy) -{ - unsigned long flags; - struct module *owner = NULL; - - spin_lock_irqsave(&random_ready_list_lock, flags); - if (!list_empty(&rdy->list)) { - list_del_init(&rdy->list); - owner = rdy->owner; - } - spin_unlock_irqrestore(&random_ready_list_lock, flags); - - module_put(owner); -} -EXPORT_SYMBOL(del_random_ready_callback); - -/* - * This function will use the architecture-specific hardware random - * number generator if it is available. The arch-specific hw RNG will - * almost certainly be faster than what we can do in software, but it - * is impossible to verify that it is implemented securely (as - * opposed, to, say, the AES encryption of a sequence number using a - * key known by the NSA). So it's useful if we need the speed, but - * only if we're willing to trust the hardware manufacturer not to - * have put in a back door. + * getrandom(2) is the primary modern interface into the RNG and should + * be used in preference to anything else. * - * Return number of bytes filled in. - */ -int __must_check get_random_bytes_arch(void *buf, int nbytes) -{ - int left = nbytes; - u8 *p = buf; - - trace_get_random_bytes_arch(left, _RET_IP_); - while (left) { - unsigned long v; - int chunk = min_t(int, left, sizeof(unsigned long)); - - if (!arch_get_random_long(&v)) - break; - - memcpy(p, &v, chunk); - p += chunk; - left -= chunk; - } - - return nbytes - left; -} -EXPORT_SYMBOL(get_random_bytes_arch); - -/* - * init_std_data - initialize pool with system data + * Reading from /dev/random and /dev/urandom both have the same effect + * as calling getrandom(2) with flags=0. (In earlier versions, however, + * they each had different semantics.) * - * This function clears the pool's entropy count and mixes some system - * data into the pool to prepare it for use. The pool is not cleared - * as that can only decrease the entropy in the pool. - */ -static void __init init_std_data(void) -{ - int i; - ktime_t now = ktime_get_real(); - unsigned long rv; - - mix_pool_bytes(&now, sizeof(now)); - for (i = POOL_BYTES; i > 0; i -= sizeof(rv)) { - if (!arch_get_random_seed_long(&rv) && - !arch_get_random_long(&rv)) - rv = random_get_entropy(); - mix_pool_bytes(&rv, sizeof(rv)); - } - mix_pool_bytes(utsname(), sizeof(*(utsname()))); -} - -/* - * Note that setup_arch() may call add_device_randomness() - * long before we get here. This allows seeding of the pools - * with some platform dependent data very early in the boot - * process. But it limits our options here. We must use - * statically allocated structures that already have all - * initializations complete at compile time. 
We should also - * take care not to overwrite the precious per platform data - * we were given. - */ -int __init rand_initialize(void) -{ - init_std_data(); - if (crng_need_final_init) - crng_finalize_init(&primary_crng); - crng_initialize_primary(&primary_crng); - crng_global_init_time = jiffies; - if (ratelimit_disable) { - urandom_warning.interval = 0; - unseeded_warning.interval = 0; - } - return 0; -} + * Writing to either /dev/random or /dev/urandom adds entropy to + * the input pool but does not credit it. + * + * Polling on /dev/random or /dev/urandom indicates when the RNG + * is initialized, on the read side, and when it wants new entropy, + * on the write side. + * + * Both /dev/random and /dev/urandom have the same set of ioctls for + * adding entropy, getting the entropy count, zeroing the count, and + * reseeding the crng. + * + **********************************************************************/ -#ifdef CONFIG_BLOCK -void rand_initialize_disk(struct gendisk *disk) +SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, + flags) { - struct timer_rand_state *state; + if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) + return -EINVAL; /* - * If kzalloc returns null, we just won't use that entropy - * source. + * Requesting insecure and blocking randomness at the same time makes + * no sense. */ - state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); - if (state) { - state->last_time = INITIAL_JIFFIES; - disk->random = state; - } -} -#endif - -static ssize_t urandom_read_nowarn(struct file *file, char __user *buf, - size_t nbytes, loff_t *ppos) -{ - int ret; + if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM)) + return -EINVAL; - nbytes = min_t(size_t, nbytes, INT_MAX >> (POOL_ENTROPY_SHIFT + 3)); - ret = extract_crng_user(buf, nbytes); - trace_urandom_read(8 * nbytes, 0, POOL_ENTROPY_BITS()); - return ret; -} + if (count > INT_MAX) + count = INT_MAX; -static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, - loff_t *ppos) -{ - static int maxwarn = 10; + if (!(flags & GRND_INSECURE) && !crng_ready()) { + int ret; - if (!crng_ready() && maxwarn > 0) { - maxwarn--; - if (__ratelimit(&urandom_warning)) - pr_notice("%s: uninitialized urandom read (%zd bytes read)\n", - current->comm, nbytes); + if (flags & GRND_NONBLOCK) + return -EAGAIN; + ret = wait_for_random_bytes(); + if (unlikely(ret)) + return ret; } - - return urandom_read_nowarn(file, buf, nbytes, ppos); -} - -static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes, - loff_t *ppos) -{ - int ret; - - ret = wait_for_random_bytes(); - if (ret != 0) - return ret; - return urandom_read_nowarn(file, buf, nbytes, ppos); + return get_random_bytes_user(buf, count); } static __poll_t random_poll(struct file *file, poll_table *wait) @@ -1770,44 +1474,38 @@ static __poll_t random_poll(struct file *file, poll_table *wait) mask = 0; if (crng_ready()) mask |= EPOLLIN | EPOLLRDNORM; - if (POOL_ENTROPY_BITS() < random_write_wakeup_bits) + if (input_pool.entropy_count < POOL_MIN_BITS) mask |= EPOLLOUT | EPOLLWRNORM; return mask; } -static int write_pool(const char __user *buffer, size_t count) +static int write_pool(const char __user *ubuf, size_t count) { - size_t bytes; - u32 t, buf[16]; - const char __user *p = buffer; - - while (count > 0) { - int b, i = 0; + size_t len; + int ret = 0; + u8 block[BLAKE2S_BLOCK_SIZE]; - bytes = min(count, sizeof(buf)); - if (copy_from_user(&buf, p, bytes)) - return -EFAULT; - - for (b = 
bytes; b > 0; b -= sizeof(u32), i++) { - if (!arch_get_random_int(&t)) - break; - buf[i] ^= t; + while (count) { + len = min(count, sizeof(block)); + if (copy_from_user(block, ubuf, len)) { + ret = -EFAULT; + goto out; } - - count -= bytes; - p += bytes; - - mix_pool_bytes(buf, bytes); + count -= len; + ubuf += len; + mix_pool_bytes(block, len); cond_resched(); } - return 0; +out: + memzero_explicit(block, sizeof(block)); + return ret; } static ssize_t random_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { - size_t ret; + int ret; ret = write_pool(buffer, count); if (ret) @@ -1816,6 +1514,17 @@ static ssize_t random_write(struct file *file, const char __user *buffer, return (ssize_t)count; } +static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + int ret; + + ret = wait_for_random_bytes(); + if (ret != 0) + return ret; + return get_random_bytes_user(buf, nbytes); +} + static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { int size, ent_count; @@ -1824,9 +1533,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) switch (cmd) { case RNDGETENTCNT: - /* inherently racy, no point locking */ - ent_count = POOL_ENTROPY_BITS(); - if (put_user(ent_count, p)) + /* Inherently racy, no point locking. */ + if (put_user(input_pool.entropy_count, p)) return -EFAULT; return 0; case RNDADDTOENTCNT: @@ -1834,7 +1542,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EPERM; if (get_user(ent_count, p)) return -EFAULT; - return credit_entropy_bits_safe(ent_count); + if (ent_count < 0) + return -EINVAL; + credit_entropy_bits(ent_count); + return 0; case RNDADDENTROPY: if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -1847,7 +1558,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) retval = write_pool((const char __user *)p, size); if (retval < 0) return retval; - return credit_entropy_bits_safe(ent_count); + credit_entropy_bits(ent_count); + return 0; case RNDZAPENTCNT: case RNDCLEARPOOL: /* @@ -1856,15 +1568,17 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; - input_pool.entropy_count = 0; + if (xchg(&input_pool.entropy_count, 0) >= POOL_MIN_BITS) { + wake_up_interruptible(&random_write_wait); + kill_fasync(&fasync, SIGIO, POLL_OUT); + } return 0; case RNDRESEEDCRNG: if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (crng_init < 2) + if (!crng_ready()) return -ENODATA; - crng_reseed(&primary_crng, true); - WRITE_ONCE(crng_global_init_time, jiffies - 1); + crng_reseed(false); return 0; default: return -EINVAL; @@ -1886,46 +1600,34 @@ const struct file_operations random_fops = { .llseek = noop_llseek, }; -const struct file_operations urandom_fops = { - .read = urandom_read, - .write = random_write, - .unlocked_ioctl = random_ioctl, - .compat_ioctl = compat_ptr_ioctl, - .fasync = random_fasync, - .llseek = noop_llseek, -}; - -SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, - flags) -{ - int ret; - - if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) - return -EINVAL; - - /* - * Requesting insecure and blocking randomness at the same time makes - * no sense. 
- */ - if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM)) - return -EINVAL; - - if (count > INT_MAX) - count = INT_MAX; - - if (!(flags & GRND_INSECURE) && !crng_ready()) { - if (flags & GRND_NONBLOCK) - return -EAGAIN; - ret = wait_for_random_bytes(); - if (unlikely(ret)) - return ret; - } - return urandom_read_nowarn(NULL, buf, count, NULL); -} /******************************************************************** * - * Sysctl interface + * Sysctl interface. + * + * These are partly unused legacy knobs with dummy values to not break + * userspace and partly still useful things. They are usually accessible + * in /proc/sys/kernel/random/ and are as follows: + * + * - boot_id - a UUID representing the current boot. + * + * - uuid - a random UUID, different each time the file is read. + * + * - poolsize - the number of bits of entropy that the input pool can + * hold, tied to the POOL_BITS constant. + * + * - entropy_avail - the number of bits of entropy currently in the + * input pool. Always <= poolsize. + * + * - write_wakeup_threshold - the amount of entropy in the input pool + * below which write polls to /dev/random will unblock, requesting + * more entropy, tied to the POOL_MIN_BITS constant. It is writable + * to avoid breaking old userspaces, but writing to it does not + * change any behavior of the RNG. + * + * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL. + * It is writable to avoid breaking old userspaces, but writing + * to it does not change any behavior of the RNG. * ********************************************************************/ @@ -1933,25 +1635,28 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, #include <linux/sysctl.h> -static int min_write_thresh; -static int max_write_thresh = POOL_BITS; -static int random_min_urandom_seed = 60; -static char sysctl_bootid[16]; +static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ; +static int sysctl_random_write_wakeup_bits = POOL_MIN_BITS; +static int sysctl_poolsize = POOL_BITS; +static u8 sysctl_bootid[UUID_SIZE]; /* * This function is used to return both the bootid UUID, and random - * UUID. The difference is in whether table->data is NULL; if it is, + * UUID. The difference is in whether table->data is NULL; if it is, * then a new UUID is generated and returned to the user. - * - * If the user accesses this via the proc interface, the UUID will be - * returned as an ASCII string in the standard UUID format; if via the - * sysctl system call, as 16 bytes of binary data. 
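+ * In both cases the value is now emitted as an ASCII string in the
+ * standard UUID format, e.g.:
+ *
+ *	$ cat /proc/sys/kernel/random/boot_id	# stable for this boot
+ *	$ cat /proc/sys/kernel/random/uuid	# fresh on every read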
*/ static int proc_do_uuid(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { - struct ctl_table fake_table; - unsigned char buf[64], tmp_uuid[16], *uuid; + u8 tmp_uuid[UUID_SIZE], *uuid; + char uuid_string[UUID_STRING_LEN + 1]; + struct ctl_table fake_table = { + .data = uuid_string, + .maxlen = UUID_STRING_LEN + }; + + if (write) + return -EPERM; uuid = table->data; if (!uuid) { @@ -1966,32 +1671,17 @@ static int proc_do_uuid(struct ctl_table *table, int write, void *buffer, spin_unlock(&bootid_spinlock); } - sprintf(buf, "%pU", uuid); - - fake_table.data = buf; - fake_table.maxlen = sizeof(buf); - - return proc_dostring(&fake_table, write, buffer, lenp, ppos); + snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid); + return proc_dostring(&fake_table, 0, buffer, lenp, ppos); } -/* - * Return entropy available scaled to integral bits - */ -static int proc_do_entropy(struct ctl_table *table, int write, void *buffer, - size_t *lenp, loff_t *ppos) +/* The same as proc_dointvec, but writes don't change anything. */ +static int proc_do_rointvec(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos) { - struct ctl_table fake_table; - int entropy_count; - - entropy_count = *(int *)table->data >> POOL_ENTROPY_SHIFT; - - fake_table.data = &entropy_count; - fake_table.maxlen = sizeof(entropy_count); - - return proc_dointvec(&fake_table, write, buffer, lenp, ppos); + return write ? 0 : proc_dointvec(table, 0, buffer, lenp, ppos); } -static int sysctl_poolsize = POOL_BITS; static struct ctl_table random_table[] = { { .procname = "poolsize", @@ -2002,56 +1692,36 @@ static struct ctl_table random_table[] = { }, { .procname = "entropy_avail", + .data = &input_pool.entropy_count, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = proc_do_entropy, - .data = &input_pool.entropy_count, + .proc_handler = proc_dointvec, }, { .procname = "write_wakeup_threshold", - .data = &random_write_wakeup_bits, + .data = &sysctl_random_write_wakeup_bits, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_write_thresh, - .extra2 = &max_write_thresh, + .proc_handler = proc_do_rointvec, }, { .procname = "urandom_min_reseed_secs", - .data = &random_min_urandom_seed, + .data = &sysctl_random_min_urandom_seed, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_do_rointvec, }, { .procname = "boot_id", .data = &sysctl_bootid, - .maxlen = 16, .mode = 0444, .proc_handler = proc_do_uuid, }, { .procname = "uuid", - .maxlen = 16, .mode = 0444, .proc_handler = proc_do_uuid, }, -#ifdef ADD_INTERRUPT_BENCH - { - .procname = "add_interrupt_avg_cycles", - .data = &avg_cycles, - .maxlen = sizeof(avg_cycles), - .mode = 0444, - .proc_handler = proc_doulongvec_minmax, - }, - { - .procname = "add_interrupt_avg_deviation", - .data = &avg_deviation, - .maxlen = sizeof(avg_deviation), - .mode = 0444, - .proc_handler = proc_doulongvec_minmax, - }, -#endif { } }; @@ -2065,168 +1735,4 @@ static int __init random_sysctls_init(void) return 0; } device_initcall(random_sysctls_init); -#endif /* CONFIG_SYSCTL */ - -struct batched_entropy { - union { - u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)]; - u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)]; - }; - unsigned int position; - spinlock_t batch_lock; -}; - -/* - * Get a random word for internal kernel use only. 
The quality of the random - * number is good as /dev/urandom, but there is no backtrack protection, with - * the goal of being quite fast and not depleting entropy. In order to ensure - * that the randomness provided by this function is okay, the function - * wait_for_random_bytes() should be called and return 0 at least once at any - * point prior. - */ -static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { - .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock), -}; - -u64 get_random_u64(void) -{ - u64 ret; - unsigned long flags; - struct batched_entropy *batch; - static void *previous; - - warn_unseeded_randomness(&previous); - - batch = raw_cpu_ptr(&batched_entropy_u64); - spin_lock_irqsave(&batch->batch_lock, flags); - if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { - extract_crng((u8 *)batch->entropy_u64); - batch->position = 0; - } - ret = batch->entropy_u64[batch->position++]; - spin_unlock_irqrestore(&batch->batch_lock, flags); - return ret; -} -EXPORT_SYMBOL(get_random_u64); - -static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = { - .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock), -}; -u32 get_random_u32(void) -{ - u32 ret; - unsigned long flags; - struct batched_entropy *batch; - static void *previous; - - warn_unseeded_randomness(&previous); - - batch = raw_cpu_ptr(&batched_entropy_u32); - spin_lock_irqsave(&batch->batch_lock, flags); - if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { - extract_crng((u8 *)batch->entropy_u32); - batch->position = 0; - } - ret = batch->entropy_u32[batch->position++]; - spin_unlock_irqrestore(&batch->batch_lock, flags); - return ret; -} -EXPORT_SYMBOL(get_random_u32); - -/* It's important to invalidate all potential batched entropy that might - * be stored before the crng is initialized, which we can do lazily by - * simply resetting the counter to zero so that it's re-extracted on the - * next usage. */ -static void invalidate_batched_entropy(void) -{ - int cpu; - unsigned long flags; - - for_each_possible_cpu(cpu) { - struct batched_entropy *batched_entropy; - - batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu); - spin_lock_irqsave(&batched_entropy->batch_lock, flags); - batched_entropy->position = 0; - spin_unlock(&batched_entropy->batch_lock); - - batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu); - spin_lock(&batched_entropy->batch_lock); - batched_entropy->position = 0; - spin_unlock_irqrestore(&batched_entropy->batch_lock, flags); - } -} - -/** - * randomize_page - Generate a random, page aligned address - * @start: The smallest acceptable address the caller will take. - * @range: The size of the area, starting at @start, within which the - * random address must fall. - * - * If @start + @range would overflow, @range is capped. - * - * NOTE: Historical use of randomize_range, which this replaces, presumed that - * @start was already page aligned. We now align it regardless. - * - * Return: A page aligned address within [start, start + range). On error, - * @start is returned. - */ -unsigned long randomize_page(unsigned long start, unsigned long range) -{ - if (!PAGE_ALIGNED(start)) { - range -= PAGE_ALIGN(start) - start; - start = PAGE_ALIGN(start); - } - - if (start > ULONG_MAX - range) - range = ULONG_MAX - start; - - range >>= PAGE_SHIFT; - - if (range == 0) - return start; - - return start + (get_random_long() % range << PAGE_SHIFT); -} - -/* Interface for in-kernel drivers of true hardware RNGs. 
- * Those devices may produce endless random bits and will be throttled - * when our pool is full. - */ -void add_hwgenerator_randomness(const char *buffer, size_t count, - size_t entropy) -{ - if (unlikely(crng_init == 0)) { - size_t ret = crng_fast_load(buffer, count); - mix_pool_bytes(buffer, ret); - count -= ret; - buffer += ret; - if (!count || crng_init == 0) - return; - } - - /* Suspend writing if we're above the trickle threshold. - * We'll be woken up again once below random_write_wakeup_thresh, - * or when the calling thread is about to terminate. - */ - wait_event_interruptible(random_write_wait, - !system_wq || kthread_should_stop() || - POOL_ENTROPY_BITS() <= random_write_wakeup_bits); - mix_pool_bytes(buffer, count); - credit_entropy_bits(entropy); -} -EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); - -/* Handle random seed passed by bootloader. - * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise - * it would be regarded as device data. - * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER. - */ -void add_bootloader_randomness(const void *buf, unsigned int size) -{ - if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER)) - add_hwgenerator_randomness(buf, size, size * 8); - else - add_device_randomness(buf, size); -} -EXPORT_SYMBOL_GPL(add_bootloader_randomness); +#endif diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index b009e7479b70..783d65fc71f0 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -274,14 +274,6 @@ static void tpm_dev_release(struct device *dev) kfree(chip); } -static void tpm_devs_release(struct device *dev) -{ - struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs); - - /* release the master device reference */ - put_device(&chip->dev); -} - /** * tpm_class_shutdown() - prepare the TPM device for loss of power. * @dev: device to which the chip is associated. @@ -344,7 +336,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->dev_num = rc; device_initialize(&chip->dev); - device_initialize(&chip->devs); chip->dev.class = tpm_class; chip->dev.class->shutdown_pre = tpm_class_shutdown; @@ -352,39 +343,20 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->dev.parent = pdev; chip->dev.groups = chip->groups; - chip->devs.parent = pdev; - chip->devs.class = tpmrm_class; - chip->devs.release = tpm_devs_release; - /* get extra reference on main device to hold on - * behalf of devs. This holds the chip structure - * while cdevs is in use. 
The corresponding put - * is in the tpm_devs_release (TPM2 only) - */ - if (chip->flags & TPM_CHIP_FLAG_TPM2) - get_device(&chip->dev); - if (chip->dev_num == 0) chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR); else chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num); - chip->devs.devt = - MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES); - rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num); if (rc) goto out; - rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num); - if (rc) - goto out; if (!pdev) chip->flags |= TPM_CHIP_FLAG_VIRTUAL; cdev_init(&chip->cdev, &tpm_fops); - cdev_init(&chip->cdevs, &tpmrm_fops); chip->cdev.owner = THIS_MODULE; - chip->cdevs.owner = THIS_MODULE; rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE); if (rc) { @@ -396,7 +368,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, return chip; out: - put_device(&chip->devs); put_device(&chip->dev); return ERR_PTR(rc); } @@ -445,14 +416,9 @@ static int tpm_add_char_device(struct tpm_chip *chip) } if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip)) { - rc = cdev_device_add(&chip->cdevs, &chip->devs); - if (rc) { - dev_err(&chip->devs, - "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", - dev_name(&chip->devs), MAJOR(chip->devs.devt), - MINOR(chip->devs.devt), rc); - return rc; - } + rc = tpm_devs_add(chip); + if (rc) + goto err_del_cdev; } /* Make the chip available. */ @@ -460,6 +426,10 @@ static int tpm_add_char_device(struct tpm_chip *chip) idr_replace(&dev_nums_idr, chip, chip->dev_num); mutex_unlock(&idr_lock); + return 0; + +err_del_cdev: + cdev_device_del(&chip->cdev, &chip->dev); return rc; } @@ -654,7 +624,7 @@ void tpm_chip_unregister(struct tpm_chip *chip) hwrng_unregister(&chip->hwrng); tpm_bios_log_teardown(chip); if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip)) - cdev_device_del(&chip->cdevs, &chip->devs); + tpm_devs_remove(chip); tpm_del_char_device(chip); } EXPORT_SYMBOL_GPL(tpm_chip_unregister); diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index c08cbb306636..dc4c0a0a5129 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -69,7 +69,13 @@ static void tpm_dev_async_work(struct work_struct *work) ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); - if (ret > 0) { + + /* + * If ret is > 0 then tpm_dev_transmit returned the size of the + * response. If ret is < 0 then tpm_dev_transmit failed and + * returned an error code. 
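+ * Either way, a nonzero value is a final result for the reader: the
+ * test below is ret != 0 rather than ret > 0 so that a negative error
+ * code is also stored in response_length and reported back, instead of
+ * being silently dropped.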
+ */
+	if (ret != 0) {
		priv->response_length = ret;
		mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
	}
}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 283f78211c3a..2163c6ee0d36 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -234,6 +234,8 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
		size_t cmdsiz);
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf,
		size_t *bufsiz);
+int tpm_devs_add(struct tpm_chip *chip);
+void tpm_devs_remove(struct tpm_chip *chip);
void tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 97e916856cf3..ffb35f0154c1 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -58,12 +58,12 @@ int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space)
{
-	mutex_lock(&chip->tpm_mutex);
-	if (!tpm_chip_start(chip)) {
+
+	if (tpm_try_get_ops(chip) == 0) {
		tpm2_flush_sessions(chip, space);
-		tpm_chip_stop(chip);
+		tpm_put_ops(chip);
	}
-	mutex_unlock(&chip->tpm_mutex);
+
	kfree(space->context_buf);
	kfree(space->session_buf);
}
@@ -574,3 +574,68 @@ out:
	dev_err(&chip->dev, "%s: error %d\n", __func__, rc);
	return rc;
}
+
+/*
+ * Put the reference to the main device.
+ */
+static void tpm_devs_release(struct device *dev)
+{
+	struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs);
+
+	/* release the master device reference */
+	put_device(&chip->dev);
+}
+
+/*
+ * Remove the device file for exposed TPM spaces and release the device
+ * reference. This may also release the reference to the master device.
+ */
+void tpm_devs_remove(struct tpm_chip *chip)
+{
+	cdev_device_del(&chip->cdevs, &chip->devs);
+	put_device(&chip->devs);
+}
+
+/*
+ * Add a device file to expose TPM spaces. Also take a reference to the
+ * main device.
+ */
+int tpm_devs_add(struct tpm_chip *chip)
+{
+	int rc;
+
+	device_initialize(&chip->devs);
+	chip->devs.parent = chip->dev.parent;
+	chip->devs.class = tpmrm_class;
+
+	/*
+	 * Get extra reference on main device to hold on behalf of devs.
+	 * This holds the chip structure while cdevs is in use. The
+	 * corresponding put is in tpm_devs_release().
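+ * On failure below, the matching unwind is put_device(&chip->devs),
+ * which ends up in tpm_devs_release() and drops this extra reference.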
+ */ + get_device(&chip->dev); + chip->devs.release = tpm_devs_release; + chip->devs.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES); + cdev_init(&chip->cdevs, &tpmrm_fops); + chip->cdevs.owner = THIS_MODULE; + + rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num); + if (rc) + goto err_put_devs; + + rc = cdev_device_add(&chip->cdevs, &chip->devs); + if (rc) { + dev_err(&chip->devs, + "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", + dev_name(&chip->devs), MAJOR(chip->devs.devt), + MINOR(chip->devs.devt), rc); + goto err_put_devs; + } + + return 0; + +err_put_devs: + put_device(&chip->devs); + + return rc; +} diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c index da5b30771418..f53e0cf1ec7e 100644 --- a/drivers/char/tpm/xen-tpmfront.c +++ b/drivers/char/tpm/xen-tpmfront.c @@ -126,16 +126,16 @@ static void vtpm_cancel(struct tpm_chip *chip) notify_remote_via_evtchn(priv->evtchn); } -static unsigned int shr_data_offset(struct vtpm_shared_page *shr) +static size_t shr_data_offset(struct vtpm_shared_page *shr) { - return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages; + return struct_size(shr, extra_pages, shr->nr_extra_pages); } static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct tpm_private *priv = dev_get_drvdata(&chip->dev); struct vtpm_shared_page *shr = priv->shr; - unsigned int offset = shr_data_offset(shr); + size_t offset = shr_data_offset(shr); u32 ordinal; unsigned long duration; @@ -177,7 +177,7 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct tpm_private *priv = dev_get_drvdata(&chip->dev); struct vtpm_shared_page *shr = priv->shr; - unsigned int offset = shr_data_offset(shr); + size_t offset = shr_data_offset(shr); size_t length = shr->length; if (shr->state == VTPM_STATE_IDLE) diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 2359889a35a0..e3c430539a17 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1957,6 +1957,13 @@ static void virtcons_remove(struct virtio_device *vdev) list_del(&portdev->list); spin_unlock_irq(&pdrvdata_lock); + /* Device is going away, exit any polling for buffers */ + virtio_break_device(vdev); + if (use_multiport(portdev)) + flush_work(&portdev->control_work); + else + flush_work(&portdev->config_work); + /* Disable interrupts for vqs */ virtio_reset_device(vdev); /* Finish up work that's lined up */ diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index ad4256d54361..d4d67fbae869 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -231,6 +231,8 @@ config COMMON_CLK_GEMINI config COMMON_CLK_LAN966X bool "Generic Clock Controller driver for LAN966X SoC" + depends on HAS_IOMEM + depends on OF help This driver provides support for Generic Clock Controller(GCK) on LAN966X SoC. 
GCK generates and supplies clock to various peripherals diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c index 744d136b721b..15d61793f53b 100644 --- a/drivers/clk/ingenic/jz4725b-cgu.c +++ b/drivers/clk/ingenic/jz4725b-cgu.c @@ -139,11 +139,10 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { }, [JZ4725B_CLK_I2S] = { - "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, + "i2s", CGU_CLK_MUX | CGU_CLK_DIV, .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 }, .mux = { CGU_REG_CPCCR, 31, 1 }, .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 }, - .gate = { CGU_REG_CLKGR, 6 }, }, [JZ4725B_CLK_SPI] = { diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c index 538e4963c915..5d2ae297e741 100644 --- a/drivers/clk/qcom/dispcc-sc7180.c +++ b/drivers/clk/qcom/dispcc-sc7180.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019, 2022, The Linux Foundation. All rights reserved. */ #include <linux/clk-provider.h> @@ -625,6 +625,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = { static struct gdsc mdss_gdsc = { .gdscr = 0x3000, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "mdss_gdsc", }, diff --git a/drivers/clk/qcom/dispcc-sc7280.c b/drivers/clk/qcom/dispcc-sc7280.c index 4ef4ae231794..ad596d567f6a 100644 --- a/drivers/clk/qcom/dispcc-sc7280.c +++ b/drivers/clk/qcom/dispcc-sc7280.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022, The Linux Foundation. All rights reserved. */ #include <linux/clk-provider.h> @@ -787,6 +787,9 @@ static struct clk_branch disp_cc_sleep_clk = { static struct gdsc disp_cc_mdss_core_gdsc = { .gdscr = 0x1004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "disp_cc_mdss_core_gdsc", }, diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c index 566fdfa0a15b..db9379634fb2 100644 --- a/drivers/clk/qcom/dispcc-sm8250.c +++ b/drivers/clk/qcom/dispcc-sm8250.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2020, 2022, The Linux Foundation. All rights reserved. 
*/ #include <linux/clk-provider.h> @@ -1126,6 +1126,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = { static struct gdsc mdss_gdsc = { .gdscr = 0x3000, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "mdss_gdsc", }, diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c index 71aa630fa4bd..f09499999eb3 100644 --- a/drivers/clk/qcom/gcc-msm8994.c +++ b/drivers/clk/qcom/gcc-msm8994.c @@ -108,42 +108,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = { { .hw = &gpll4.clkr.hw }, }; -static struct clk_rcg2 system_noc_clk_src = { - .cmd_rcgr = 0x0120, - .hid_width = 5, - .parent_map = gcc_xo_gpll0_map, - .clkr.hw.init = &(struct clk_init_data){ - .name = "system_noc_clk_src", - .parent_data = gcc_xo_gpll0, - .num_parents = ARRAY_SIZE(gcc_xo_gpll0), - .ops = &clk_rcg2_ops, - }, -}; - -static struct clk_rcg2 config_noc_clk_src = { - .cmd_rcgr = 0x0150, - .hid_width = 5, - .parent_map = gcc_xo_gpll0_map, - .clkr.hw.init = &(struct clk_init_data){ - .name = "config_noc_clk_src", - .parent_data = gcc_xo_gpll0, - .num_parents = ARRAY_SIZE(gcc_xo_gpll0), - .ops = &clk_rcg2_ops, - }, -}; - -static struct clk_rcg2 periph_noc_clk_src = { - .cmd_rcgr = 0x0190, - .hid_width = 5, - .parent_map = gcc_xo_gpll0_map, - .clkr.hw.init = &(struct clk_init_data){ - .name = "periph_noc_clk_src", - .parent_data = gcc_xo_gpll0, - .num_parents = ARRAY_SIZE(gcc_xo_gpll0), - .ops = &clk_rcg2_ops, - }, -}; - static struct freq_tbl ftbl_ufs_axi_clk_src[] = { F(50000000, P_GPLL0, 12, 0, 0), F(100000000, P_GPLL0, 6, 0, 0), @@ -1150,8 +1114,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = { .enable_mask = BIT(17), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -1435,8 +1397,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = { .enable_mask = BIT(15), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp2_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -1764,8 +1724,6 @@ static struct clk_branch gcc_lpass_q6_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_lpass_q6_axi_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -1778,8 +1736,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mss_q6_bimc_axi_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -1807,9 +1763,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_0_cfg_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1822,9 +1775,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_0_mstr_axi_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1854,9 +1804,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = 
"gcc_pcie_0_slv_axi_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1884,9 +1831,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_1_cfg_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1899,9 +1843,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_1_mstr_axi_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1930,9 +1871,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_1_slv_axi_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1960,8 +1898,6 @@ static struct clk_branch gcc_pdm_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pdm_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -1989,9 +1925,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc1_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2004,9 +1937,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc2_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2034,9 +1964,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc3_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2064,9 +1991,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc4_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2124,8 +2048,6 @@ static struct clk_branch gcc_tsif_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_tsif_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2153,8 +2075,6 @@ static struct clk_branch gcc_ufs_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ufs_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2198,8 +2118,6 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ufs_rx_symbol_0_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2213,8 
+2131,6 @@ static struct clk_branch gcc_ufs_rx_symbol_1_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ufs_rx_symbol_1_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2243,8 +2159,6 @@ static struct clk_branch gcc_ufs_tx_symbol_0_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ufs_tx_symbol_0_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2258,8 +2172,6 @@ static struct clk_branch gcc_ufs_tx_symbol_1_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ufs_tx_symbol_1_clk", - .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2364,8 +2276,6 @@ static struct clk_branch gcc_usb_hs_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_usb_hs_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2488,8 +2398,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = { .enable_mask = BIT(10), .hw.init = &(struct clk_init_data){ .name = "gcc_boot_rom_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2503,8 +2411,6 @@ static struct clk_branch gcc_prng_ahb_clk = { .enable_mask = BIT(13), .hw.init = &(struct clk_init_data){ .name = "gcc_prng_ahb_clk", - .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -2547,9 +2453,6 @@ static struct clk_regmap *gcc_msm8994_clocks[] = { [GPLL0] = &gpll0.clkr, [GPLL4_EARLY] = &gpll4_early.clkr, [GPLL4] = &gpll4.clkr, - [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr, - [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr, - [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr, [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr, [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr, [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr, @@ -2696,6 +2599,15 @@ static struct clk_regmap *gcc_msm8994_clocks[] = { [USB_SS_PHY_LDO] = &usb_ss_phy_ldo.clkr, [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr, + + /* + * The following clocks should NOT be managed by this driver, but they once were + * mistakenly added. Now they are only here to indicate that they are left undefined + * on purpose, even though the names will stay in the header file (for ABI sanity). + */ + [CONFIG_NOC_CLK_SRC] = NULL, + [PERIPH_NOC_CLK_SRC] = NULL, + [SYSTEM_NOC_CLK_SRC] = NULL, }; static struct gdsc *gcc_msm8994_gdscs[] = { diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index 7e1dd8ccfa38..44520efc6c72 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. */ #include <linux/bitops.h> @@ -35,9 +35,14 @@ #define CFG_GDSCR_OFFSET 0x4 /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles).
*/ -#define EN_REST_WAIT_VAL (0x2 << 20) -#define EN_FEW_WAIT_VAL (0x8 << 16) -#define CLK_DIS_WAIT_VAL (0x2 << 12) +#define EN_REST_WAIT_VAL 0x2 +#define EN_FEW_WAIT_VAL 0x8 +#define CLK_DIS_WAIT_VAL 0x2 + +/* Transition delay shifts */ +#define EN_REST_WAIT_SHIFT 20 +#define EN_FEW_WAIT_SHIFT 16 +#define CLK_DIS_WAIT_SHIFT 12 #define RETAIN_MEM BIT(14) #define RETAIN_PERIPH BIT(13) @@ -380,7 +385,18 @@ static int gdsc_init(struct gdsc *sc) */ mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK | EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK; - val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL; + + if (!sc->en_rest_wait_val) + sc->en_rest_wait_val = EN_REST_WAIT_VAL; + if (!sc->en_few_wait_val) + sc->en_few_wait_val = EN_FEW_WAIT_VAL; + if (!sc->clk_dis_wait_val) + sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL; + + val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT | + sc->en_few_wait_val << EN_FEW_WAIT_SHIFT | + sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT; + ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val); if (ret) return ret; diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h index d7cc4c21a9d4..ad313d7210bd 100644 --- a/drivers/clk/qcom/gdsc.h +++ b/drivers/clk/qcom/gdsc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. */ #ifndef __QCOM_GDSC_H__ @@ -22,6 +22,9 @@ struct reset_controller_dev; * @cxcs: offsets of branch registers to toggle mem/periph bits in * @cxc_count: number of @cxcs * @pwrsts: Possible powerdomain power states + * @en_rest_wait_val: transition delay value for receiving enr ack signal + * @en_few_wait_val: transition delay value for receiving enf ack signal + * @clk_dis_wait_val: transition delay value for halting clock * @resets: ids of resets associated with this gdsc * @reset_count: number of @resets * @rcdev: reset controller @@ -36,6 +39,9 @@ struct gdsc { unsigned int clamp_io_ctrl; unsigned int *cxcs; unsigned int cxc_count; + unsigned int en_rest_wait_val; + unsigned int en_few_wait_val; + unsigned int clk_dis_wait_val; const u8 pwrsts; /* Powerdomain allowable state bitfields */ #define PWRSTS_OFF BIT(0) diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index cfb8ea0df3b1..1ea556e75494 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -713,7 +713,6 @@ config INGENIC_OST config MICROCHIP_PIT64B bool "Microchip PIT64B support" depends on OF || COMPILE_TEST - select CLKSRC_MMIO select TIMER_OF help This option enables Microchip PIT64B timer for Atmel diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index eb596ff9e7bb..279ddff81ab4 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c @@ -229,8 +229,10 @@ static int __init parse_pmtmr(char *arg) int ret; ret = kstrtouint(arg, 16, &base); - if (ret) - return ret; + if (ret) { + pr_warn("PMTMR: invalid 'pmtmr=' value: '%s'\n", arg); + return 1; + } pr_info("PMTMR IOPort override: 0x%04x -> 0x%04x\n", pmtmr_ioport, base); diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 1ecd52f903b8..9ab8221ee3c6 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -880,10 +880,19 @@ static void __arch_timer_setup(unsigned type, clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta); } -static void arch_timer_evtstrm_enable(int 
divider) +static void arch_timer_evtstrm_enable(unsigned int divider) { u32 cntkctl = arch_timer_get_cntkctl(); +#ifdef CONFIG_ARM64 + /* ECV is likely to require a large divider. Use the EVNTIS flag. */ + if (cpus_have_const_cap(ARM64_HAS_ECV) && divider > 15) { + cntkctl |= ARCH_TIMER_EVT_INTERVAL_SCALE; + divider -= 8; + } +#endif + + divider = min(divider, 15U); cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK; /* Set the divider and enable virtual event stream */ cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT) @@ -912,7 +921,7 @@ static void arch_timer_configure_evtstream(void) lsb++; /* enable event stream */ - arch_timer_evtstrm_enable(max(0, min(lsb, 15))); + arch_timer_evtstrm_enable(max(0, lsb)); } static void arch_counter_set_user_access(void) diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 6db3d5511b0f..f29c812b70c9 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -60,27 +60,18 @@ #define MCT_CLKEVENTS_RATING 350 #endif +/* There are four global timers starting at offset 0 */ +#define MCT_G0_IRQ 0 +/* Local timer indices start after the global timers */ +#define MCT_L0_IRQ 4 +/* Max number of IRQs as per the DT binding document */ +#define MCT_NR_IRQS 20 + enum { MCT_INT_SPI, MCT_INT_PPI }; -enum { - MCT_G0_IRQ, - MCT_G1_IRQ, - MCT_G2_IRQ, - MCT_G3_IRQ, - MCT_L0_IRQ, - MCT_L1_IRQ, - MCT_L2_IRQ, - MCT_L3_IRQ, - MCT_L4_IRQ, - MCT_L5_IRQ, - MCT_L6_IRQ, - MCT_L7_IRQ, - MCT_NR_IRQS, -}; - static void __iomem *reg_base; static unsigned long clk_rate; static unsigned int mct_int_type; @@ -89,7 +80,11 @@ static int mct_irqs[MCT_NR_IRQS]; struct mct_clock_event_device { struct clock_event_device evt; unsigned long base; - char name[10]; + /* + * The length of the name must be adjusted if the number of + * local timer interrupts grows over two digits. + */ + char name[11]; }; static void exynos4_mct_write(unsigned int value, unsigned long offset) @@ -541,6 +536,11 @@ static int __init exynos4_timer_interrupts(struct device_node *np, * irqs are specified.
*/ nr_irqs = of_irq_count(np); + if (nr_irqs > ARRAY_SIZE(mct_irqs)) { + pr_err("exynos-mct: too many (%d) interrupts configured in DT\n", + nr_irqs); + nr_irqs = ARRAY_SIZE(mct_irqs); + } for (i = MCT_L0_IRQ; i < nr_irqs; i++) mct_irqs[i] = irq_of_parse_and_map(np, i); @@ -553,11 +553,14 @@ static int __init exynos4_timer_interrupts(struct device_node *np, mct_irqs[MCT_L0_IRQ], err); } else { for_each_possible_cpu(cpu) { - int mct_irq = mct_irqs[MCT_L0_IRQ + cpu]; + int mct_irq; struct mct_clock_event_device *pcpu_mevt = per_cpu_ptr(&percpu_mct_tick, cpu); pcpu_mevt->evt.irq = -1; + if (MCT_L0_IRQ + cpu >= ARRAY_SIZE(mct_irqs)) + break; + mct_irq = mct_irqs[MCT_L0_IRQ + cpu]; irq_set_status_flags(mct_irq, IRQ_NOAUTOEN); if (request_irq(mct_irq, diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c index 55a8e198d2a1..523e37662a6e 100644 --- a/drivers/clocksource/timer-imx-sysctr.c +++ b/drivers/clocksource/timer-imx-sysctr.c @@ -110,7 +110,7 @@ static struct timer_of to_sysctr = { }, .of_irq = { .handler = sysctr_timer_interrupt, - .flags = IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_TIMER, }, .of_clk = { .name = "per", diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c index 2cdc077a39f5..bd64a8a8427f 100644 --- a/drivers/clocksource/timer-imx-tpm.c +++ b/drivers/clocksource/timer-imx-tpm.c @@ -32,8 +32,8 @@ #define TPM_C0SC_CHF_MASK (0x1 << 7) #define TPM_C0V 0x24 -static int counter_width; -static void __iomem *timer_base; +static int counter_width __ro_after_init; +static void __iomem *timer_base __ro_after_init; static inline void tpm_timer_disable(void) { @@ -73,12 +73,12 @@ static unsigned long tpm_read_current_timer(void) { return tpm_read_counter(); } -#endif static u64 notrace tpm_read_sched_clock(void) { return tpm_read_counter(); } +#endif static int tpm_set_next_event(unsigned long delta, struct clock_event_device *evt) @@ -127,9 +127,9 @@ static irqreturn_t tpm_timer_interrupt(int irq, void *dev_id) static struct timer_of to_tpm = { .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK, .clkevt = { - .name = "i.MX7ULP TPM Timer", + .name = "i.MX TPM Timer", .rating = 200, - .features = CLOCK_EVT_FEAT_ONESHOT, + .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ, .set_state_shutdown = tpm_set_state_shutdown, .set_state_oneshot = tpm_set_state_oneshot, .set_next_event = tpm_set_next_event, @@ -137,7 +137,7 @@ static struct timer_of to_tpm = { }, .of_irq = { .handler = tpm_timer_interrupt, - .flags = IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_TIMER, }, .of_clk = { .name = "per", @@ -150,10 +150,10 @@ static int __init tpm_clocksource_init(void) tpm_delay_timer.read_current_timer = &tpm_read_current_timer; tpm_delay_timer.freq = timer_of_rate(&to_tpm) >> 3; register_current_timer_delay(&tpm_delay_timer); -#endif sched_clock_register(tpm_read_sched_clock, counter_width, timer_of_rate(&to_tpm) >> 3); +#endif return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm", diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c index cfa4ec7ef396..abce83d2f00b 100644 --- a/drivers/clocksource/timer-microchip-pit64b.c +++ b/drivers/clocksource/timer-microchip-pit64b.c @@ -42,8 +42,7 @@ #define MCHP_PIT64B_LSBMASK GENMASK_ULL(31, 0) #define MCHP_PIT64B_PRES_TO_MODE(p) (MCHP_PIT64B_MR_PRES & ((p) << 8)) #define MCHP_PIT64B_MODE_TO_PRES(m) ((MCHP_PIT64B_MR_PRES & (m)) >> 8) -#define MCHP_PIT64B_DEF_CS_FREQ 5000000UL /* 5 MHz */ -#define 
MCHP_PIT64B_DEF_CE_FREQ 32768 /* 32 KHz */ +#define MCHP_PIT64B_DEF_FREQ 5000000UL /* 5 MHz */ #define MCHP_PIT64B_NAME "pit64b" @@ -165,7 +164,7 @@ static u64 mchp_pit64b_clksrc_read(struct clocksource *cs) return mchp_pit64b_cnt_read(mchp_pit64b_cs_base); } -static u64 mchp_pit64b_sched_read_clk(void) +static u64 notrace mchp_pit64b_sched_read_clk(void) { return mchp_pit64b_cnt_read(mchp_pit64b_cs_base); } @@ -418,7 +417,6 @@ static int __init mchp_pit64b_init_clkevt(struct mchp_pit64b_timer *timer, static int __init mchp_pit64b_dt_init_timer(struct device_node *node, bool clkevt) { - u32 freq = clkevt ? MCHP_PIT64B_DEF_CE_FREQ : MCHP_PIT64B_DEF_CS_FREQ; struct mchp_pit64b_timer timer; unsigned long clk_rate; u32 irq = 0; @@ -446,7 +444,7 @@ static int __init mchp_pit64b_dt_init_timer(struct device_node *node, } /* Initialize mode (prescaler + SGCK bit). To be used at runtime. */ - ret = mchp_pit64b_init_mode(&timer, freq); + ret = mchp_pit64b_init_mode(&timer, MCHP_PIT64B_DEF_FREQ); if (ret) goto irq_unmap; diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c index 529cc6a51cdb..c3f54d9912be 100644 --- a/drivers/clocksource/timer-of.c +++ b/drivers/clocksource/timer-of.c @@ -157,9 +157,9 @@ static __init int timer_of_base_init(struct device_node *np, of_base->base = of_base->name ? of_io_request_and_map(np, of_base->index, of_base->name) : of_iomap(np, of_base->index); - if (IS_ERR(of_base->base)) { - pr_err("Failed to iomap (%s)\n", of_base->name); - return PTR_ERR(of_base->base); + if (IS_ERR_OR_NULL(of_base->base)) { + pr_err("Failed to iomap (%s:%s)\n", np->name, of_base->name); + return of_base->base ? PTR_ERR(of_base->base) : -ENOMEM; } return 0; diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index b6f97960d8ee..2737407ff069 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -241,8 +241,7 @@ static void __init dmtimer_systimer_assign_alwon(void) bool quirk_unreliable_oscillator = false; /* Quirk unreliable 32 KiHz oscillator with incomplete dts */ - if (of_machine_is_compatible("ti,omap3-beagle") || - of_machine_is_compatible("timll,omap3-devkit8000")) { + if (of_machine_is_compatible("ti,omap3-beagle-ab4")) { quirk_unreliable_oscillator = true; counter_32k = -ENODEV; } @@ -695,9 +694,9 @@ static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa) return 0; } - if (pa == 0x48034000) /* dra7 dmtimer3 */ + if (pa == 0x4882c000) /* dra7 dmtimer15 */ return dmtimer_percpu_timer_init(np, 0); - else if (pa == 0x48036000) /* dra7 dmtimer4 */ + else if (pa == 0x4882e000) /* dra7 dmtimer16 */ return dmtimer_percpu_timer_init(np, 1); return 0; diff --git a/drivers/counter/counter-sysfs.c b/drivers/counter/counter-sysfs.c index 7cc4d1d523ea..04eac41dad33 100644 --- a/drivers/counter/counter-sysfs.c +++ b/drivers/counter/counter-sysfs.c @@ -19,6 +19,11 @@ #include "counter-sysfs.h" +static inline struct counter_device *counter_from_dev(struct device *dev) +{ + return container_of(dev, struct counter_device, dev); +} + /** * struct counter_attribute - Counter sysfs attribute * @dev_attr: device attribute for sysfs @@ -90,7 +95,7 @@ static ssize_t counter_comp_u8_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct counter_attribute *const a = to_counter_attribute(attr); - struct counter_device *const counter = dev_get_drvdata(dev); + struct counter_device *const counter = counter_from_dev(dev); int err; u8 data = 0; 
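The counter-sysfs.c hunks around this point swap dev_get_drvdata() for the new counter_from_dev() helper: struct counter_device embeds its struct device, so the wrapper can be recovered from the device pointer itself via container_of() instead of relying on drvdata having been populated. A minimal sketch of the pattern, with hypothetical names (only the embedded-member layout is taken from the hunk; <linux/container_of.h> is assumed, on older kernels the macro lives in <linux/kernel.h>):

#include <linux/container_of.h>
#include <linux/device.h>

struct example_counter {
	struct device dev;	/* embedded object, not a pointer */
	int priv_state;
};

static inline struct example_counter *example_from_dev(struct device *dev)
{
	/* Only valid when @dev is known to be the member embedded above. */
	return container_of(dev, struct example_counter, dev);
}

The conversion keeps working even when drvdata is repurposed or never set, which is the usual motivation for this style of accessor.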
@@ -122,7 +127,7 @@ static ssize_t counter_comp_u8_store(struct device *dev, const char *buf, size_t len) { const struct counter_attribute *const a = to_counter_attribute(attr); - struct counter_device *const counter = dev_get_drvdata(dev); + struct counter_device *const counter = counter_from_dev(dev); int err; bool bool_data = 0; u8 data = 0; @@ -158,7 +163,7 @@ static ssize_t counter_comp_u32_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct counter_attribute *const a = to_counter_attribute(attr); - struct counter_device *const counter = dev_get_drvdata(dev); + struct counter_device *const counter = counter_from_dev(dev); const struct counter_available *const avail = a->comp.priv; int err; u32 data = 0; @@ -221,7 +226,7 @@ static ssize_t counter_comp_u32_store(struct device *dev, const char *buf, size_t len) { const struct counter_attribute *const a = to_counter_attribute(attr); - struct counter_device *const counter = dev_get_drvdata(dev); + struct counter_device *const counter = counter_from_dev(dev); struct counter_count *const count = a->parent; struct counter_synapse *const synapse = a->comp.priv; const struct counter_available *const avail = a->comp.priv; @@ -281,7 +286,7 @@ static ssize_t counter_comp_u64_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct counter_attribute *const a = to_counter_attribute(attr); - struct counter_device *const counter = dev_get_drvdata(dev); + struct counter_device *const counter = counter_from_dev(dev); int err; u64 data = 0; @@ -309,7 +314,7 @@ static ssize_t counter_comp_u64_store(struct device *dev, const char *buf, size_t len) { const struct counter_attribute *const a = to_counter_attribute(attr); - struct counter_device *const counter = dev_get_drvdata(dev); + struct counter_device *const counter = counter_from_dev(dev); int err; u64 data = 0; diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h index 647505957d4f..35f38ae67fb1 100644 --- a/drivers/cpufreq/amd-pstate-trace.h +++ b/drivers/cpufreq/amd-pstate-trace.h @@ -27,6 +27,10 @@ TRACE_EVENT(amd_pstate_perf, TP_PROTO(unsigned long min_perf, unsigned long target_perf, unsigned long capacity, + u64 freq, + u64 mperf, + u64 aperf, + u64 tsc, unsigned int cpu_id, bool changed, bool fast_switch @@ -35,6 +39,10 @@ TRACE_EVENT(amd_pstate_perf, TP_ARGS(min_perf, target_perf, capacity, + freq, + mperf, + aperf, + tsc, cpu_id, changed, fast_switch @@ -44,6 +52,10 @@ TRACE_EVENT(amd_pstate_perf, __field(unsigned long, min_perf) __field(unsigned long, target_perf) __field(unsigned long, capacity) + __field(unsigned long long, freq) + __field(unsigned long long, mperf) + __field(unsigned long long, aperf) + __field(unsigned long long, tsc) __field(unsigned int, cpu_id) __field(bool, changed) __field(bool, fast_switch) @@ -53,15 +65,23 @@ TRACE_EVENT(amd_pstate_perf, __entry->min_perf = min_perf; __entry->target_perf = target_perf; __entry->capacity = capacity; + __entry->freq = freq; + __entry->mperf = mperf; + __entry->aperf = aperf; + __entry->tsc = tsc; __entry->cpu_id = cpu_id; __entry->changed = changed; __entry->fast_switch = fast_switch; ), - TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu cpu_id=%u changed=%s fast_switch=%s", + TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u changed=%s fast_switch=%s", (unsigned long)__entry->min_perf, (unsigned long)__entry->target_perf, (unsigned long)__entry->capacity, + (unsigned long 
long)__entry->freq, + (unsigned long long)__entry->mperf, + (unsigned long long)__entry->aperf, + (unsigned long long)__entry->tsc, (unsigned int)__entry->cpu_id, (__entry->changed) ? "true" : "false", (__entry->fast_switch) ? "true" : "false" diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 9ce75ed11f8e..7be38bc6a673 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -66,6 +66,18 @@ MODULE_PARM_DESC(shared_mem, static struct cpufreq_driver amd_pstate_driver; /** + * struct amd_aperf_mperf + * @aperf: actual performance frequency clock count + * @mperf: maximum performance frequency clock count + * @tsc: time stamp counter + */ +struct amd_aperf_mperf { + u64 aperf; + u64 mperf; + u64 tsc; +}; + +/** * struct amd_cpudata - private CPU data for AMD P-State * @cpu: CPU number * @req: constraint request to apply @@ -81,6 +93,9 @@ static struct cpufreq_driver amd_pstate_driver; * @min_freq: the frequency that mapped to lowest_perf * @nominal_freq: the frequency that mapped to nominal_perf * @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf + * @cur: Difference of Aperf/Mperf/tsc count between last and current sample + * @prev: Last Aperf/Mperf/tsc count value read from register + * @freq: current cpu frequency value * @boost_supported: check whether the Processor or SBIOS supports boost mode * * The amd_cpudata is key private data for each CPU thread in AMD P-State, and @@ -102,6 +117,10 @@ struct amd_cpudata { u32 nominal_freq; u32 lowest_nonlinear_freq; + struct amd_aperf_mperf cur; + struct amd_aperf_mperf prev; + + u64 freq; bool boost_supported; }; @@ -211,6 +230,39 @@ static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, max_perf, fast_switch); } +static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) +{ + u64 aperf, mperf, tsc; + unsigned long flags; + + local_irq_save(flags); + rdmsrl(MSR_IA32_APERF, aperf); + rdmsrl(MSR_IA32_MPERF, mperf); + tsc = rdtsc(); + + if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) { + local_irq_restore(flags); + return false; + } + + local_irq_restore(flags); + + cpudata->cur.aperf = aperf; + cpudata->cur.mperf = mperf; + cpudata->cur.tsc = tsc; + cpudata->cur.aperf -= cpudata->prev.aperf; + cpudata->cur.mperf -= cpudata->prev.mperf; + cpudata->cur.tsc -= cpudata->prev.tsc; + + cpudata->prev.aperf = aperf; + cpudata->prev.mperf = mperf; + cpudata->prev.tsc = tsc; + + cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf); + + return true; +} + static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, u32 des_perf, u32 max_perf, bool fast_switch) { @@ -226,8 +278,11 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, value &= ~AMD_CPPC_MAX_PERF(~0L); value |= AMD_CPPC_MAX_PERF(max_perf); - trace_amd_pstate_perf(min_perf, des_perf, max_perf, - cpudata->cpu, (value != prev), fast_switch); + if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) { + trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq, + cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc, + cpudata->cpu, (value != prev), fast_switch); + } if (value == prev) return; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index b8d95536ee22..80f535cc8a75 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1518,6 +1518,10 @@ static int cpufreq_online(unsigned int cpu) kobject_uevent(&policy->kobj, KOBJ_ADD); + /* Callback for handling stuff after 
policy is ready */ + if (cpufreq_driver->ready) + cpufreq_driver->ready(policy); + if (cpufreq_thermal_control_enabled(cpufreq_driver)) policy->cdev = of_cpufreq_cooling_register(policy); diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 08515f7e515f..b6bd0ff35323 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -146,7 +146,7 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy) /************************** sysfs interface ************************/ -static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set, +static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set, const char *buf, size_t count) { struct dbs_data *dbs_data = to_dbs_data(attr_set); @@ -161,7 +161,7 @@ static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set, return count; } -static ssize_t store_up_threshold(struct gov_attr_set *attr_set, +static ssize_t up_threshold_store(struct gov_attr_set *attr_set, const char *buf, size_t count) { struct dbs_data *dbs_data = to_dbs_data(attr_set); @@ -177,7 +177,7 @@ static ssize_t store_up_threshold(struct gov_attr_set *attr_set, return count; } -static ssize_t store_down_threshold(struct gov_attr_set *attr_set, +static ssize_t down_threshold_store(struct gov_attr_set *attr_set, const char *buf, size_t count) { struct dbs_data *dbs_data = to_dbs_data(attr_set); @@ -195,7 +195,7 @@ static ssize_t store_down_threshold(struct gov_attr_set *attr_set, return count; } -static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set, +static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set, const char *buf, size_t count) { struct dbs_data *dbs_data = to_dbs_data(attr_set); @@ -220,7 +220,7 @@ static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set, return count; } -static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf, +static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf, size_t count) { struct dbs_data *dbs_data = to_dbs_data(attr_set); diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 63f7c219062b..0d42cf8b88d8 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -27,7 +27,7 @@ static DEFINE_MUTEX(gov_dbs_data_mutex); /* Common sysfs tunables */ /* - * store_sampling_rate - update sampling rate effective immediately if needed. + * sampling_rate_store - update sampling rate effective immediately if needed. * * If new rate is smaller than the old, simply updating * dbs.sampling_rate might not be appropriate. For example, if the @@ -41,7 +41,7 @@ static DEFINE_MUTEX(gov_dbs_data_mutex); * This must be called with dbs_data->mutex held, otherwise traversing * policy_dbs_list isn't safe. */ -ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf, +ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf, size_t count) { struct dbs_data *dbs_data = to_dbs_data(attr_set); @@ -80,7 +80,7 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf, return count; } -EXPORT_SYMBOL_GPL(store_sampling_rate); +EXPORT_SYMBOL_GPL(sampling_rate_store); /** * gov_update_cpu_data - Update CPU load data. 
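The store_*()/show_*() renames in the governor hunks above and below follow the <name>_store()/<name>_show() convention that the generic __ATTR_RW()/__ATTR_RO() initializers rely on, since those macros paste the attribute name together with a _show/_store suffix. A reduced sketch of the effect (the expansion is illustrative of include/linux/sysfs.h, not copied from it):

/* __ATTR_RW(foo) expects these exact function names to exist: */
static ssize_t foo_show(struct gov_attr_set *attr_set, char *buf);
static ssize_t foo_store(struct gov_attr_set *attr_set,
			 const char *buf, size_t count);

/* so the helper macro can shrink to: */
static struct governor_attr foo = __ATTR_RW(foo);
/* roughly: __ATTR(foo, 0644, foo_show, foo_store) */

This is why the function renames and the gov_attr_ro()/gov_attr_rw() simplification in cpufreq_governor.h land in the same series.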
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index bab8e6140377..a5a0bc3cc23e 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -51,7 +51,7 @@ static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set) } #define gov_show_one(_gov, file_name) \ -static ssize_t show_##file_name \ +static ssize_t file_name##_show \ (struct gov_attr_set *attr_set, char *buf) \ { \ struct dbs_data *dbs_data = to_dbs_data(attr_set); \ @@ -60,7 +60,7 @@ static ssize_t show_##file_name \ } #define gov_show_one_common(file_name) \ -static ssize_t show_##file_name \ +static ssize_t file_name##_show \ (struct gov_attr_set *attr_set, char *buf) \ { \ struct dbs_data *dbs_data = to_dbs_data(attr_set); \ @@ -68,12 +68,10 @@ static ssize_t show_##file_name \ } #define gov_attr_ro(_name) \ -static struct governor_attr _name = \ -__ATTR(_name, 0444, show_##_name, NULL) +static struct governor_attr _name = __ATTR_RO(_name) #define gov_attr_rw(_name) \ -static struct governor_attr _name = \ -__ATTR(_name, 0644, show_##_name, store_##_name) +static struct governor_attr _name = __ATTR_RW(_name) /* Common to all CPUs of a policy */ struct policy_dbs_info { @@ -176,7 +174,7 @@ void od_register_powersave_bias_handler(unsigned int (*f) (struct cpufreq_policy *, unsigned int, unsigned int), unsigned int powersave_bias); void od_unregister_powersave_bias_handler(void); -ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf, +ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf, size_t count); void gov_update_cpu_data(struct dbs_data *dbs_data); #endif /* _CPUFREQ_GOVERNOR_H */ diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c index a6f365b9cc1a..771770ea0ed0 100644 --- a/drivers/cpufreq/cpufreq_governor_attr_set.c +++ b/drivers/cpufreq/cpufreq_governor_attr_set.c @@ -8,11 +8,6 @@ #include "cpufreq_governor.h" -static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj) -{ - return container_of(kobj, struct gov_attr_set, kobj); -} - static inline struct governor_attr *to_gov_attr(struct attribute *attr) { return container_of(attr, struct governor_attr, attr); diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 6a41ea4729b8..e8fbf970ff07 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -202,7 +202,7 @@ static unsigned int od_dbs_update(struct cpufreq_policy *policy) /************************** sysfs interface ************************/ static struct dbs_governor od_dbs_gov; -static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf, +static ssize_t io_is_busy_store(struct gov_attr_set *attr_set, const char *buf, size_t count) { struct dbs_data *dbs_data = to_dbs_data(attr_set); @@ -220,7 +220,7 @@ static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf, return count; } -static ssize_t store_up_threshold(struct gov_attr_set *attr_set, +static ssize_t up_threshold_store(struct gov_attr_set *attr_set, const char *buf, size_t count) { struct dbs_data *dbs_data = to_dbs_data(attr_set); @@ -237,7 +237,7 @@ static ssize_t store_up_threshold(struct gov_attr_set *attr_set, return count; } -static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set, +static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set, const char *buf, size_t count) { struct dbs_data *dbs_data = to_dbs_data(attr_set); @@ 
-265,7 +265,7 @@ static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set, return count; } -static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set, +static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set, const char *buf, size_t count) { struct dbs_data *dbs_data = to_dbs_data(attr_set); @@ -290,7 +290,7 @@ static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set, return count; } -static ssize_t store_powersave_bias(struct gov_attr_set *attr_set, +static ssize_t powersave_bias_store(struct gov_attr_set *attr_set, const char *buf, size_t count) { struct dbs_data *dbs_data = to_dbs_data(attr_set); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index bc7f7e6759bd..846bb3a78788 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1692,6 +1692,37 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) } } +static void intel_pstate_update_epp_defaults(struct cpudata *cpudata) +{ + cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); + + /* + * If this CPU generation doesn't call for a change in the + * balance_perf EPP, return. + */ + if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) + return; + + /* + * If the powerup EPP is something other than the chipset default 0x80 and + * - is more performance-oriented than 0x80 (the default balance_perf EPP) + * - but less performance-oriented than the performance EPP + * then use it as the new balance_perf EPP. + */ + if (cpudata->epp_default < HWP_EPP_BALANCE_PERFORMANCE && + cpudata->epp_default > HWP_EPP_PERFORMANCE) { + epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = cpudata->epp_default; + return; + } + + /* + * Use the hardcoded per-generation value to update the balance_perf + * and default EPP.
+ */ + cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE]; + intel_pstate_set_epp(cpudata, cpudata->epp_default); +} + static void intel_pstate_hwp_enable(struct cpudata *cpudata) { /* First disable HWP notification interrupt till we activate again */ @@ -1705,12 +1736,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata) if (cpudata->epp_default >= 0) return; - if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) { - cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); - } else { - cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE]; - intel_pstate_set_epp(cpudata, cpudata->epp_default); - } + intel_pstate_update_epp_defaults(cpudata); } static int atom_get_min_pstate(void) diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index c538a153ee82..3e000e1a75c6 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -668,9 +668,9 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value) { - struct acpi_device *d; + struct acpi_device *d = acpi_fetch_acpi_dev(obj_handle); - if (acpi_bus_get_device(obj_handle, &d)) + if (!d) return 0; *return_value = acpi_driver_data(d); diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 12ab4014af71..d289036beff2 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -1172,14 +1172,14 @@ static int powernowk8_init(void) unsigned int i, supported_cpus = 0; int ret; + if (!x86_match_cpu(powernow_k8_ids)) + return -ENODEV; + if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) { __request_acpi_cpufreq(); return -ENODEV; } - if (!x86_match_cpu(powernow_k8_ids)) - return -ENODEV; - cpus_read_lock(); for_each_online_cpu(i) { smp_call_function_single(i, check_supported_cpu, &ret, 1); diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index 05f3d7876e44..effbb680b453 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -388,7 +388,7 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index) snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu); ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq, - IRQF_ONESHOT, data->irq_name, data); + IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data); if (ret) { dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret); return 0; @@ -542,6 +542,14 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) return 0; } +static void qcom_cpufreq_ready(struct cpufreq_policy *policy) +{ + struct qcom_cpufreq_data *data = policy->driver_data; + + if (data->throttle_irq >= 0) + enable_irq(data->throttle_irq); +} + static struct freq_attr *qcom_cpufreq_hw_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, &cpufreq_freq_attr_scaling_boost_freqs, @@ -561,6 +569,7 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = { .fast_switch = qcom_cpufreq_hw_fast_switch, .name = "qcom-cpufreq-hw", .attr = qcom_cpufreq_hw_attr, + .ready = qcom_cpufreq_ready, }; static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev) diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c index fcc53215bac8..3a39a7f48b77 100644 --- a/drivers/cpuidle/cpuidle-haltpoll.c +++ b/drivers/cpuidle/cpuidle-haltpoll.c @@ -108,11 +108,11 @@ static int __init haltpoll_init(void) if (boot_option_idle_override != IDLE_NO_OVERRIDE) return -ENODEV; - 
cpuidle_poll_state_init(drv); - if (!kvm_para_available() || !haltpoll_want()) return -ENODEV; + cpuidle_poll_state_init(drv); + ret = cpuidle_register_driver(drv); if (ret < 0) return ret; diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 4f705674f94f..7b2d138bc83e 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -808,6 +808,16 @@ config CRYPTO_DEV_ZYNQMP_AES accelerator. Select this if you want to use the ZynqMP module for AES algorithms. +config CRYPTO_DEV_ZYNQMP_SHA3 + tristate "Support for Xilinx ZynqMP SHA3 hardware accelerator" + depends on ZYNQMP_FIRMWARE || COMPILE_TEST + select CRYPTO_SHA3 + help + Xilinx ZynqMP has a SHA3 engine used for secure hash calculation. + This driver interfaces with the SHA3 hardware engine. + Select this if you want to use the ZynqMP module + for SHA3 hash computation. + source "drivers/crypto/chelsio/Kconfig" source "drivers/crypto/virtio/Kconfig" diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 1fe5120eb966..0a4fff23d272 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -47,7 +47,7 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/ obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/ -obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += xilinx/ +obj-y += xilinx/ obj-y += hisilicon/ obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/ obj-y += keembay/ diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 54ae8d16e493..35e3cadccac2 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -11,6 +11,7 @@ * You could find a link for the datasheet in Documentation/arm/sunxi.rst */ +#include <linux/bottom_half.h> #include <linux/crypto.h> #include <linux/dma-mapping.h> #include <linux/io.h> @@ -283,7 +284,9 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq) flow = rctx->flow; err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm)); + local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 88194718a806..859b7522faaa 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -9,6 +9,7 @@ * * You could find the datasheet in Documentation/arm/sunxi.rst */ +#include <linux/bottom_half.h> #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/scatterlist.h> @@ -414,6 +415,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) theend: kfree(buf); kfree(result); + local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 9ef1c85c4aaa..554e400d41ca 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -11,6 +11,7 @@ * You could find a link for the datasheet in Documentation/arm/sunxi.rst */ +#include <linux/bottom_half.h> #include <linux/crypto.h> #include <linux/dma-mapping.h> #include <linux/io.h> @@ -274,7 +275,9 @@ static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *ar struct skcipher_request *breq = container_of(areq, struct
skcipher_request, base); err = sun8i_ss_cipher(breq); + local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 80e89066dbd1..319fe3279a71 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -30,6 +30,8 @@ static const struct ss_variant ss_a80_variant = { .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, }, + .alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, + }, .op_mode = { SS_OP_ECB, SS_OP_CBC, }, .ss_clks = { diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 3c073eb3db03..1a71ed49d233 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -9,6 +9,7 @@ * * You could find the datasheet in Documentation/arm/sunxi.rst */ +#include <linux/bottom_half.h> #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/scatterlist.h> @@ -442,6 +443,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) theend: kfree(pad); kfree(result); + local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index c6865cbd334b..e79514fce731 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -265,7 +265,9 @@ static int meson_handle_cipher_request(struct crypto_engine *engine, struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); err = meson_cipher(breq); + local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index fe0558403191..f72c6b3e4ad8 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -2509,6 +2509,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) /* keep only major version number */ switch (dd->hw_version & 0xff0) { + case 0x700: case 0x500: dd->caps.has_dualbuff = 1; dd->caps.has_cfb64 = 1; diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 1b13f601fd95..d1628112dacc 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -2508,6 +2508,7 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd) /* keep only major version number */ switch (dd->hw_version & 0xff0) { + case 0x700: case 0x510: dd->caps.has_dma = 1; dd->caps.has_dualbuff = 1; diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index e30786ec9f2d..9fd7b8e439d2 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -1130,6 +1130,7 @@ static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd) /* keep only major version number */ switch (dd->hw_version & 0xf00) { + case 0x800: case 0x700: dd->caps.has_dma = 1; dd->caps.has_cfb_3keys = 1; diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c index 2e9c0d214363..9e7308e39b30 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c +++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/bitmap.h> #include <linux/workqueue.h> #include "nitrox_csr.h" @@ -120,6 +121,7 @@ static void 
pf2vf_resp_handler(struct work_struct *work) void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev) { + DECLARE_BITMAP(csr, BITS_PER_TYPE(u64)); struct nitrox_vfdev *vfdev; struct pf2vf_work *pfwork; u64 value, reg_addr; @@ -129,7 +131,8 @@ void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev) /* loop for VF(0..63) */ reg_addr = NPS_PKT_MBOX_INT_LO; value = nitrox_read_csr(ndev, reg_addr); - for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) { + bitmap_from_u64(csr, value); + for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) { /* get the vfno from ring */ vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues); vfdev = ndev->iov.vfdev + vfno; @@ -151,7 +154,8 @@ void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev) /* loop for VF(64..127) */ reg_addr = NPS_PKT_MBOX_INT_HI; value = nitrox_read_csr(ndev, reg_addr); - for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) { + bitmap_from_u64(csr, value); + for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) { /* get the vfno from ring */ vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues); vfdev = ndev->iov.vfdev + vfno; diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h index ed174883c8e3..6bf088bcdd11 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_req.h +++ b/drivers/crypto/cavium/nitrox/nitrox_req.h @@ -440,7 +440,7 @@ struct aqmq_command_s { /** * struct ctx_hdr - Book keeping data about the crypto context * @pool: Pool used to allocate crypto context - * @dma: Base DMA address of the cypto context + * @dma: Base DMA address of the crypto context * @ctx_dma: Actual usable crypto context for NITROX */ struct ctx_hdr { diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c index 812b4ac9afd6..dc5b7bf7e1fd 100644 --- a/drivers/crypto/cavium/zip/zip_main.c +++ b/drivers/crypto/cavium/zip/zip_main.c @@ -55,6 +55,11 @@ static const struct pci_device_id zip_id_table[] = { { 0, } }; +static void zip_debugfs_init(void); +static void zip_debugfs_exit(void); +static int zip_register_compression_device(void); +static void zip_unregister_compression_device(void); + void zip_reg_write(u64 val, u64 __iomem *addr) { writeq(val, addr); @@ -235,6 +240,15 @@ static int zip_init_hw(struct zip_device *zip) return 0; } +static void zip_reset(struct zip_device *zip) +{ + union zip_cmd_ctl cmd_ctl; + + cmd_ctl.u_reg64 = 0x0ull; + cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */ + zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL)); +} + static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; @@ -282,8 +296,21 @@ static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_release_regions; + /* Register with the Kernel Crypto Interface */ + err = zip_register_compression_device(); + if (err < 0) { + zip_err("ZIP: Kernel Crypto Registration failed\n"); + goto err_register; + } + + /* comp-decomp statistics are handled with debugfs interface */ + zip_debugfs_init(); + return 0; +err_register: + zip_reset(zip); + err_release_regions: if (zip->reg_base) iounmap(zip->reg_base); @@ -305,16 +332,17 @@ err_free_device: static void zip_remove(struct pci_dev *pdev) { struct zip_device *zip = pci_get_drvdata(pdev); - union zip_cmd_ctl cmd_ctl; int q = 0; if (!zip) return; + zip_debugfs_exit(); + + zip_unregister_compression_device(); + if (zip->reg_base) { - cmd_ctl.u_reg64 = 0x0ull; - cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */ - 
zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL)); + zip_reset(zip); iounmap(zip->reg_base); } @@ -585,7 +613,7 @@ DEFINE_SHOW_ATTRIBUTE(zip_regs); /* Root directory for thunderx_zip debugfs entry */ static struct dentry *zip_debugfs_root; -static void __init zip_debugfs_init(void) +static void zip_debugfs_init(void) { if (!debugfs_initialized()) return; @@ -604,7 +632,7 @@ static void __init zip_debugfs_init(void) } -static void __exit zip_debugfs_exit(void) +static void zip_debugfs_exit(void) { debugfs_remove_recursive(zip_debugfs_root); } @@ -615,48 +643,7 @@ static void __exit zip_debugfs_exit(void) { } #endif /* debugfs - end */ -static int __init zip_init_module(void) -{ - int ret; - - zip_msg("%s\n", DRV_NAME); - - ret = pci_register_driver(&zip_driver); - if (ret < 0) { - zip_err("ZIP: pci_register_driver() failed\n"); - return ret; - } - - /* Register with the Kernel Crypto Interface */ - ret = zip_register_compression_device(); - if (ret < 0) { - zip_err("ZIP: Kernel Crypto Registration failed\n"); - goto err_pci_unregister; - } - - /* comp-decomp statistics are handled with debugfs interface */ - zip_debugfs_init(); - - return ret; - -err_pci_unregister: - pci_unregister_driver(&zip_driver); - return ret; -} - -static void __exit zip_cleanup_module(void) -{ - zip_debugfs_exit(); - - /* Unregister from the kernel crypto interface */ - zip_unregister_compression_device(); - - /* Unregister this driver for pci zip devices */ - pci_unregister_driver(&zip_driver); -} - -module_init(zip_init_module); -module_exit(zip_cleanup_module); +module_pci_driver(zip_driver); MODULE_AUTHOR("Cavium Inc"); MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver"); diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c index e6dcd8cedd53..bed331953ff9 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes.c +++ b/drivers/crypto/ccp/ccp-crypto-aes.c @@ -69,7 +69,6 @@ static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt) struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req); struct scatterlist *iv_sg = NULL; unsigned int iv_len = 0; - int ret; if (!ctx->u.aes.key_len) return -EINVAL; @@ -104,9 +103,7 @@ static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt) rctx->cmd.u.aes.src_len = req->cryptlen; rctx->cmd.u.aes.dst = req->dst; - ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); - - return ret; + return ccp_crypto_enqueue_request(&req->base, &rctx->cmd); } static int ccp_aes_encrypt(struct skcipher_request *req) diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index d718db224be4..7d4b4ad1db1f 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -632,6 +632,20 @@ static int ccp_terminate_all(struct dma_chan *dma_chan) return 0; } +static void ccp_dma_release(struct ccp_device *ccp) +{ + struct ccp_dma_chan *chan; + struct dma_chan *dma_chan; + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) { + chan = ccp->ccp_dma_chan + i; + dma_chan = &chan->dma_chan; + tasklet_kill(&chan->cleanup_tasklet); + list_del_rcu(&dma_chan->device_node); + } +} + int ccp_dmaengine_register(struct ccp_device *ccp) { struct ccp_dma_chan *chan; @@ -736,6 +750,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp) return 0; err_reg: + ccp_dma_release(ccp); kmem_cache_destroy(ccp->dma_desc_cache); err_cache: @@ -752,6 +767,7 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp) return; dma_async_device_unregister(dma_dev); + ccp_dma_release(ccp); 
kmem_cache_destroy(ccp->dma_desc_cache); kmem_cache_destroy(ccp->dma_cmd_cache); diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 8fd774a10edc..6ab93dfd478a 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -413,7 +413,7 @@ static int __sev_platform_init_locked(int *error) { struct psp_device *psp = psp_master; struct sev_device *sev; - int rc, psp_ret; + int rc, psp_ret = -1; int (*init_function)(int *error); if (!psp || !psp->sev_data) diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index a5e041d9d2cf..11e0278c8631 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -258,6 +258,13 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg, { int ret = 0; + if (!nbytes) { + *mapped_nents = 0; + *lbytes = 0; + *nents = 0; + return 0; + } + *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); if (*nents > max_sg_nents) { *nents = 0; diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 78833491f534..309da6334a0a 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -257,8 +257,8 @@ static void cc_cipher_exit(struct crypto_tfm *tfm) &ctx_p->user.key_dma_addr); /* Free key buffer in context */ - kfree_sensitive(ctx_p->user.key); dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key); + kfree_sensitive(ctx_p->user.key); } struct tdes_keys { diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c index c1c2b1d86663..14d0d83d388d 100644 --- a/drivers/crypto/gemini/sl3516-ce-cipher.c +++ b/drivers/crypto/gemini/sl3516-ce-cipher.c @@ -23,8 +23,8 @@ static bool sl3516_ce_need_fallback(struct skcipher_request *areq) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sl3516_ce_dev *ce = op->ce; - struct scatterlist *in_sg = areq->src; - struct scatterlist *out_sg = areq->dst; + struct scatterlist *in_sg; + struct scatterlist *out_sg; struct scatterlist *sg; if (areq->cryptlen == 0 || areq->cryptlen % 16) { @@ -264,7 +264,9 @@ static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *a struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); err = sl3516_ce_cipher(breq); + local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index c5b84a5ea350..453390044181 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3840,7 +3840,7 @@ static void qm_clear_queues(struct hisi_qm *qm) for (i = 0; i < qm->qp_num; i++) { qp = &qm->qp_array[i]; - if (qp->is_resetting) + if (qp->is_in_kernel && qp->is_resetting) memset(qp->qdma.va, 0, qp->qdma.size); } @@ -4295,7 +4295,7 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) static int qm_vf_read_qos(struct hisi_qm *qm) { int cnt = 0; - int ret; + int ret = -EINVAL; /* reset mailbox qos val */ qm->mb_qos = 0; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 6a45bd23b363..a91635c348b5 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -42,6 +42,8 @@ #define SEC_DE_OFFSET_V3 9 #define SEC_SCENE_OFFSET_V3 5 #define SEC_CKEY_OFFSET_V3 13 +#define SEC_CTR_CNT_OFFSET 25 +#define SEC_CTR_CNT_ROLLOVER 2 
#define SEC_SRC_SGL_OFFSET_V3 11 #define SEC_DST_SGL_OFFSET_V3 14 #define SEC_CALG_OFFSET_V3 4 @@ -63,6 +65,7 @@ #define SEC_AUTH_CIPHER 0x1 #define SEC_MAX_MAC_LEN 64 #define SEC_MAX_AAD_LEN 65535 +#define SEC_MAX_CCM_AAD_LEN 65279 #define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH) #define SEC_PBUF_SZ 512 @@ -237,7 +240,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) if (unlikely(type != type_supported)) { atomic64_inc(&dfx->err_bd_cnt); - pr_err("err bd type [%d]\n", type); + pr_err("err bd type [%u]\n", type); return; } @@ -641,13 +644,15 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm) struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; c_ctx->fallback = false; + + /* Currently, only XTS mode needs a fallback tfm when using a 192-bit key */ if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ))) return 0; c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(c_ctx->fbtfm)) { - pr_err("failed to alloc fallback tfm!\n"); + pr_err("failed to alloc xts mode fallback tfm!\n"); return PTR_ERR(c_ctx->fbtfm); } @@ -808,7 +813,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, } memcpy(c_ctx->c_key, key, keylen); - if (c_ctx->fallback) { + if (c_ctx->fallback && c_ctx->fbtfm) { ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); if (ret) { dev_err(dev, "failed to set fallback skcipher key!\n"); @@ -1300,6 +1305,10 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req) cipher = SEC_CIPHER_DEC; sec_sqe3->c_icv_key |= cpu_to_le16(cipher); + /* Set the CTR counter mode to 128-bit rollover */ + sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER << + SEC_CTR_CNT_OFFSET); + if (req->use_pbuf) { bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3; bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3; @@ -1614,7 +1623,7 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir, sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1); sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3; } else { - sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1); + sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2); sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3; } sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen); @@ -2032,13 +2041,12 @@ static int sec_skcipher_soft_crypto(struct sec_ctx *ctx, struct skcipher_request *sreq, bool encrypt) { struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm); struct device *dev = ctx->dev; int ret; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm); - if (!c_ctx->fbtfm) { - dev_err(dev, "failed to check fallback tfm\n"); + dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n"); return -EINVAL; } @@ -2219,6 +2227,10 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq) } if (c_mode == SEC_CMODE_CCM) { + if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) { + dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n"); + return -EINVAL; + } ret = aead_iv_demension_check(req); if (ret) { dev_err(dev, "aead input iv param error!\n"); @@ -2256,7 +2268,6 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) if (ctx->sec->qm.ver == QM_HW_V2) { if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt && req->cryptlen <= authsize))) { - dev_err(dev, "Kunpeng920 not support 0 length!\n"); ctx->a_ctx.fallback = true; return -EINVAL; } @@ -2284,9 +2295,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, struct aead_request *aead_req, bool encrypt)
{ - struct aead_request *subreq = aead_request_ctx(aead_req); struct sec_auth_ctx *a_ctx = &ctx->a_ctx; struct device *dev = ctx->dev; + struct aead_request *subreq; + int ret; /* Kunpeng920 aead mode not support input 0 size */ if (!a_ctx->fallback_aead_tfm) { @@ -2294,6 +2306,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, return -EINVAL; } + subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL); + if (!subreq) + return -ENOMEM; + aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm); aead_request_set_callback(subreq, aead_req->base.flags, aead_req->base.complete, aead_req->base.data); @@ -2301,8 +2317,13 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, aead_req->cryptlen, aead_req->iv); aead_request_set_ad(subreq, aead_req->assoclen); - return encrypt ? crypto_aead_encrypt(subreq) : - crypto_aead_decrypt(subreq); + if (encrypt) + ret = crypto_aead_encrypt(subreq); + else + ret = crypto_aead_decrypt(subreq); + aead_request_free(subreq); + + return ret; } static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h index 9f71c358a6d3..5e039b50e9d4 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.h +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h @@ -354,8 +354,10 @@ struct sec_sqe3 { * akey_len: 9~14 bits * a_alg: 15~20 bits * key_sel: 21~24 bits - * updata_key: 25 bits - * reserved: 26~31 bits + * ctr_count_mode/sm4_xts: 25~26 bits + * sva_prefetch: 27 bits + * key_wrap_num: 28~30 bits + * update_key: 31 bits */ __le32 auth_mac_key; __le32 salt; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 26d3ab1d308b..0b9906ff69e3 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -90,6 +90,10 @@ SEC_USER1_WB_DATA_SSV) #define SEC_USER1_SMMU_SVA (SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET) #define SEC_USER1_SMMU_MASK (~SEC_USER1_SVA_SET) +#define SEC_INTERFACE_USER_CTRL0_REG_V3 0x302220 +#define SEC_INTERFACE_USER_CTRL1_REG_V3 0x302224 +#define SEC_USER1_SMMU_NORMAL_V3 (BIT(23) | BIT(17) | BIT(11) | BIT(5)) +#define SEC_USER1_SMMU_MASK_V3 0xFF79E79E #define SEC_CORE_INT_STATUS_M_ECC BIT(2) #define SEC_PREFETCH_CFG 0x301130 @@ -335,6 +339,41 @@ static void sec_set_endian(struct hisi_qm *qm) writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); } +static void sec_engine_sva_config(struct hisi_qm *qm) +{ + u32 reg; + + if (qm->ver > QM_HW_V2) { + reg = readl_relaxed(qm->io_base + + SEC_INTERFACE_USER_CTRL0_REG_V3); + reg |= SEC_USER0_SMMU_NORMAL; + writel_relaxed(reg, qm->io_base + + SEC_INTERFACE_USER_CTRL0_REG_V3); + + reg = readl_relaxed(qm->io_base + + SEC_INTERFACE_USER_CTRL1_REG_V3); + reg &= SEC_USER1_SMMU_MASK_V3; + reg |= SEC_USER1_SMMU_NORMAL_V3; + writel_relaxed(reg, qm->io_base + + SEC_INTERFACE_USER_CTRL1_REG_V3); + } else { + reg = readl_relaxed(qm->io_base + + SEC_INTERFACE_USER_CTRL0_REG); + reg |= SEC_USER0_SMMU_NORMAL; + writel_relaxed(reg, qm->io_base + + SEC_INTERFACE_USER_CTRL0_REG); + reg = readl_relaxed(qm->io_base + + SEC_INTERFACE_USER_CTRL1_REG); + reg &= SEC_USER1_SMMU_MASK; + if (qm->use_sva) + reg |= SEC_USER1_SMMU_SVA; + else + reg |= SEC_USER1_SMMU_NORMAL; + writel_relaxed(reg, qm->io_base + + SEC_INTERFACE_USER_CTRL1_REG); + } +} + static void sec_open_sva_prefetch(struct hisi_qm *qm) { u32 val; @@ -426,26 +465,18 @@ static int sec_engine_init(struct hisi_qm *qm) reg |= (0x1 << SEC_TRNG_EN_SHIFT); writel_relaxed(reg, 
qm->io_base + SEC_CONTROL_REG); - reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); - reg |= SEC_USER0_SMMU_NORMAL; - writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); - - reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); - reg &= SEC_USER1_SMMU_MASK; - if (qm->use_sva && qm->ver == QM_HW_V2) - reg |= SEC_USER1_SMMU_SVA; - else - reg |= SEC_USER1_SMMU_NORMAL; - writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); + sec_engine_sva_config(qm); writel(SEC_SINGLE_PORT_MAX_TRANS, qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); - /* Enable sm4 extra mode, as ctr/ecb */ - writel_relaxed(SEC_BD_ERR_CHK_EN0, - qm->io_base + SEC_BD_ERR_CHK_EN_REG0); + /* HW V2 enable sm4 extra mode, as ctr/ecb */ + if (qm->ver < QM_HW_V3) + writel_relaxed(SEC_BD_ERR_CHK_EN0, + qm->io_base + SEC_BD_ERR_CHK_EN_REG0); + /* Enable sm4 xts mode multiple iv */ writel_relaxed(SEC_BD_ERR_CHK_EN1, qm->io_base + SEC_BD_ERR_CHK_EN_REG1); diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig index 9125199f1702..a48591af12d0 100644 --- a/drivers/crypto/marvell/Kconfig +++ b/drivers/crypto/marvell/Kconfig @@ -47,6 +47,7 @@ config CRYPTO_DEV_OCTEONTX2_CPT select CRYPTO_SKCIPHER select CRYPTO_HASH select CRYPTO_AEAD + select NET_DEVLINK help This driver allows you to utilize the Marvell Cryptographic Accelerator Unit(CPT) found in OcteonTX2 series of processors. diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c index ccbef01888d4..01c48ddc4eeb 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c @@ -1639,11 +1639,8 @@ static void swap_func(void *lptr, void *rptr, int size) { struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr; struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr; - struct cpt_device_desc desc; - desc = *ldesc; - *ldesc = *rdesc; - *rdesc = desc; + swap(*ldesc, *rdesc); } int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c index b681bd2dc6ad..36d72e35ebeb 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c @@ -204,7 +204,6 @@ static int alloc_command_queues(struct otx_cptvf *cptvf, /* per queue initialization */ for (i = 0; i < cptvf->num_queues; i++) { - c_size = 0; rem_q_size = q_size; first = NULL; last = NULL; diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h index fb56824cb0a6..5012b7e669f0 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h @@ -157,5 +157,6 @@ struct otx2_cptlfs_info; int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs); int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs); int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs); +int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox); #endif /* __OTX2_CPT_COMMON_H */ diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c index 9074876d38e5..a317319696ef 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c @@ -202,3 +202,17 @@ int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs) } 
return ret; } + +int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox) +{ + int err; + + if (!otx2_mbox_nonempty(mbox, 0)) + return 0; + otx2_mbox_msg_send(mbox, 0); + err = otx2_mbox_wait_for_rsp(mbox, 0); + if (err) + return err; + + return otx2_mbox_check_rsp_msgs(mbox, 0); +} diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h index b691b6c1d5c4..4fcaf61a70e3 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h @@ -26,12 +26,22 @@ */ #define OTX2_CPT_INST_QLEN_MSGS ((OTX2_CPT_SIZE_DIV40 - 1) * 40) +/* + * LDWB is getting incorrectly used when IQB_LDWB = 1 and CPT instruction + * queue has less than 320 free entries. So, increase HW instruction queue + * size by 320 and give 320 entries less for SW/NIX RX as a workaround. + */ +#define OTX2_CPT_INST_QLEN_EXTRA_BYTES (320 * OTX2_CPT_INST_SIZE) +#define OTX2_CPT_EXTRA_SIZE_DIV40 (320/40) + /* CPT instruction queue length in bytes */ -#define OTX2_CPT_INST_QLEN_BYTES (OTX2_CPT_SIZE_DIV40 * 40 * \ - OTX2_CPT_INST_SIZE) +#define OTX2_CPT_INST_QLEN_BYTES \ + ((OTX2_CPT_SIZE_DIV40 * 40 * OTX2_CPT_INST_SIZE) + \ + OTX2_CPT_INST_QLEN_EXTRA_BYTES) /* CPT instruction group queue length in bytes */ -#define OTX2_CPT_INST_GRP_QLEN_BYTES (OTX2_CPT_SIZE_DIV40 * 16) +#define OTX2_CPT_INST_GRP_QLEN_BYTES \ + ((OTX2_CPT_SIZE_DIV40 + OTX2_CPT_EXTRA_SIZE_DIV40) * 16) /* CPT FC length in bytes */ #define OTX2_CPT_Q_FC_LEN 128 @@ -179,7 +189,8 @@ static inline void otx2_cptlf_do_set_iqueue_size(struct otx2_cptlf_info *lf) { union otx2_cptx_lf_q_size lf_q_size = { .u = 0x0 }; - lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40; + lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40 + + OTX2_CPT_EXTRA_SIZE_DIV40; otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot, OTX2_CPT_LF_Q_SIZE, lf_q_size.u); } diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h index 05b2d9c650e1..936174b012e8 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h @@ -46,6 +46,7 @@ struct otx2_cptpf_dev { struct workqueue_struct *flr_wq; struct cptpf_flr_work *flr_work; + struct mutex lock; /* serialize mailbox access */ unsigned long cap_flag; u8 pf_id; /* RVU PF number */ diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c index 1720a5bb7016..a402ccfac557 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c @@ -140,10 +140,13 @@ static void cptpf_flr_wq_handler(struct work_struct *work) vf = flr_work - pf->flr_work; + mutex_lock(&pf->lock); req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req), sizeof(struct msg_rsp)); - if (!req) + if (!req) { + mutex_unlock(&pf->lock); return; + } req->sig = OTX2_MBOX_REQ_SIG; req->id = MBOX_MSG_VF_FLR; @@ -151,16 +154,19 @@ static void cptpf_flr_wq_handler(struct work_struct *work) req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK; otx2_cpt_send_mbox_msg(mbox, pf->pdev); + if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) { - if (vf >= 64) { - reg = 1; - vf = vf - 64; + if (vf >= 64) { + reg = 1; + vf = vf - 64; + } + /* Clear transaction pending register */ + otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0, + RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); + otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0, + RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf)); } - /* Clear transaction pending register */ - otx2_cpt_write64(pf->reg_base, 
BLKADDR_RVUM, 0, - RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); - otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0, - RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf)); + mutex_unlock(&pf->lock); } static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg) @@ -468,6 +474,7 @@ static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf) goto error; INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler); + mutex_init(&cptpf->lock); return 0; error: diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c index 186f1c1190c1..dee0aa60b698 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c @@ -18,9 +18,12 @@ static int forward_to_af(struct otx2_cptpf_dev *cptpf, struct mbox_msghdr *msg; int ret; + mutex_lock(&cptpf->lock); msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size); - if (msg == NULL) + if (msg == NULL) { + mutex_unlock(&cptpf->lock); return -ENOMEM; + } memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr), (uint8_t *)req + sizeof(struct mbox_msghdr), size); @@ -29,15 +32,19 @@ static int forward_to_af(struct otx2_cptpf_dev *cptpf, msg->sig = req->sig; msg->ver = req->ver; - otx2_mbox_msg_send(&cptpf->afpf_mbox, 0); - ret = otx2_mbox_wait_for_rsp(&cptpf->afpf_mbox, 0); + ret = otx2_cpt_sync_mbox_msg(&cptpf->afpf_mbox); + /* Error code -EIO indicates there is a communication failure + * to the AF. The rest of the error codes indicate that the AF processed + * the VF messages and set the error codes in the response messages + * (if any), so simply forward the responses to the VF. + */ if (ret == -EIO) { - dev_err(&cptpf->pdev->dev, "RVU MBOX timeout.\n"); + dev_warn(&cptpf->pdev->dev, + "AF not responding to VF%d messages\n", vf->vf_id); + mutex_unlock(&cptpf->lock); return ret; - } else if (ret) { - dev_err(&cptpf->pdev->dev, "RVU MBOX error: %d.\n", ret); - return -EFAULT; } + mutex_unlock(&cptpf->lock); return 0; } @@ -204,6 +211,10 @@ void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work) if (err == -ENOMEM || err == -EIO) break; offset = msg->next_msgoff; + /* Write barrier required for VF responses which are handled by + * the PF driver and not forwarded to the AF.
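/*
 * Illustrative sketch (not part of the patch above): the new
 * otx2_cpt_sync_mbox_msg() helper plus the cptpf->lock mutex give every
 * PF-to-AF round trip the same lock -> allocate -> send-and-wait -> unlock
 * shape used by forward_to_af() and the FLR handler. The function name is
 * hypothetical and the message-filling step is elided.
 */
static int cptpf_af_roundtrip_sketch(struct otx2_cptpf_dev *cptpf, int size)
{
	struct mbox_msghdr *msg;
	int ret;

	mutex_lock(&cptpf->lock);
	msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
	if (!msg) {
		mutex_unlock(&cptpf->lock);
		return -ENOMEM;
	}
	/* ... fill in msg->id, msg->sig, msg->ver, msg->pcifunc here ... */
	ret = otx2_cpt_sync_mbox_msg(&cptpf->afpf_mbox);
	mutex_unlock(&cptpf->lock);
	return ret;
}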
+ */ + smp_wmb(); } /* Send mbox responses to VF */ if (mdev->num_msgs) @@ -350,6 +361,8 @@ void otx2_cptpf_afpf_mbox_handler(struct work_struct *work) process_afpf_mbox_msg(cptpf, msg); offset = msg->next_msgoff; + /* Sync VF response ready to be sent */ + smp_wmb(); mdev->msgs_acked++; } otx2_mbox_reset(afpf_mbox, 0); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 4c8ebdf671ca..9cba2f714c7e 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -1076,6 +1076,39 @@ static void delete_engine_grps(struct pci_dev *pdev, delete_engine_group(&pdev->dev, &eng_grps->grp[i]); } +#define PCI_DEVID_CN10K_RNM 0xA098 +#define RNM_ENTROPY_STATUS 0x8 + +static void rnm_to_cpt_errata_fixup(struct device *dev) +{ + struct pci_dev *pdev; + void __iomem *base; + int timeout = 5000; + + pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL); + if (!pdev) + return; + + base = pci_ioremap_bar(pdev, 0); + if (!base) + goto put_pdev; + + while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) { + cpu_relax(); + udelay(1); + timeout--; + if (!timeout) { + dev_warn(dev, "RNM is not producing entropy\n"); + break; + } + } + + iounmap(base); + +put_pdev: + pci_dev_put(pdev); +} + int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type) { @@ -1111,6 +1144,7 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf, struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} }; struct pci_dev *pdev = cptpf->pdev; struct fw_info_t fw_info; + u64 reg_val; int ret = 0; mutex_lock(&eng_grps->lock); @@ -1189,9 +1223,17 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf, if (is_dev_otx2(pdev)) goto unlock; + + /* + * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing + * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata. + */ + rnm_to_cpt_errata_fixup(&pdev->dev); + /* * Configure engine group mask to allow context prefetching - * for the groups. + * for the groups and enable random number request, to enable + * CPT to request random numbers from RNM. */ otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16), @@ -1203,6 +1245,18 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf, */ otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER, CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0); + + /* + * Set CPT_AF_DIAG[FLT_DIS] as a workaround for a HW erratum: when + * CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to LLC/DRAM + * encounters a fault/poison, a rare case may result in + * unpredictable data being delivered to a CPT engine.
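/*
 * Illustrative sketch (not part of the patch): the entropy wait in
 * rnm_to_cpt_errata_fixup() above open-codes a 1 us poll loop; assuming
 * the same cadence is wanted, it could also be written with the helpers
 * from <linux/iopoll.h>. The function name is hypothetical.
 */
#include <linux/iopoll.h>

static int rnm_wait_for_entropy_sketch(void __iomem *base)
{
	u64 sts;

	/* Poll NORMAL_CNT (bits 6:0) for 0x40 every 1 us, up to 5000 us. */
	return readq_poll_timeout_atomic(base + RNM_ENTROPY_STATUS, sts,
					 (sts & 0x7F) == 0x40, 1, 5000);
}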
+ */ + otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, ®_val, + BLKADDR_CPT0); + otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, + reg_val | BIT_ULL(24), BLKADDR_CPT0); + mutex_unlock(&eng_grps->lock); return 0; @@ -1753,7 +1807,6 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf) char engs_info[2 * OTX2_CPT_NAME_LENGTH]; struct otx2_cpt_eng_grp_info *grp; struct otx2_cpt_engs_rsvd *engs; - u32 mask[4]; int i, j; pr_debug("Engine groups global info"); @@ -1785,6 +1838,8 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf) for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) { engs = &grp->engs[j]; if (engs->type) { + u32 mask[5] = { }; + get_engs_info(grp, engs_info, 2 * OTX2_CPT_NAME_LENGTH, j); pr_debug("Slot%d: %s", j, engs_info); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c index 2748a3327e39..f8f8542ce3e4 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c @@ -1634,16 +1634,13 @@ static inline int cpt_register_algs(void) { int i, err = 0; - if (!IS_ENABLED(CONFIG_DM_CRYPT)) { - for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++) - otx2_cpt_skciphers[i].base.cra_flags &= - ~CRYPTO_ALG_DEAD; - - err = crypto_register_skciphers(otx2_cpt_skciphers, - ARRAY_SIZE(otx2_cpt_skciphers)); - if (err) - return err; - } + for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++) + otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD; + + err = crypto_register_skciphers(otx2_cpt_skciphers, + ARRAY_SIZE(otx2_cpt_skciphers)); + if (err) + return err; for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++) otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD; diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index d19e5ffb5104..d6f9e2fe863d 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -331,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128); } - for_each_sg(req->src, src, sg_nents(src), i) { + for_each_sg(req->src, src, sg_nents(req->src), i) { src_buf = sg_virt(src); len = sg_dma_len(src); tlen += len; diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c index 4e304f6081e4..7584a34ba88c 100644 --- a/drivers/crypto/nx/nx-common-pseries.c +++ b/drivers/crypto/nx/nx-common-pseries.c @@ -962,7 +962,7 @@ static struct attribute *nx842_sysfs_entries[] = { NULL, }; -static struct attribute_group nx842_attribute_group = { +static const struct attribute_group nx842_attribute_group = { .name = NULL, /* put in device directory */ .attrs = nx842_sysfs_entries, }; @@ -992,7 +992,7 @@ static struct attribute *nxcop_caps_sysfs_entries[] = { NULL, }; -static struct attribute_group nxcop_caps_attr_group = { +static const struct attribute_group nxcop_caps_attr_group = { .name = "nx_gzip_caps", .attrs = nxcop_caps_sysfs_entries, }; diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index a196bb8b1701..581211a92628 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1093,7 +1093,7 @@ static struct attribute *omap_aes_attrs[] = { NULL, }; -static struct attribute_group omap_aes_attr_group = { +static const struct attribute_group omap_aes_attr_group = { .attrs = omap_aes_attrs, }; diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index f6bf53c00b61..4b37dc69a50c 100644 --- a/drivers/crypto/omap-sham.c +++ 
b/drivers/crypto/omap-sham.c @@ -2045,7 +2045,7 @@ static struct attribute *omap_sham_attrs[] = { NULL, }; -static struct attribute_group omap_sham_attr_group = { +static const struct attribute_group omap_sham_attr_group = { .attrs = omap_sham_attrs, }; diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c index 6d10edc40aca..fb5970a68484 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -6,6 +6,7 @@ #include <adf_common_drv.h> #include <adf_gen4_hw_data.h> #include <adf_gen4_pfvf.h> +#include <adf_gen4_pm.h> #include "adf_4xxx_hw_data.h" #include "icp_qat_hw.h" @@ -52,7 +53,7 @@ static const char *const dev_cfg_services[] = { static int get_service_enabled(struct adf_accel_dev *accel_dev) { char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; - u32 ret; + int ret; ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_SERVICES_ENABLED, services); @@ -229,7 +230,7 @@ static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) void __iomem *csr = misc_bar->virt_addr; /* Enable all in errsou3 except VFLR notification on host */ - ADF_CSR_WR(csr, ADF_4XXX_ERRMSK3, ADF_4XXX_VFLNOTIFY); + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY); } static void adf_enable_ints(struct adf_accel_dev *accel_dev) @@ -256,19 +257,19 @@ static int adf_init_device(struct adf_accel_dev *accel_dev) addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; /* Temporarily mask PM interrupt */ - csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2); - csr |= ADF_4XXX_PM_SOU; - ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr); + csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2); + csr |= ADF_GEN4_PM_SOU; + ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr); /* Set DRV_ACTIVE bit to power up the device */ - ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE); + ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE); /* Poll status register to make sure the device is powered up */ ret = read_poll_timeout(ADF_CSR_RD, status, - status & ADF_4XXX_PM_INIT_STATE, - ADF_4XXX_PM_POLL_DELAY_US, - ADF_4XXX_PM_POLL_TIMEOUT_US, true, addr, - ADF_4XXX_PM_STATUS); + status & ADF_GEN4_PM_INIT_STATE, + ADF_GEN4_PM_POLL_DELAY_US, + ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr, + ADF_GEN4_PM_STATUS); if (ret) dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); @@ -354,6 +355,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data) hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; hw_data->disable_iov = adf_disable_sriov; hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; + hw_data->enable_pm = adf_gen4_enable_pm; + hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h index 12e4fb9b40ce..1034752845ca 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -39,20 +39,6 @@ #define ADF_4XXX_NUM_RINGS_PER_BANK 2 #define ADF_4XXX_NUM_BANKS_PER_VF 4 -/* Error source registers */ -#define ADF_4XXX_ERRSOU0 (0x41A200) -#define ADF_4XXX_ERRSOU1 (0x41A204) -#define ADF_4XXX_ERRSOU2 (0x41A208) -#define ADF_4XXX_ERRSOU3 (0x41A20C) - -/* Error source mask registers */ -#define ADF_4XXX_ERRMSK0 (0x41A210) -#define ADF_4XXX_ERRMSK1 (0x41A214) -#define ADF_4XXX_ERRMSK2 (0x41A218) -#define ADF_4XXX_ERRMSK3 (0x41A21C) - -#define 
ADF_4XXX_VFLNOTIFY BIT(7) - /* Arbiter configuration */ #define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) #define ADF_4XXX_ARB_OFFSET (0x0) @@ -63,16 +49,6 @@ #define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578) #define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970) -/* Power management */ -#define ADF_4XXX_PM_POLL_DELAY_US 20 -#define ADF_4XXX_PM_POLL_TIMEOUT_US USEC_PER_SEC -#define ADF_4XXX_PM_STATUS (0x50A00C) -#define ADF_4XXX_PM_INTERRUPT (0x50A028) -#define ADF_4XXX_PM_DRV_ACTIVE BIT(20) -#define ADF_4XXX_PM_INIT_STATE BIT(21) -/* Power management source in ERRSOU2 and ERRMSK2 */ -#define ADF_4XXX_PM_SOU BIT(18) - /* Firmware Binaries */ #define ADF_4XXX_FW "qat_4xxx.bin" #define ADF_4XXX_MMP "qat_4xxx_mmp.bin" diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c index a6c78b9c730b..fa4c350c1bf9 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c @@ -75,6 +75,13 @@ static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) if (ret) goto err; + /* Temporarily set the number of crypto instances to zero to avoid + * registering the crypto algorithms. + * This will be removed when the algorithms support the + * CRYPTO_TFM_REQ_MAY_BACKLOG flag + */ + instances = 0; + for (i = 0; i < instances; i++) { val = i; bank = i * 2; diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile index 7e191a42a5c7..f25a6c8edfc7 100644 --- a/drivers/crypto/qat/qat_common/Makefile +++ b/drivers/crypto/qat/qat_common/Makefile @@ -12,6 +12,7 @@ intel_qat-objs := adf_cfg.o \ adf_hw_arbiter.o \ adf_gen2_hw_data.o \ adf_gen4_hw_data.o \ + adf_gen4_pm.o \ qat_crypto.o \ qat_algs.o \ qat_asym_algs.o \ diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index 2d4cd7c7cf33..a03c6cf72331 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -184,6 +184,8 @@ struct adf_hw_device_data { void (*exit_arb)(struct adf_accel_dev *accel_dev); const u32 *(*get_arb_mapping)(void); int (*init_device)(struct adf_accel_dev *accel_dev); + int (*enable_pm)(struct adf_accel_dev *accel_dev); + bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev); void (*disable_iov)(struct adf_accel_dev *accel_dev); void (*configure_iov_threads)(struct adf_accel_dev *accel_dev, bool enable); diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index 498eb6f690e3..3b6184c35081 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c @@ -251,6 +251,43 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_send_admin_init); +/** + * adf_init_admin_pm() - Function sends PM init message to FW + * @accel_dev: Pointer to acceleration device. + * @idle_delay: QAT HW idle time before power gating is initiated. + * 000 - 64us + * 001 - 128us + * 010 - 256us + * 011 - 512us + * 100 - 1ms + * 101 - 2ms + * 110 - 4ms + * 111 - 8ms + * + * Function sends to the FW the admin init message for the PM state + * configuration. + * + * Return: 0 on success, error code otherwise.
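/*
 * Illustrative sketch (not part of the patch): the idle_delay encoding
 * documented above is a power-of-two multiplier on a 64 us base, i.e.
 * idle time = 64 us << idle_delay, saturating at 7 (8 ms). A hypothetical
 * helper that picks the smallest value covering a requested idle time:
 */
static u32 qat_us_to_idle_filter_sketch(u32 us)
{
	u32 filter = 0;

	/* 0 -> 64 us, each step doubles, 7 -> 8 ms. */
	while (filter < 7 && (64U << filter) < us)
		filter++;

	return filter;
}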
+ */ +int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct icp_qat_fw_init_admin_resp resp = {0}; + struct icp_qat_fw_init_admin_req req = {0}; + u32 ae_mask = hw_data->admin_ae_mask; + + if (!accel_dev->admin) { + dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n"); + return -EFAULT; + } + + req.cmd_id = ICP_QAT_FW_PM_STATE_CONFIG; + req.idle_filter = idle_delay; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); +} +EXPORT_SYMBOL_GPL(adf_init_admin_pm); + int adf_init_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin; diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 76f4f96ec5eb..e8c9b77c0d66 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -102,6 +102,7 @@ void adf_exit_aer(void); int adf_init_admin_comms(struct adf_accel_dev *accel_dev); void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); int adf_send_admin_init(struct adf_accel_dev *accel_dev); +int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); @@ -188,6 +189,9 @@ int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, void *addr_ptr, u32 mem_size, char *obj_name); int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle, unsigned int cfg_ae_mask); +int adf_init_misc_wq(void); +void adf_exit_misc_wq(void); +bool adf_misc_wq_queue_work(struct work_struct *work); #if defined(CONFIG_PCI_IOV) int adf_sriov_configure(struct pci_dev *pdev, int numvfs); void adf_disable_sriov(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index 6f64aa693146..e8ac932bbaab 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -419,6 +419,9 @@ static int __init adf_register_ctl_device_driver(void) if (adf_chr_drv_create()) goto err_chr_dev; + if (adf_init_misc_wq()) + goto err_misc_wq; + if (adf_init_aer()) goto err_aer; @@ -440,6 +443,8 @@ err_vf_wq: err_pf_wq: adf_exit_aer(); err_aer: + adf_exit_misc_wq(); +err_misc_wq: adf_chr_drv_destroy(); err_chr_dev: mutex_destroy(&adf_ctl_lock); @@ -449,6 +454,7 @@ err_chr_dev: static void __exit adf_unregister_ctl_device_driver(void) { adf_chr_drv_destroy(); + adf_exit_misc_wq(); adf_exit_aer(); adf_exit_vf_wq(); adf_exit_pf_wq(); diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h index f0f71ca44ca3..43b8f864806b 100644 --- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h @@ -122,6 +122,20 @@ do { \ #define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0) #define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4) +/* Error source registers */ +#define ADF_GEN4_ERRSOU0 (0x41A200) +#define ADF_GEN4_ERRSOU1 (0x41A204) +#define ADF_GEN4_ERRSOU2 (0x41A208) +#define ADF_GEN4_ERRSOU3 (0x41A20C) + +/* Error source mask registers */ +#define ADF_GEN4_ERRMSK0 (0x41A210) +#define ADF_GEN4_ERRMSK1 (0x41A214) +#define ADF_GEN4_ERRMSK2 (0x41A218) +#define ADF_GEN4_ERRMSK3 (0x41A21C) + +#define ADF_GEN4_VFLNOTIFY BIT(7) + void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); void 
adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c index 8efbedf63bc8..d80d493a7756 100644 --- a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c +++ b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c @@ -9,15 +9,12 @@ #include "adf_pfvf_pf_proto.h" #include "adf_pfvf_utils.h" -#define ADF_4XXX_MAX_NUM_VFS 16 - #define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20)) #define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20)) /* VF2PF interrupt source registers */ -#define ADF_4XXX_VM2PF_SOU(i) (0x41A180 + ((i) * 4)) -#define ADF_4XXX_VM2PF_MSK(i) (0x41A1C0 + ((i) * 4)) -#define ADF_4XXX_VM2PF_INT_EN_MSK BIT(0) +#define ADF_4XXX_VM2PF_SOU 0x41A180 +#define ADF_4XXX_VM2PF_MSK 0x41A1C0 #define ADF_PFVF_GEN4_MSGTYPE_SHIFT 2 #define ADF_PFVF_GEN4_MSGTYPE_MASK 0x3F @@ -41,51 +38,30 @@ static u32 adf_gen4_pf_get_vf2pf_offset(u32 i) static u32 adf_gen4_get_vf2pf_sources(void __iomem *pmisc_addr) { - int i; u32 sou, mask; - int num_csrs = ADF_4XXX_MAX_NUM_VFS; - u32 vf_mask = 0; - for (i = 0; i < num_csrs; i++) { - sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU(i)); - mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK(i)); - sou &= ~mask; - vf_mask |= sou << i; - } + sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU); + mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK); - return vf_mask; + return sou & ~mask; } static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask) { - int num_csrs = ADF_4XXX_MAX_NUM_VFS; - unsigned long mask = vf_mask; unsigned int val; - int i; - - for_each_set_bit(i, &mask, num_csrs) { - unsigned int offset = ADF_4XXX_VM2PF_MSK(i); - val = ADF_CSR_RD(pmisc_addr, offset) & ~ADF_4XXX_VM2PF_INT_EN_MSK; - ADF_CSR_WR(pmisc_addr, offset, val); - } + val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask; + ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val); } static void adf_gen4_disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask) { - int num_csrs = ADF_4XXX_MAX_NUM_VFS; - unsigned long mask = vf_mask; unsigned int val; - int i; - - for_each_set_bit(i, &mask, num_csrs) { - unsigned int offset = ADF_4XXX_VM2PF_MSK(i); - val = ADF_CSR_RD(pmisc_addr, offset) | ADF_4XXX_VM2PF_INT_EN_MSK; - ADF_CSR_WR(pmisc_addr, offset, val); - } + val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) | vf_mask; + ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val); } static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev, diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/qat/qat_common/adf_gen4_pm.c new file mode 100644 index 000000000000..7037c0892a8a --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_gen4_pm.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2022 Intel Corporation */ +#include <linux/bitfield.h> +#include <linux/iopoll.h> +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_gen4_pm.h" +#include "adf_cfg_strings.h" +#include "icp_qat_fw_init_admin.h" +#include "adf_gen4_hw_data.h" +#include "adf_cfg.h" + +enum qat_pm_host_msg { + PM_NO_CHANGE = 0, + PM_SET_MIN, +}; + +struct adf_gen4_pm_data { + struct work_struct pm_irq_work; + struct adf_accel_dev *accel_dev; + u32 pm_int_sts; +}; + +static int send_host_msg(struct adf_accel_dev *accel_dev) +{ + void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + u32 msg; + + msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG); + if 
(msg & ADF_GEN4_PM_MSG_PENDING) + return -EBUSY; + + /* Send HOST_MSG */ + msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN); + msg |= ADF_GEN4_PM_MSG_PENDING; + ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg); + + /* Poll status register to make sure the HOST_MSG has been processed */ + return read_poll_timeout(ADF_CSR_RD, msg, + !(msg & ADF_GEN4_PM_MSG_PENDING), + ADF_GEN4_PM_MSG_POLL_DELAY_US, + ADF_GEN4_PM_POLL_TIMEOUT_US, true, pmisc, + ADF_GEN4_PM_HOST_MSG); +} + +static void pm_bh_handler(struct work_struct *work) +{ + struct adf_gen4_pm_data *pm_data = + container_of(work, struct adf_gen4_pm_data, pm_irq_work); + struct adf_accel_dev *accel_dev = pm_data->accel_dev; + void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + u32 pm_int_sts = pm_data->pm_int_sts; + u32 val; + + /* PM Idle interrupt */ + if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) { + /* Issue host message to FW */ + if (send_host_msg(accel_dev)) + dev_warn_ratelimited(&GET_DEV(accel_dev), + "Failed to send host msg to FW\n"); + } + + /* Clear interrupt status */ + ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts); + + /* Reenable PM interrupt */ + val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2); + val &= ~ADF_GEN4_PM_SOU; + ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val); + + kfree(pm_data); +} + +bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev) +{ + void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + struct adf_gen4_pm_data *pm_data = NULL; + u32 errsou2; + u32 errmsk2; + u32 val; + + /* Only handle the interrupt triggered by PM */ + errmsk2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2); + if (errmsk2 & ADF_GEN4_PM_SOU) + return false; + + errsou2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRSOU2); + if (!(errsou2 & ADF_GEN4_PM_SOU)) + return false; + + /* Disable interrupt */ + val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2); + val |= ADF_GEN4_PM_SOU; + ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val); + + val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT); + + pm_data = kzalloc(sizeof(*pm_data), GFP_ATOMIC); + if (!pm_data) + return false; + + pm_data->pm_int_sts = val; + pm_data->accel_dev = accel_dev; + + INIT_WORK(&pm_data->pm_irq_work, pm_bh_handler); + adf_misc_wq_queue_work(&pm_data->pm_irq_work); + + return true; +} +EXPORT_SYMBOL_GPL(adf_gen4_handle_pm_interrupt); + +int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev) +{ + void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + int ret; + u32 val; + + ret = adf_init_admin_pm(accel_dev, ADF_GEN4_PM_DEFAULT_IDLE_FILTER); + if (ret) + return ret; + + /* Enable default PM interrupts: IDLE, THROTTLE */ + val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT); + val |= ADF_GEN4_PM_INT_EN_DEFAULT; + + /* Clear interrupt status */ + val |= ADF_GEN4_PM_INT_STS_MASK; + ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, val); + + /* Unmask PM Interrupt */ + val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2); + val &= ~ADF_GEN4_PM_SOU; + ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val); + + return 0; +} +EXPORT_SYMBOL_GPL(adf_gen4_enable_pm); diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/qat/qat_common/adf_gen4_pm.h new file mode 100644 index 000000000000..f8f8a9ee29e5 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_gen4_pm.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2022 Intel Corporation */ +#ifndef ADF_GEN4_PM_H +#define ADF_GEN4_PM_H + +#include "adf_accel_devices.h" + +/* Power management registers */ +#define ADF_GEN4_PM_HOST_MSG (0x50A01C) + +/* Power management */ +#define ADF_GEN4_PM_POLL_DELAY_US 20 +#define 
ADF_GEN4_PM_POLL_TIMEOUT_US USEC_PER_SEC +#define ADF_GEN4_PM_MSG_POLL_DELAY_US (10 * USEC_PER_MSEC) +#define ADF_GEN4_PM_STATUS (0x50A00C) +#define ADF_GEN4_PM_INTERRUPT (0x50A028) + +/* Power management source in ERRSOU2 and ERRMSK2 */ +#define ADF_GEN4_PM_SOU BIT(18) + +#define ADF_GEN4_PM_IDLE_INT_EN BIT(18) +#define ADF_GEN4_PM_THROTTLE_INT_EN BIT(19) +#define ADF_GEN4_PM_DRV_ACTIVE BIT(20) +#define ADF_GEN4_PM_INIT_STATE BIT(21) +#define ADF_GEN4_PM_INT_EN_DEFAULT (ADF_GEN4_PM_IDLE_INT_EN | \ + ADF_GEN4_PM_THROTTLE_INT_EN) + +#define ADF_GEN4_PM_THR_STS BIT(0) +#define ADF_GEN4_PM_IDLE_STS BIT(1) +#define ADF_GEN4_PM_FW_INT_STS BIT(2) +#define ADF_GEN4_PM_INT_STS_MASK (ADF_GEN4_PM_THR_STS | \ + ADF_GEN4_PM_IDLE_STS | \ + ADF_GEN4_PM_FW_INT_STS) + +#define ADF_GEN4_PM_MSG_PENDING BIT(0) +#define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK GENMASK(28, 1) + +#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER (0x0) +#define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7) + +int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev); +bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev); + +#endif diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index 2edc63c6b6ca..c2c718f1b489 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -181,6 +181,12 @@ int adf_dev_start(struct adf_accel_dev *accel_dev) if (hw_data->set_ssm_wdtimer) hw_data->set_ssm_wdtimer(accel_dev); + /* Enable Power Management */ + if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n"); + return -EFAULT; + } + list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_START)) { diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c index 4ca482aa69f7..a35149f8bf1e 100644 --- a/drivers/crypto/qat/qat_common/adf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_isr.c @@ -16,6 +16,7 @@ #include "adf_transport_internal.h" #define ADF_MAX_NUM_VFS 32 +static struct workqueue_struct *adf_misc_wq; static int adf_enable_msix(struct adf_accel_dev *accel_dev) { @@ -123,6 +124,17 @@ static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev) } #endif /* CONFIG_PCI_IOV */ +static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + + if (hw_data->handle_pm_interrupt && + hw_data->handle_pm_interrupt(accel_dev)) + return true; + + return false; +} + static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) { struct adf_accel_dev *accel_dev = dev_ptr; @@ -133,6 +145,9 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) return IRQ_HANDLED; #endif /* CONFIG_PCI_IOV */ + if (adf_handle_pm_int(accel_dev)) + return IRQ_HANDLED; + dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", accel_dev->accel_id); @@ -341,3 +356,30 @@ err_out: return ret; } EXPORT_SYMBOL_GPL(adf_isr_resource_alloc); + +/** + * adf_init_misc_wq() - Init misc workqueue + * + * Function initializes the workqueue 'qat_misc_wq' for general purpose use. + * + * Return: 0 on success, error code otherwise. + */ +int __init adf_init_misc_wq(void) +{ + adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0); + + return !adf_misc_wq ?
-ENOMEM : 0; +} + +void adf_exit_misc_wq(void) +{ + if (adf_misc_wq) + destroy_workqueue(adf_misc_wq); + + adf_misc_wq = NULL; +} + +bool adf_misc_wq_queue_work(struct work_struct *work) +{ + return queue_work(adf_misc_wq, work); +} diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c index 14b222691c9c..1141258db4b6 100644 --- a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c @@ -96,7 +96,7 @@ int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev) int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; - struct capabilities_v3 cap_msg = { { 0 }, }; + struct capabilities_v3 cap_msg = { 0 }; unsigned int len = sizeof(cap_msg); if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES) @@ -141,7 +141,7 @@ int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev) int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev) { - struct ring_to_svc_map_v1 rts_map_msg = { { 0 }, }; + struct ring_to_svc_map_v1 rts_map_msg = { 0 }; unsigned int len = sizeof(rts_map_msg); if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP) diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h index afe59a7684ac..56cb827f93ea 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h @@ -16,6 +16,7 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_HEARTBEAT_SYNC = 7, ICP_QAT_FW_HEARTBEAT_GET = 8, ICP_QAT_FW_COMP_CAPABILITY_GET = 9, + ICP_QAT_FW_PM_STATE_CONFIG = 128, }; enum icp_qat_fw_init_admin_resp_status { diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index 7234c4940fae..67c9588e89df 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -161,6 +161,13 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev) if (ret) goto err; + /* Temporarily set the number of crypto instances to zero to avoid + * registering the crypto algorithms. 
+ * This will be removed when the algorithms support the + * CRYPTO_TFM_REQ_MAY_BACKLOG flag + */ + instances = 0; + for (i = 0; i < instances; i++) { val = i; snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i); diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 2026cc6be8f0..6356402a2c9e 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -387,7 +387,9 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, page = image->page; for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { - if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned)) + unsigned long ae_assigned = uof_image->ae_assigned; + + if (!test_bit(ae, &ae_assigned)) continue; if (!test_bit(ae, &cfg_ae_mask)) @@ -664,8 +666,9 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) continue; for (i = 0; i < obj_handle->uimage_num; i++) { - if (!test_bit(ae, (unsigned long *) - &obj_handle->ae_uimage[i].img_ptr->ae_assigned)) + unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned; + + if (!test_bit(ae, &ae_assigned)) continue; mflag = 1; if (qat_uclo_init_ae_data(obj_handle, ae, i)) diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index 99ba8d51d102..11f30fd48c14 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c @@ -8,6 +8,7 @@ #include <linux/clk.h> #include <linux/crypto.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> @@ -43,16 +44,19 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max) { unsigned int currsize = 0; u32 val; + int ret; /* read random data from hardware */ do { - val = readl_relaxed(rng->base + PRNG_STATUS); - if (!(val & PRNG_STATUS_DATA_AVAIL)) - break; + ret = readl_poll_timeout(rng->base + PRNG_STATUS, val, + val & PRNG_STATUS_DATA_AVAIL, + 200, 10000); + if (ret) + return ret; val = readl_relaxed(rng->base + PRNG_DATA_OUT); if (!val) - break; + return -EINVAL; if ((max - currsize) >= WORD_SZ) { memcpy(data, &val, WORD_SZ); @@ -61,11 +65,10 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max) } else { /* copy only remaining bytes */ memcpy(data, &val, max - currsize); - break; } } while (currsize < max); - return currsize; + return 0; } static int qcom_rng_generate(struct crypto_rng *tfm, @@ -87,7 +90,7 @@ static int qcom_rng_generate(struct crypto_rng *tfm, mutex_unlock(&rng->lock); clk_disable_unprepare(rng->clk); - return 0; + return ret; } static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed, diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c index 1cece1a7d3f0..5bbf0d2722e1 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c @@ -506,7 +506,6 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = { .exit = rk_ablk_exit_tfm, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, .setkey = rk_tdes_setkey, .encrypt = rk_des3_ede_ecb_encrypt, .decrypt = rk_des3_ede_ecb_decrypt, diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 97277b7150cb..5a57c9afd8c8 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -1264,7 +1264,7 @@ static int ux500_cryp_probe(struct platform_device
*pdev) struct device *dev = &pdev->dev; dev_dbg(dev, "[%s]", __func__); - device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC); + device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL); if (!device_data) { ret = -ENOMEM; goto out; diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 51a6e1a42434..5157c118d642 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -1658,7 +1658,7 @@ static int ux500_hash_probe(struct platform_device *pdev) struct hash_device_data *device_data; struct device *dev = &pdev->dev; - device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC); + device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL); if (!device_data) { ret = -ENOMEM; goto out; diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig index c85fab7ef0bd..b2c28b87f14b 100644 --- a/drivers/crypto/vmx/Kconfig +++ b/drivers/crypto/vmx/Kconfig @@ -2,7 +2,11 @@ config CRYPTO_DEV_VMX_ENCRYPT tristate "Encryption acceleration support on P8 CPU" depends on CRYPTO_DEV_VMX + select CRYPTO_AES + select CRYPTO_CBC + select CRYPTO_CTR select CRYPTO_GHASH + select CRYPTO_XTS default m help Support for VMX cryptographic acceleration instructions on Power8 CPU. diff --git a/drivers/crypto/xilinx/Makefile b/drivers/crypto/xilinx/Makefile index 534e32daf76a..730feff5b5f2 100644 --- a/drivers/crypto/xilinx/Makefile +++ b/drivers/crypto/xilinx/Makefile @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes-gcm.o +obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_SHA3) += zynqmp-sha.o diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c new file mode 100644 index 000000000000..43ff170ff1c2 --- /dev/null +++ b/drivers/crypto/xilinx/zynqmp-sha.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Xilinx ZynqMP SHA Driver. + * Copyright (c) 2022 Xilinx Inc. + */ +#include <linux/cacheflush.h> +#include <crypto/hash.h> +#include <crypto/internal/hash.h> +#include <crypto/sha3.h> +#include <linux/crypto.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/firmware/xlnx-zynqmp.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> + +#define ZYNQMP_DMA_BIT_MASK 32U +#define ZYNQMP_DMA_ALLOC_FIXED_SIZE 0x1000U + +enum zynqmp_sha_op { + ZYNQMP_SHA3_INIT = 1, + ZYNQMP_SHA3_UPDATE = 2, + ZYNQMP_SHA3_FINAL = 4, +}; + +struct zynqmp_sha_drv_ctx { + struct shash_alg sha3_384; + struct device *dev; +}; + +struct zynqmp_sha_tfm_ctx { + struct device *dev; + struct crypto_shash *fbk_tfm; +}; + +struct zynqmp_sha_desc_ctx { + struct shash_desc fbk_req; +}; + +static dma_addr_t update_dma_addr, final_dma_addr; +static char *ubuf, *fbuf; + +static int zynqmp_sha_init_tfm(struct crypto_shash *hash) +{ + const char *fallback_driver_name = crypto_shash_alg_name(hash); + struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash); + struct shash_alg *alg = crypto_shash_alg(hash); + struct crypto_shash *fallback_tfm; + struct zynqmp_sha_drv_ctx *drv_ctx; + + drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384); + tfm_ctx->dev = drv_ctx->dev; + + /* Allocate a fallback and abort if it failed. 
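/*
 * Illustrative sketch (not part of the driver): once the "sha3-384" shash
 * below is registered, callers reach it through the generic crypto API and
 * never see the hardware/fallback split. A minimal one-shot digest,
 * assuming a SHA3_384_DIGEST_SIZE output buffer; the function name is
 * hypothetical.
 */
#include <crypto/hash.h>
#include <crypto/sha3.h>

static int sha3_384_digest_once_sketch(const u8 *data, unsigned int len,
				       u8 out[SHA3_384_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("sha3-384", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_tfm_digest(tfm, data, len, out);
	crypto_free_shash(tfm);
	return ret;
}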
*/ + fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback_tfm)) + return PTR_ERR(fallback_tfm); + + tfm_ctx->fbk_tfm = fallback_tfm; + hash->descsize += crypto_shash_descsize(tfm_ctx->fbk_tfm); + + return 0; +} + +static void zynqmp_sha_exit_tfm(struct crypto_shash *hash) +{ + struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash); + + if (tfm_ctx->fbk_tfm) { + crypto_free_shash(tfm_ctx->fbk_tfm); + tfm_ctx->fbk_tfm = NULL; + } + + memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx)); +} + +static int zynqmp_sha_init(struct shash_desc *desc) +{ + struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); + struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + + dctx->fbk_req.tfm = tctx->fbk_tfm; + return crypto_shash_init(&dctx->fbk_req); +} + +static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length) +{ + struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); + + return crypto_shash_update(&dctx->fbk_req, data, length); +} + +static int zynqmp_sha_final(struct shash_desc *desc, u8 *out) +{ + struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); + + return crypto_shash_final(&dctx->fbk_req, out); +} + +static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) +{ + struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); + + return crypto_shash_finup(&dctx->fbk_req, data, length, out); +} + +static int zynqmp_sha_import(struct shash_desc *desc, const void *in) +{ + struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); + struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + + dctx->fbk_req.tfm = tctx->fbk_tfm; + return crypto_shash_import(&dctx->fbk_req, in); +} + +static int zynqmp_sha_export(struct shash_desc *desc, void *out) +{ + struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); + + return crypto_shash_export(&dctx->fbk_req, out); +} + +static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) +{ + unsigned int remaining_len = len; + int update_size; + int ret; + + ret = zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT); + if (ret) + return ret; + + while (remaining_len != 0) { + memzero_explicit(ubuf, ZYNQMP_DMA_ALLOC_FIXED_SIZE); + if (remaining_len >= ZYNQMP_DMA_ALLOC_FIXED_SIZE) { + update_size = ZYNQMP_DMA_ALLOC_FIXED_SIZE; + remaining_len -= ZYNQMP_DMA_ALLOC_FIXED_SIZE; + } else { + update_size = remaining_len; + remaining_len = 0; + } + memcpy(ubuf, data, update_size); + flush_icache_range((unsigned long)ubuf, (unsigned long)ubuf + update_size); + ret = zynqmp_pm_sha_hash(update_dma_addr, update_size, ZYNQMP_SHA3_UPDATE); + if (ret) + return ret; + + data += update_size; + } + + ret = zynqmp_pm_sha_hash(final_dma_addr, SHA3_384_DIGEST_SIZE, ZYNQMP_SHA3_FINAL); + memcpy(out, fbuf, SHA3_384_DIGEST_SIZE); + memzero_explicit(fbuf, SHA3_384_DIGEST_SIZE); + + return ret; +} + +static struct zynqmp_sha_drv_ctx sha3_drv_ctx = { + .sha3_384 = { + .init = zynqmp_sha_init, + .update = zynqmp_sha_update, + .final = zynqmp_sha_final, + .finup = zynqmp_sha_finup, + .digest = zynqmp_sha_digest, + .export = zynqmp_sha_export, + .import = zynqmp_sha_import, + .init_tfm = zynqmp_sha_init_tfm, + .exit_tfm = zynqmp_sha_exit_tfm, + .descsize = sizeof(struct zynqmp_sha_desc_ctx), + .statesize = sizeof(struct sha3_state), + .digestsize = SHA3_384_DIGEST_SIZE, + .base = { + .cra_name = "sha3-384", + .cra_driver_name = "zynqmp-sha3-384", + .cra_priority = 300, + .cra_flags = 
CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA3_384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + } + } +}; + +static int zynqmp_sha_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int err; + u32 v; + + /* Verify the hardware is present */ + err = zynqmp_pm_get_api_version(&v); + if (err) + return err; + + + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK)); + if (err < 0) { + dev_err(dev, "No usable DMA configuration\n"); + return err; + } + + err = crypto_register_shash(&sha3_drv_ctx.sha3_384); + if (err < 0) { + dev_err(dev, "Failed to register shash alg.\n"); + return err; + } + + sha3_drv_ctx.dev = dev; + platform_set_drvdata(pdev, &sha3_drv_ctx); + + ubuf = dma_alloc_coherent(dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, &update_dma_addr, GFP_KERNEL); + if (!ubuf) { + err = -ENOMEM; + goto err_shash; + } + + fbuf = dma_alloc_coherent(dev, SHA3_384_DIGEST_SIZE, &final_dma_addr, GFP_KERNEL); + if (!fbuf) { + err = -ENOMEM; + goto err_mem; + } + + return 0; + +err_mem: + dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr); + +err_shash: + crypto_unregister_shash(&sha3_drv_ctx.sha3_384); + + return err; +} + +static int zynqmp_sha_remove(struct platform_device *pdev) +{ + sha3_drv_ctx.dev = platform_get_drvdata(pdev); + + dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr); + dma_free_coherent(sha3_drv_ctx.dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr); + crypto_unregister_shash(&sha3_drv_ctx.sha3_384); + + return 0; +} + +static struct platform_driver zynqmp_sha_driver = { + .probe = zynqmp_sha_probe, + .remove = zynqmp_sha_remove, + .driver = { + .name = "zynqmp-sha3-384", + }, +}; + +module_platform_driver(zynqmp_sha_driver); +MODULE_DESCRIPTION("ZynqMP SHA3 hardware acceleration support."); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Harsha <harsha.harsha@xilinx.com>"); diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c index 56bf5ad01ad5..8f5848aa144f 100644 --- a/drivers/dma-buf/dma-heap.c +++ b/drivers/dma-buf/dma-heap.c @@ -14,6 +14,7 @@ #include <linux/xarray.h> #include <linux/list.h> #include <linux/slab.h> +#include <linux/nospec.h> #include <linux/uaccess.h> #include <linux/syscalls.h> #include <linux/dma-heap.h> @@ -135,6 +136,7 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd, if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds)) return -EINVAL; + nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds)); /* Get the kernel ioctl cmd that matches */ kcmd = dma_heap_ioctl_cmds[nr]; diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index a1da2b4b6d73..1476156af74b 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1681,8 +1681,10 @@ static void at_xdmac_tasklet(struct tasklet_struct *t) __func__, atchan->irq_status); if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) && - !(atchan->irq_status & error_mask)) + !(atchan->irq_status & error_mask)) { + spin_unlock_irq(&atchan->lock); return; + } if (atchan->irq_status & error_mask) at_xdmac_handle_error(atchan); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 110de8a60058..858400e42ec0 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2968,7 +2968,7 @@ static int __maybe_unused pl330_suspend(struct device *dev) struct amba_device *pcdev = to_amba_device(dev); pm_runtime_force_suspend(dev); - 
amba_pclk_unprepare(pcdev); + clk_unprepare(pcdev->pclk); return 0; } @@ -2978,7 +2978,7 @@ static int __maybe_unused pl330_resume(struct device *dev) struct amba_device *pcdev = to_amba_device(dev); int ret; - ret = amba_pclk_prepare(pcdev); + ret = clk_prepare(pcdev->pclk); if (ret) return ret; diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c index 8a6bf291a73f..daafea5bc35d 100644 --- a/drivers/dma/ptdma/ptdma-dev.c +++ b/drivers/dma/ptdma/ptdma-dev.c @@ -207,7 +207,7 @@ int pt_core_init(struct pt_device *pt) if (!cmd_q->qbase) { dev_err(dev, "unable to allocate command queue\n"); ret = -ENOMEM; - goto e_dma_alloc; + goto e_destroy_pool; } cmd_q->qidx = 0; @@ -229,8 +229,10 @@ int pt_core_init(struct pt_device *pt) /* Request an irq */ ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt); - if (ret) - goto e_pool; + if (ret) { + dev_err(dev, "unable to allocate an IRQ\n"); + goto e_free_dma; + } /* Update the device registers with queue information. */ cmd_q->qcontrol &= ~CMD_Q_SIZE; @@ -250,21 +252,20 @@ int pt_core_init(struct pt_device *pt) /* Register the DMA engine support */ ret = pt_dmaengine_register(pt); if (ret) - goto e_dmaengine; + goto e_free_irq; /* Set up debugfs entries */ ptdma_debugfs_setup(pt); return 0; -e_dmaengine: +e_free_irq: free_irq(pt->pt_irq, pt); -e_dma_alloc: +e_free_dma: dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma); -e_pool: - dev_err(dev, "unable to allocate an IRQ\n"); +e_destroy_pool: dma_pool_destroy(pt->cmd_q.dma_pool); return ret; diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 481f45c77ce1..13d12d660cc2 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev) dmac->dev = &pdev->dev; platform_set_drvdata(pdev, dmac); - dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); - dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); + ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); + if (ret) + return ret; + + ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); + if (ret) + return ret; ret = rcar_dmac_parse_of(&pdev->dev, dmac); if (ret < 0) diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 158e5e7defae..b26ed690f03c 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -115,8 +115,10 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) ret = pm_runtime_get(schan->dev); spin_unlock_irq(&schan->chan_lock); - if (ret < 0) + if (ret < 0) { dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); + pm_runtime_put(schan->dev); + } pm_runtime_barrier(schan->dev); diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index a42164389ebc..d5d55732adba 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c @@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev) ret = of_dma_router_register(node, stm32_dmamux_route_allocate, &stm32_dmamux->dmarouter); if (ret) - goto err_clk; + goto pm_disable; return 0; +pm_disable: + pm_runtime_disable(&pdev->dev); err_clk: clk_disable_unprepare(stm32_dmamux->clk); diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index 3a6d2416cb0f..e7e8e624a436 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -350,7 +350,7 @@ static int altr_sdram_probe(struct platform_device *pdev) if (irq < 0) { edac_printk(KERN_ERR, EDAC_MC, "No irq %d in DT\n", irq); - 
return -ENODEV; + return irq; } /* Arria10 has a 2nd IRQ */ @@ -1083,8 +1083,46 @@ static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat) #ifdef CONFIG_EDAC_ALTERA_SDRAM +/* + * A legacy U-Boot bug only enabled memory mapped access to the ECC Enable + * register if ECC is enabled. Linux checks the ECC Enable register to + * determine ECC status. + * Use an SMC call (which always works) to determine ECC enablement. + */ +static int altr_s10_sdram_check_ecc_deps(struct altr_edac_device_dev *device) +{ + const struct edac_device_prv_data *prv = device->data; + unsigned long sdram_ecc_addr; + struct arm_smccc_res result; + struct device_node *np; + phys_addr_t sdram_addr; + u32 read_reg; + int ret; + + np = of_find_compatible_node(NULL, NULL, "altr,sdr-ctl"); + if (!np) + goto sdram_err; + + sdram_addr = of_translate_address(np, of_get_address(np, 0, + NULL, NULL)); + of_node_put(np); + sdram_ecc_addr = (unsigned long)sdram_addr + prv->ecc_en_ofst; + arm_smccc_smc(INTEL_SIP_SMC_REG_READ, sdram_ecc_addr, + 0, 0, 0, 0, 0, 0, &result); + read_reg = (unsigned int)result.a1; + ret = (int)result.a0; + if (!ret && (read_reg & prv->ecc_enable_mask)) + return 0; + +sdram_err: + edac_printk(KERN_ERR, EDAC_DEVICE, + "%s: No ECC present or ECC disabled.\n", + device->edac_dev_name); + return -ENODEV; +} + static const struct edac_device_prv_data s10_sdramecc_data = { - .setup = altr_check_ecc_deps, + .setup = altr_s10_sdram_check_ecc_deps, .ce_clear_mask = ALTR_S10_ECC_SERRPENA, .ue_clear_mask = ALTR_S10_ECC_DERRPENA, .ecc_enable_mask = ALTR_S10_ECC_EN, diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index fba609ada0e6..812baa48b290 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -15,6 +15,21 @@ static struct msr __percpu *msrs; static struct amd64_family_type *fam_type; +static inline u32 get_umc_reg(u32 reg) +{ + if (!fam_type->flags.zn_regs_v2) + return reg; + + switch (reg) { + case UMCCH_ADDR_CFG: return UMCCH_ADDR_CFG_DDR5; + case UMCCH_ADDR_MASK_SEC: return UMCCH_ADDR_MASK_SEC_DDR5; + case UMCCH_DIMM_CFG: return UMCCH_DIMM_CFG_DDR5; + } + + WARN_ONCE(1, "%s: unknown register 0x%x", __func__, reg); + return 0; +} + /* Per-node stuff */ static struct ecc_settings **ecc_stngs; @@ -1429,8 +1444,10 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt) edac_dbg(1, "UMC%d x16 DIMMs present: %s\n", i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no"); - if (pvt->dram_type == MEM_LRDDR4) { - amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp); + if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) { + amd_smn_read(pvt->mc_node_id, + umc_base + get_umc_reg(UMCCH_ADDR_CFG), + &tmp); edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n", i, 1 << ((tmp >> 4) & 0x3)); } @@ -1505,7 +1522,7 @@ static void prep_chip_selects(struct amd64_pvt *pvt) for_each_umc(umc) { pvt->csels[umc].b_cnt = 4; - pvt->csels[umc].m_cnt = 2; + pvt->csels[umc].m_cnt = fam_type->flags.zn_regs_v2 ? 
4 : 2; } } else { @@ -1545,7 +1562,7 @@ static void read_umc_base_mask(struct amd64_pvt *pvt) } umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK; - umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC; + umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(UMCCH_ADDR_MASK_SEC); for_each_chip_select_mask(cs, umc, pvt) { mask = &pvt->csels[umc].csmasks[cs]; @@ -1616,19 +1633,49 @@ static void read_dct_base_mask(struct amd64_pvt *pvt) } } +static void determine_memory_type_df(struct amd64_pvt *pvt) +{ + struct amd64_umc *umc; + u32 i; + + for_each_umc(i) { + umc = &pvt->umc[i]; + + if (!(umc->sdp_ctrl & UMC_SDP_INIT)) { + umc->dram_type = MEM_EMPTY; + continue; + } + + /* + * Check if the system supports the "DDR Type" field in UMC Config + * and has DDR5 DIMMs in use. + */ + if (fam_type->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { + if (umc->dimm_cfg & BIT(5)) + umc->dram_type = MEM_LRDDR5; + else if (umc->dimm_cfg & BIT(4)) + umc->dram_type = MEM_RDDR5; + else + umc->dram_type = MEM_DDR5; + } else { + if (umc->dimm_cfg & BIT(5)) + umc->dram_type = MEM_LRDDR4; + else if (umc->dimm_cfg & BIT(4)) + umc->dram_type = MEM_RDDR4; + else + umc->dram_type = MEM_DDR4; + } + + edac_dbg(1, " UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]); + } +} + static void determine_memory_type(struct amd64_pvt *pvt) { u32 dram_ctrl, dcsm; - if (pvt->umc) { - if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5)) - pvt->dram_type = MEM_LRDDR4; - else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4)) - pvt->dram_type = MEM_RDDR4; - else - pvt->dram_type = MEM_DDR4; - return; - } + if (pvt->umc) + return determine_memory_type_df(pvt); switch (pvt->fam) { case 0xf: @@ -2149,6 +2196,7 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc, { u32 addr_mask_orig, addr_mask_deinterleaved; u32 msb, weight, num_zero_bits; + int cs_mask_nr = csrow_nr; int dimm, size = 0; /* No Chip Selects are enabled. */ @@ -2164,17 +2212,33 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc, return size; /* - * There is one mask per DIMM, and two Chip Selects per DIMM. - * CS0 and CS1 -> DIMM0 - * CS2 and CS3 -> DIMM1 + * Family 17h introduced systems with one mask per DIMM, + * and two Chip Selects per DIMM. + * + * CS0 and CS1 -> MASK0 / DIMM0 + * CS2 and CS3 -> MASK1 / DIMM1 + * + * Family 19h Model 10h introduced systems with one mask per Chip Select, + * and two Chip Selects per DIMM. + * + * CS0 -> MASK0 -> DIMM0 + * CS1 -> MASK1 -> DIMM0 + * CS2 -> MASK2 -> DIMM1 + * CS3 -> MASK3 -> DIMM1 + * + * Keep the mask number equal to the Chip Select number for newer systems, + * and shift the mask number for older systems. */ dimm = csrow_nr >> 1; + if (!fam_type->flags.zn_regs_v2) + cs_mask_nr >>= 1; + /* Asymmetric dual-rank DIMM support. 
*/ if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY)) - addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm]; + addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr]; else - addr_mask_orig = pvt->csels[umc].csmasks[dimm]; + addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr]; /* * The number of zero bits in the mask is equal to the number of bits @@ -2930,6 +2994,7 @@ static struct amd64_family_type family_types[] = { .f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0, .f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6, .max_mcs = 12, + .flags.zn_regs_v2 = 1, .ops = { .early_channel_count = f17_early_channel_count, .dbam_to_cs = f17_addr_mask_to_cs_size, @@ -3368,7 +3433,7 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt) umc_base = get_umc_base(i); umc = &pvt->umc[i]; - amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg); + amd_smn_read(nid, umc_base + get_umc_reg(UMCCH_DIMM_CFG), &umc->dimm_cfg); amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg); amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl); amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl); @@ -3452,7 +3517,9 @@ skip: read_dct_base_mask(pvt); determine_memory_type(pvt); - edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]); + + if (!pvt->umc) + edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]); determine_ecc_sym_sz(pvt); } @@ -3548,7 +3615,7 @@ static int init_csrows_df(struct mem_ctl_info *mci) pvt->mc_node_id, cs); dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs); - dimm->mtype = pvt->dram_type; + dimm->mtype = pvt->umc[umc].dram_type; dimm->edac_mode = edac_mode; dimm->dtype = dev_type; dimm->grain = 64; diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 352bda9803f6..38e5ad95d010 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -273,8 +273,11 @@ #define UMCCH_BASE_ADDR_SEC 0x10 #define UMCCH_ADDR_MASK 0x20 #define UMCCH_ADDR_MASK_SEC 0x28 +#define UMCCH_ADDR_MASK_SEC_DDR5 0x30 #define UMCCH_ADDR_CFG 0x30 +#define UMCCH_ADDR_CFG_DDR5 0x40 #define UMCCH_DIMM_CFG 0x80 +#define UMCCH_DIMM_CFG_DDR5 0x90 #define UMCCH_UMC_CFG 0x100 #define UMCCH_SDP_CTRL 0x104 #define UMCCH_ECC_CTRL 0x14C @@ -344,6 +347,9 @@ struct amd64_umc { u32 sdp_ctrl; /* SDP Control reg */ u32 ecc_ctrl; /* DRAM ECC Control reg */ u32 umc_cap_hi; /* Capabilities High reg */ + + /* cache the dram_type */ + enum mem_type dram_type; }; struct amd64_pvt { @@ -391,7 +397,12 @@ struct amd64_pvt { /* place to store error injection parameters prior to issue */ struct error_injection injection; - /* cache the dram_type */ + /* + * cache the dram_type + * + * NOTE: Don't use this for Family 17h and later. + * Use dram_type in struct amd64_umc instead. + */ enum mem_type dram_type; struct amd64_umc *umc; /* UMC registers */ @@ -480,11 +491,22 @@ struct low_ops { unsigned cs_mode, int cs_mask_nr); }; +struct amd64_family_flags { + /* + * Indicates that the system supports the new register offsets, etc. + * first introduced with Family 19h Model 10h. + */ + __u64 zn_regs_v2 : 1, + + __reserved : 63; +}; + struct amd64_family_type { const char *ctl_name; u16 f0_id, f1_id, f2_id, f6_id; /* Maximum number of memory controllers per die/node. 
*/ u8 max_mcs; + struct amd64_family_flags flags; struct low_ops ops; }; diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c index 5e7593753799..9a61d92bdf42 100644 --- a/drivers/edac/edac_device_sysfs.c +++ b/drivers/edac/edac_device_sysfs.c @@ -163,13 +163,14 @@ CTL_INFO_ATTR(poll_msec, S_IRUGO | S_IWUSR, edac_device_ctl_poll_msec_show, edac_device_ctl_poll_msec_store); /* Base Attributes of the EDAC_DEVICE ECC object */ -static struct ctl_info_attribute *device_ctrl_attr[] = { - &attr_ctl_info_panic_on_ue, - &attr_ctl_info_log_ue, - &attr_ctl_info_log_ce, - &attr_ctl_info_poll_msec, +static struct attribute *device_ctrl_attrs[] = { + &attr_ctl_info_panic_on_ue.attr, + &attr_ctl_info_log_ue.attr, + &attr_ctl_info_log_ce.attr, + &attr_ctl_info_poll_msec.attr, NULL, }; +ATTRIBUTE_GROUPS(device_ctrl); /* * edac_device_ctrl_master_release @@ -217,7 +218,7 @@ static void edac_device_ctrl_master_release(struct kobject *kobj) static struct kobj_type ktype_device_ctrl = { .release = edac_device_ctrl_master_release, .sysfs_ops = &device_ctl_info_ops, - .default_attrs = (struct attribute **)device_ctrl_attr, + .default_groups = device_ctrl_groups, }; /* @@ -389,17 +390,18 @@ INSTANCE_ATTR(ce_count, S_IRUGO, instance_ce_count_show, NULL); INSTANCE_ATTR(ue_count, S_IRUGO, instance_ue_count_show, NULL); /* list of edac_dev 'instance' attributes */ -static struct instance_attribute *device_instance_attr[] = { - &attr_instance_ce_count, - &attr_instance_ue_count, +static struct attribute *device_instance_attrs[] = { + &attr_instance_ce_count.attr, + &attr_instance_ue_count.attr, NULL, }; +ATTRIBUTE_GROUPS(device_instance); /* The 'ktype' for each edac_dev 'instance' */ static struct kobj_type ktype_instance_ctrl = { .release = edac_device_ctrl_instance_release, .sysfs_ops = &device_instance_ops, - .default_attrs = (struct attribute **)device_instance_attr, + .default_groups = device_instance_groups, }; /* edac_dev -> instance -> block information */ @@ -487,17 +489,18 @@ BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL); BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL); /* list of edac_dev 'block' attributes */ -static struct edac_dev_sysfs_block_attribute *device_block_attr[] = { - &attr_block_ce_count, - &attr_block_ue_count, +static struct attribute *device_block_attrs[] = { + &attr_block_ce_count.attr, + &attr_block_ue_count.attr, NULL, }; +ATTRIBUTE_GROUPS(device_block); /* The 'ktype' for each edac_dev 'block' */ static struct kobj_type ktype_block_ctrl = { .release = edac_device_ctrl_block_release, .sysfs_ops = &device_block_ops, - .default_attrs = (struct attribute **)device_block_attr, + .default_groups = device_block_groups, }; /* block ctor/dtor code */ diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 9d9aabdec96b..d2715774af6f 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -213,12 +213,12 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems) else if (size > sizeof(char)) align = sizeof(short); else - return (char *)ptr; + return ptr; - r = (unsigned long)p % align; + r = (unsigned long)ptr % align; if (r == 0) - return (char *)ptr; + return ptr; *p += align - r; diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index 53042af7262e..888d5728ecef 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c @@ -135,17 +135,18 @@ INSTANCE_ATTR(pe_count, S_IRUGO, instance_pe_count_show, NULL); INSTANCE_ATTR(npe_count, S_IRUGO, instance_npe_count_show, NULL); /* 
pci instance attributes */ -static struct instance_attribute *pci_instance_attr[] = { - &attr_instance_pe_count, - &attr_instance_npe_count, +static struct attribute *pci_instance_attrs[] = { + &attr_instance_pe_count.attr, + &attr_instance_npe_count.attr, NULL }; +ATTRIBUTE_GROUPS(pci_instance); /* the ktype for a pci instance */ static struct kobj_type ktype_pci_instance = { .release = edac_pci_instance_release, .sysfs_ops = &pci_instance_ops, - .default_attrs = (struct attribute **)pci_instance_attr, + .default_groups = pci_instance_groups, }; /* @@ -292,15 +293,16 @@ EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL); EDAC_PCI_ATTR(pci_nonparity_count, S_IRUGO, edac_pci_int_show, NULL); /* Base Attributes of the memory ECC object */ -static struct edac_pci_dev_attribute *edac_pci_attr[] = { - &edac_pci_attr_check_pci_errors, - &edac_pci_attr_edac_pci_log_pe, - &edac_pci_attr_edac_pci_log_npe, - &edac_pci_attr_edac_pci_panic_on_pe, - &edac_pci_attr_pci_parity_count, - &edac_pci_attr_pci_nonparity_count, +static struct attribute *edac_pci_attrs[] = { + &edac_pci_attr_check_pci_errors.attr, + &edac_pci_attr_edac_pci_log_pe.attr, + &edac_pci_attr_edac_pci_log_npe.attr, + &edac_pci_attr_edac_pci_panic_on_pe.attr, + &edac_pci_attr_pci_parity_count.attr, + &edac_pci_attr_pci_nonparity_count.attr, NULL, }; +ATTRIBUTE_GROUPS(edac_pci); /* * edac_pci_release_main_kobj @@ -327,7 +329,7 @@ static void edac_pci_release_main_kobj(struct kobject *kobj) static struct kobj_type ktype_edac_pci_main_kobj = { .release = edac_pci_release_main_kobj, .sysfs_ops = &edac_pci_sysfs_ops, - .default_attrs = (struct attribute **)edac_pci_attr, + .default_groups = edac_pci_groups, }; /** diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c index 2ccd1db5e98f..7197f9fa0245 100644 --- a/drivers/edac/xgene_edac.c +++ b/drivers/edac/xgene_edac.c @@ -1919,7 +1919,7 @@ static int xgene_edac_probe(struct platform_device *pdev) irq = platform_get_irq_optional(pdev, i); if (irq < 0) { dev_err(&pdev->dev, "No IRQ resource\n"); - rc = -EINVAL; + rc = irq; goto out_err; } rc = devm_request_irq(&pdev->dev, irq, diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 75cb91055c17..e5cfb01353d8 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -40,6 +40,7 @@ config ARM_SCPI_POWER_DOMAIN config ARM_SDE_INTERFACE bool "ARM Software Delegated Exception Interface (SDEI)" depends on ARM64 + depends on ACPI_APEI_GHES help The Software Delegated Exception Interface (SDEI) is an ARM standard for registering callbacks from the platform firmware diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index b406b3f78f46..d76bab3aaac4 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -2112,7 +2112,7 @@ static void __exit scmi_driver_exit(void) } module_exit(scmi_driver_exit); -MODULE_ALIAS("platform: arm-scmi"); +MODULE_ALIAS("platform:arm-scmi"); MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); MODULE_DESCRIPTION("ARM SCMI protocol driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index a7e762c352f9..1e1a51510e83 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -1059,14 +1059,14 @@ static bool __init sdei_present_acpi(void) return true; } -static int __init sdei_init(void) +void __init sdei_init(void) { struct platform_device *pdev; int ret; ret = platform_driver_register(&sdei_driver); if (ret || 
!sdei_present_acpi()) - return ret; + return; pdev = platform_device_register_simple(sdei_driver.driver.name, 0, NULL, 0); @@ -1076,17 +1076,8 @@ static int __init sdei_init(void) pr_info("Failed to register ACPI:SDEI platform device %d\n", ret); } - - return ret; } -/* - * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register - * its events. ACPI is initialised from a subsys_initcall(), GHES is initialised - * by device_initcall(). We want to be called in the middle. - */ -subsys_initcall_sync(sdei_init); - int sdei_event_handler(struct pt_regs *regs, struct sdei_registered_event *arg) { diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c index 4c3201e290e2..ea84108035eb 100644 --- a/drivers/firmware/efi/apple-properties.c +++ b/drivers/firmware/efi/apple-properties.c @@ -24,7 +24,7 @@ static bool dump_properties __initdata; static int __init dump_properties_enable(char *arg) { dump_properties = true; - return 0; + return 1; } __setup("dump_apple_properties", dump_properties_enable); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 7de3f5b6e8d0..5502e176d51b 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -212,7 +212,7 @@ static int __init efivar_ssdt_setup(char *str) memcpy(efivar_ssdt, str, strlen(str)); else pr_warn("efivar_ssdt: name too long: %s\n", str); - return 0; + return 1; } __setup("efivar_ssdt=", efivar_ssdt_setup); diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c index 380e4e251399..9c460843442f 100644 --- a/drivers/firmware/efi/libstub/riscv-stub.c +++ b/drivers/firmware/efi/libstub/riscv-stub.c @@ -25,7 +25,7 @@ typedef void __noreturn (*jump_kernel_func)(unsigned int, unsigned long); static u32 hartid; -static u32 get_boot_hartid_from_fdt(void) +static int get_boot_hartid_from_fdt(void) { const void *fdt; int chosen_node, len; @@ -33,23 +33,26 @@ static u32 get_boot_hartid_from_fdt(void) fdt = get_efi_config_table(DEVICE_TREE_GUID); if (!fdt) - return U32_MAX; + return -EINVAL; chosen_node = fdt_path_offset(fdt, "/chosen"); if (chosen_node < 0) - return U32_MAX; + return -EINVAL; prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len); if (!prop || len != sizeof(u32)) - return U32_MAX; + return -EINVAL; - return fdt32_to_cpu(*prop); + hartid = fdt32_to_cpu(*prop); + return 0; } efi_status_t check_platform_features(void) { - hartid = get_boot_hartid_from_fdt(); - if (hartid == U32_MAX) { + int ret; + + ret = get_boot_hartid_from_fdt(); + if (ret) { efi_err("/chosen/boot-hartid missing or invalid!\n"); return EFI_UNSUPPORTED; } diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c index 38722d2009e2..5ed0602c2f75 100644 --- a/drivers/firmware/efi/mokvar-table.c +++ b/drivers/firmware/efi/mokvar-table.c @@ -359,4 +359,4 @@ static int __init efi_mokvar_sysfs_init(void) } return err; } -device_initcall(efi_mokvar_sysfs_init); +fs_initcall(efi_mokvar_sysfs_init); diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index abdc8a6a3963..cae590bd08f2 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -742,6 +742,7 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, { const struct efivar_operations *ops; efi_status_t status; + unsigned long varsize; if (!__efivars) return -EINVAL; @@ -764,15 +765,17 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, return 
efivar_entry_set_nonblocking(name, vendor, attributes, size, data); + varsize = size + ucs2_strsize(name, 1024); if (!block) { if (down_trylock(&efivars_lock)) return -EBUSY; + status = check_var_size_nonblocking(attributes, varsize); } else { if (down_interruptible(&efivars_lock)) return -EINTR; + status = check_var_size(attributes, varsize); } - status = check_var_size(attributes, size + ucs2_strsize(name, 1024)); if (status != EFI_SUCCESS) { up(&efivars_lock); return -ENOSPC; diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 450c5f6a1cbf..5e5b0bb2e4e0 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -1121,6 +1121,32 @@ int zynqmp_pm_aes_engine(const u64 address, u32 *out) EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine); /** + * zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash + * @address: Address of the data/ Address of output buffer where + * hash should be stored. + * @size: Size of the data. + * @flags: + * BIT(0) - for initializing csudma driver and SHA3(Here address + * and size inputs can be NULL). + * BIT(1) - to call Sha3_Update API which can be called multiple + * times when data is not contiguous. + * BIT(2) - to get final hash of the whole updated data. + * Hash will be overwritten at provided address with + * 48 bytes. + * + * Return: Returns status, either success or error code. + */ +int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags) +{ + u32 lower_addr = lower_32_bits(address); + u32 upper_addr = upper_32_bits(address); + + return zynqmp_pm_invoke_fn(PM_SECURE_SHA, upper_addr, lower_addr, + size, flags, NULL); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash); + +/** * zynqmp_pm_register_notifier() - PM API for register a subsystem * to be notified about specific * event/error. diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c index 869dc952cf45..0cb2664085cf 100644 --- a/drivers/gpio/gpio-aggregator.c +++ b/drivers/gpio/gpio-aggregator.c @@ -278,7 +278,8 @@ static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset) { struct gpiochip_fwd *fwd = gpiochip_get_data(chip); - return gpiod_get_value(fwd->descs[offset]); + return chip->can_sleep ? 
gpiod_get_value_cansleep(fwd->descs[offset]) + : gpiod_get_value(fwd->descs[offset]); } static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, @@ -293,7 +294,10 @@ static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, for_each_set_bit(i, mask, fwd->chip.ngpio) descs[j++] = fwd->descs[i]; - error = gpiod_get_array_value(j, descs, NULL, values); + if (fwd->chip.can_sleep) + error = gpiod_get_array_value_cansleep(j, descs, NULL, values); + else + error = gpiod_get_array_value(j, descs, NULL, values); if (error) return error; @@ -328,7 +332,10 @@ static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value) { struct gpiochip_fwd *fwd = gpiochip_get_data(chip); - gpiod_set_value(fwd->descs[offset], value); + if (chip->can_sleep) + gpiod_set_value_cansleep(fwd->descs[offset], value); + else + gpiod_set_value(fwd->descs[offset], value); } static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, @@ -343,7 +350,10 @@ static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, descs[j++] = fwd->descs[i]; } - gpiod_set_array_value(j, descs, NULL, values); + if (fwd->chip.can_sleep) + gpiod_set_array_value_cansleep(j, descs, NULL, values); + else + gpiod_set_array_value(j, descs, NULL, values); } static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip, diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c index ccaad1cb3c2e..d8a26e503ca5 100644 --- a/drivers/gpio/gpio-mt7621.c +++ b/drivers/gpio/gpio-mt7621.c @@ -239,7 +239,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank) rg->chip.offset = bank * MTK_BANK_WIDTH; rg->irq_chip.name = dev_name(dev); - rg->irq_chip.parent_device = dev; rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask; rg->irq_chip.irq_mask = mediatek_gpio_irq_mask; rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask; diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index e099c39e0355..80ddc43fd875 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -986,7 +986,8 @@ static void omap_gpio_mod_init(struct gpio_bank *bank) writel_relaxed(0, base + bank->regs->ctrl); } -static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) +static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc, + struct device *pm_dev) { struct gpio_irq_chip *irq; static int gpio; @@ -1052,6 +1053,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) if (ret) return dev_err_probe(bank->chip.parent, ret, "Could not register gpio chip\n"); + irq_domain_set_pm_device(bank->chip.irq.domain, pm_dev); ret = devm_request_irq(bank->chip.parent, bank->irq, omap_gpio_irq_handler, 0, dev_name(bank->chip.parent), bank); @@ -1402,7 +1404,6 @@ static int omap_gpio_probe(struct platform_device *pdev) irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock, irqc->name = dev_name(&pdev->dev); irqc->flags = IRQCHIP_MASK_ON_SUSPEND; - irqc->parent_device = dev; bank->irq = platform_get_irq(pdev, 0); if (bank->irq <= 0) { @@ -1466,7 +1467,7 @@ static int omap_gpio_probe(struct platform_device *pdev) omap_gpio_mod_init(bank); - ret = omap_gpio_chip_init(bank, irqc); + ret = omap_gpio_chip_init(bank, irqc, dev); if (ret) { pm_runtime_put_sync(dev); pm_runtime_disable(dev); diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index bd2e16d6e21c..3a76538f27fa 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -530,7 +530,6 @@ static int gpio_rcar_probe(struct 
platform_device *pdev) irq_chip = &p->irq_chip; irq_chip->name = "gpio-rcar"; - irq_chip->parent_device = dev; irq_chip->irq_mask = gpio_rcar_irq_disable; irq_chip->irq_unmask = gpio_rcar_irq_enable; irq_chip->irq_set_type = gpio_rcar_irq_set_type; @@ -552,6 +551,7 @@ static int gpio_rcar_probe(struct platform_device *pdev) goto err0; } + irq_domain_set_pm_device(gpio_chip->irq.domain, dev); ret = devm_request_irq(dev, p->irq_parent, gpio_rcar_irq_handler, IRQF_SHARED, name, p); if (ret) { diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c index a4c4e4584f5b..099e358d2491 100644 --- a/drivers/gpio/gpio-rockchip.c +++ b/drivers/gpio/gpio-rockchip.c @@ -410,10 +410,8 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) level = rockchip_gpio_readl(bank, bank->gpio_regs->int_type); polarity = rockchip_gpio_readl(bank, bank->gpio_regs->int_polarity); - switch (type) { - case IRQ_TYPE_EDGE_BOTH: + if (type == IRQ_TYPE_EDGE_BOTH) { if (bank->gpio_type == GPIO_TYPE_V2) { - bank->toggle_edge_mode &= ~mask; rockchip_gpio_writel_bit(bank, d->hwirq, 1, bank->gpio_regs->int_bothedge); goto out; @@ -431,30 +429,34 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) else polarity |= mask; } - break; - case IRQ_TYPE_EDGE_RISING: - bank->toggle_edge_mode &= ~mask; - level |= mask; - polarity |= mask; - break; - case IRQ_TYPE_EDGE_FALLING: - bank->toggle_edge_mode &= ~mask; - level |= mask; - polarity &= ~mask; - break; - case IRQ_TYPE_LEVEL_HIGH: - bank->toggle_edge_mode &= ~mask; - level &= ~mask; - polarity |= mask; - break; - case IRQ_TYPE_LEVEL_LOW: - bank->toggle_edge_mode &= ~mask; - level &= ~mask; - polarity &= ~mask; - break; - default: - ret = -EINVAL; - goto out; + } else { + if (bank->gpio_type == GPIO_TYPE_V2) { + rockchip_gpio_writel_bit(bank, d->hwirq, 0, + bank->gpio_regs->int_bothedge); + } else { + bank->toggle_edge_mode &= ~mask; + } + switch (type) { + case IRQ_TYPE_EDGE_RISING: + level |= mask; + polarity |= mask; + break; + case IRQ_TYPE_EDGE_FALLING: + level |= mask; + polarity &= ~mask; + break; + case IRQ_TYPE_LEVEL_HIGH: + level &= ~mask; + polarity |= mask; + break; + case IRQ_TYPE_LEVEL_LOW: + level &= ~mask; + polarity &= ~mask; + break; + default: + ret = -EINVAL; + goto out; + } } rockchip_gpio_writel(bank, level, bank->gpio_regs->int_type); diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c index 403f9e833d6a..7d82388b4ab7 100644 --- a/drivers/gpio/gpio-sifive.c +++ b/drivers/gpio/gpio-sifive.c @@ -223,7 +223,7 @@ static int sifive_gpio_probe(struct platform_device *pdev) NULL, chip->base + SIFIVE_GPIO_OUTPUT_EN, chip->base + SIFIVE_GPIO_INPUT_EN, - 0); + BGPIOF_READ_OUTPUT_REG_SET); if (ret) { dev_err(dev, "unable to init generic GPIO\n"); return ret; diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c index 04b137eca8da..8e5d87984a48 100644 --- a/drivers/gpio/gpio-sim.c +++ b/drivers/gpio/gpio-sim.c @@ -547,7 +547,7 @@ struct gpio_sim_bank { * * So we need to store the pointer to the parent struct here. We can * dereference it anywhere we need with no checks and no locking as - * it's guaranteed to survive the childred and protected by configfs + * it's guaranteed to survive the children and protected by configfs * locks. * * Same for other structures. 
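[Editorial sketch, not part of the patch: the gpio-aggregator hunks above dispatch every forwarded operation on the underlying chip's can_sleep flag, so lines proxied from a sleeping controller (e.g. an I2C expander) go through the *_cansleep accessors while memory-mapped chips keep the atomic-context variants. A minimal illustration of that pattern, assuming a hypothetical forwarder with a descs[] array; names are illustrative, not the driver's exact internals.]

	#include <linux/gpio/driver.h>
	#include <linux/gpio/consumer.h>

	static int fwd_get(struct gpio_chip *chip, unsigned int offset)
	{
		struct gpiochip_fwd *fwd = gpiochip_get_data(chip);

		/* Sleeping chips must use the _cansleep accessor. */
		if (chip->can_sleep)
			return gpiod_get_value_cansleep(fwd->descs[offset]);

		return gpiod_get_value(fwd->descs[offset]);
	}

[Calling gpiod_get_value() on a descriptor whose chip can sleep triggers a WARN from gpiolib, which is the failure mode this dispatch avoids.]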
@@ -570,6 +570,11 @@ static struct gpio_sim_bank *to_gpio_sim_bank(struct config_item *item) return container_of(group, struct gpio_sim_bank, group); } +static bool gpio_sim_bank_has_label(struct gpio_sim_bank *bank) +{ + return bank->label && *bank->label; +} + static struct gpio_sim_device * gpio_sim_bank_get_device(struct gpio_sim_bank *bank) { @@ -770,9 +775,15 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev) * point the device doesn't exist yet and so dev_name() * is not available. */ - hog->chip_label = kasprintf(GFP_KERNEL, - "gpio-sim.%u-%s", dev->id, - fwnode_get_name(bank->swnode)); + if (gpio_sim_bank_has_label(bank)) + hog->chip_label = kstrdup(bank->label, + GFP_KERNEL); + else + hog->chip_label = kasprintf(GFP_KERNEL, + "gpio-sim.%u-%s", + dev->id, + fwnode_get_name( + bank->swnode)); if (!hog->chip_label) { gpio_sim_remove_hogs(dev); return -ENOMEM; @@ -816,7 +827,7 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank, properties[prop_idx++] = PROPERTY_ENTRY_U32("ngpios", bank->num_lines); - if (bank->label && (strlen(bank->label) > 0)) + if (gpio_sim_bank_has_label(bank)) properties[prop_idx++] = PROPERTY_ENTRY_STRING("gpio-sim,label", bank->label); @@ -1311,7 +1322,7 @@ static void gpio_sim_hog_config_item_release(struct config_item *item) kfree(hog); } -struct configfs_item_operations gpio_sim_hog_config_item_ops = { +static struct configfs_item_operations gpio_sim_hog_config_item_ops = { .release = gpio_sim_hog_config_item_release, }; diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c index 34b36a8c035f..031fe105b58e 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -343,9 +343,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip, return offset + pin; } +#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio) + static void tegra186_irq_ack(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; base = tegra186_gpio_get_base(gpio, data->hwirq); @@ -357,7 +360,8 @@ static void tegra186_irq_ack(struct irq_data *data) static void tegra186_irq_mask(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; @@ -372,7 +376,8 @@ static void tegra186_irq_mask(struct irq_data *data) static void tegra186_irq_unmask(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; @@ -387,7 +392,8 @@ static void tegra186_irq_unmask(struct irq_data *data) static int tegra186_irq_set_type(struct irq_data *data, unsigned int type) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; @@ -1069,6 +1075,7 @@ static const struct tegra_gpio_soc tegra241_main_soc = { .ports = tegra241_main_ports, .name = "tegra241-gpio", .instance = 0, + .num_irqs_per_bank = 8, }; #define TEGRA241_AON_GPIO_PORT(_name, _bank, _port, _pins) \ @@ -1089,6 +1096,7 @@ static const struct tegra_gpio_soc tegra241_aon_soc = { .ports = tegra241_aon_ports, .name = "tegra241-gpio-aon", .instance = 1, + 
.num_irqs_per_bank = 8, }; static const struct of_device_id tegra186_gpio_of_match[] = { diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c index 5b103221b58d..fa4bc7481f9a 100644 --- a/drivers/gpio/gpio-tqmx86.c +++ b/drivers/gpio/gpio-tqmx86.c @@ -281,7 +281,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev) u8 irq_status; irq_chip->name = chip->label; - irq_chip->parent_device = &pdev->dev; irq_chip->irq_mask = tqmx86_gpio_irq_mask; irq_chip->irq_unmask = tqmx86_gpio_irq_unmask; irq_chip->irq_set_type = tqmx86_gpio_irq_set_type; @@ -316,6 +315,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev) goto out_pm_dis; } + irq_domain_set_pm_device(girq->domain, dev); + dev_info(dev, "GPIO functionality initialized with %d pins\n", chip->ngpio); diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c index d885032cf814..d918d2df4de2 100644 --- a/drivers/gpio/gpio-ts4900.c +++ b/drivers/gpio/gpio-ts4900.c @@ -1,7 +1,7 @@ /* * Digital I/O driver for Technologic Systems I2C FPGA Core * - * Copyright (C) 2015 Technologic Systems + * Copyright (C) 2015, 2018 Technologic Systems * Copyright (C) 2016 Savoir-Faire Linux * * This program is free software; you can redistribute it and/or @@ -55,19 +55,33 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip, { struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); - /* - * This will clear the output enable bit, the other bits are - * dontcare when this is cleared + /* Only clear the OE bit here, requires a RMW. Prevents potential issue + * with OE and data getting to the physical pin at different times. */ - return regmap_write(priv->regmap, offset, 0); + return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0); } static int ts4900_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); + unsigned int reg; int ret; + /* If changing from an input to an output, we need to first set the + * proper data bit to what is requested and then set OE bit. 
This + * prevents a glitch that can occur on the IO line + */ + regmap_read(priv->regmap, offset, ®); + if (!(reg & TS4900_GPIO_OE)) { + if (value) + reg = TS4900_GPIO_OUT; + else + reg &= ~TS4900_GPIO_OUT; + + regmap_write(priv->regmap, offset, reg); + } + if (value) ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE | TS4900_GPIO_OUT); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index c0f6a25c3279..a5495ad31c9c 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -307,7 +307,8 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip, if (IS_ERR(desc)) return desc; - ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout); + /* ACPI uses hundredths of milliseconds units */ + ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10); if (ret) dev_warn(chip->parent, "Failed to set debounce-timeout for pin 0x%04X, err %d\n", @@ -1035,7 +1036,8 @@ int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int ind if (ret < 0) return ret; - ret = gpio_set_debounce_timeout(desc, info.debounce); + /* ACPI uses hundredths of milliseconds units */ + ret = gpio_set_debounce_timeout(desc, info.debounce * 10); if (ret) return ret; diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index c7b5446d01fd..ffa0256cad5a 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -330,7 +330,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) goto out_free_lh; } - ret = gpiod_request(desc, lh->label); + ret = gpiod_request_user(desc, lh->label); if (ret) goto out_free_lh; lh->descs[i] = desc; @@ -1378,7 +1378,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip) goto out_free_linereq; } - ret = gpiod_request(desc, lr->label); + ret = gpiod_request_user(desc, lr->label); if (ret) goto out_free_linereq; @@ -1764,7 +1764,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) } } - ret = gpiod_request(desc, le->label); + ret = gpiod_request_user(desc, le->label); if (ret) goto out_free_le; le->desc = desc; diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 4098bc7f88b7..44c1ad51b3fe 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -475,12 +475,9 @@ static ssize_t export_store(struct class *class, * they may be undone on its behalf too. */ - status = gpiod_request(desc, "sysfs"); - if (status) { - if (status == -EPROBE_DEFER) - status = -ENODEV; + status = gpiod_request_user(desc, "sysfs"); + if (status) goto done; - } status = gpiod_set_transitory(desc, false); if (!status) { diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 3859911b61e9..6630d92e30ad 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -2227,6 +2227,16 @@ static int gpio_set_bias(struct gpio_desc *desc) return gpio_set_config_with_argument_optional(desc, bias, arg); } +/** + * gpio_set_debounce_timeout() - Set debounce timeout + * @desc: GPIO descriptor to set the debounce timeout + * @debounce: Debounce timeout in microseconds + * + * The function calls the certain GPIO driver to set debounce timeout + * in the hardware. + * + * Returns 0 on success, or negative error code otherwise. 
+ */ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce) { return gpio_set_config_with_argument_optional(desc, @@ -3147,6 +3157,16 @@ int gpiod_to_irq(const struct gpio_desc *desc) return retirq; } +#ifdef CONFIG_GPIOLIB_IRQCHIP + if (gc->irq.chip) { + /* + * Avoid race condition with other code, which tries to lookup + * an IRQ before the irqchip has been properly registered, + * i.e. while gpiochip is still being brought up. + */ + return -EPROBE_DEFER; + } +#endif return -ENXIO; } EXPORT_SYMBOL_GPL(gpiod_to_irq); diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index 30bc3f80f83e..c31f4626915d 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -135,6 +135,18 @@ struct gpio_desc { int gpiod_request(struct gpio_desc *desc, const char *label); void gpiod_free(struct gpio_desc *desc); + +static inline int gpiod_request_user(struct gpio_desc *desc, const char *label) +{ + int ret; + + ret = gpiod_request(desc, label); + if (ret == -EPROBE_DEFER) + ret = -ENODEV; + + return ret; +} + int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id, unsigned long lflags, enum gpiod_flags dflags); int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d8b854fcbffa..9a53a4de2bb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1408,12 +1408,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); -bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); void amdgpu_acpi_detect(void); #else static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } -static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } static inline void amdgpu_acpi_detect(void) { } static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; } static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, @@ -1422,6 +1420,14 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state) { return 0; } #endif +#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) +bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev); +bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); +#else +static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } +static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; } +#endif + int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, uint64_t addr, struct amdgpu_bo **bo, struct amdgpu_bo_va_mapping **mapping); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 4811b0faafd9..0e12315fa0cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1031,6 +1031,20 @@ void amdgpu_acpi_detect(void) } } +#if IS_ENABLED(CONFIG_SUSPEND) +/** + * amdgpu_acpi_is_s3_active + * + * @adev: amdgpu_device_pointer + * + * returns true if supported, false if not. 
+ */ +bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) +{ + return !(adev->flags & AMD_IS_APU) || + (pm_suspend_target_state == PM_SUSPEND_MEM); +} + /** * amdgpu_acpi_is_s0ix_active * @@ -1040,11 +1054,24 @@ void amdgpu_acpi_detect(void) */ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { -#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND) - if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) { - if (adev->flags & AMD_IS_APU) - return pm_suspend_target_state == PM_SUSPEND_TO_IDLE; + if (!(adev->flags & AMD_IS_APU) || + (pm_suspend_target_state != PM_SUSPEND_TO_IDLE)) + return false; + + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) { + dev_warn_once(adev->dev, + "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n" + "To use suspend-to-idle change the sleep mode in BIOS setup.\n"); + return false; } -#endif + +#if !IS_ENABLED(CONFIG_AMD_PMC) + dev_warn_once(adev->dev, + "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n"); return false; +#else + return true; +#endif /* CONFIG_AMD_PMC */ } + +#endif /* CONFIG_SUSPEND */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 82011e75ed85..c4387b38229c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1141,7 +1141,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, if (ret) return ret; - if (!dev->mode_config.allow_fb_modifiers) { + if (!dev->mode_config.allow_fb_modifiers && !adev->enable_virtual_display) { drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI, "GFX9+ requires FB check based on format modifier\n"); ret = check_tiling_flags_gfx6(rfb); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 4c83f1db8a24..0ead08ba58c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2011,6 +2011,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENODEV; } + if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev)) + amdgpu_aspm = 0; + if (amdgpu_virtual_display || amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) supports_atomic = true; @@ -2246,13 +2249,20 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) static int amdgpu_pmops_prepare(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); /* Return a positive number here so * DPM_FLAG_SMART_SUSPEND works properly */ if (amdgpu_device_supports_boco(drm_dev)) - return pm_runtime_suspended(dev) && - pm_suspend_via_firmware(); + return pm_runtime_suspended(dev); + + /* if we will not support s3 or s2i for the device + * then skip suspend + */ + if (!amdgpu_acpi_is_s0ix_active(adev) && + !amdgpu_acpi_is_s3_active(adev)) + return 1; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 5c3f24069f2a..4655702a5e00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1904,7 +1904,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, unsigned i; int r; - if (direct_submit && !ring->sched.ready) { + if (!direct_submit && !ring->sched.ready) { DRM_ERROR("Trying to move memory with ring turned off.\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index 
d99c8779b51e..5224d9a39737 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -391,7 +391,6 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev, int index) { struct drm_plane *plane; - uint64_t modifiers[] = {DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID}; int ret; plane = kzalloc(sizeof(*plane), GFP_KERNEL); @@ -402,7 +401,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev, &amdgpu_vkms_plane_funcs, amdgpu_vkms_formats, ARRAY_SIZE(amdgpu_vkms_formats), - modifiers, type, NULL); + NULL, type, NULL); if (ret) { kfree(plane); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b37fc7d7d2c7..418341a67517 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -768,11 +768,17 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, * Check if all VM PDs/PTs are ready for updates * * Returns: - * True if eviction list is empty. + * True if VM is not evicting. */ bool amdgpu_vm_ready(struct amdgpu_vm *vm) { - return list_empty(&vm->evicted); + bool ret; + + amdgpu_vm_eviction_lock(vm); + ret = !vm->evicting; + amdgpu_vm_eviction_unlock(vm); + + return ret && list_empty(&vm->evicted); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index b4eddf6e98a6..ff738e9725ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -543,7 +543,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev) adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) { + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 3): /* Get SA disabled bitmap from eFuse setting */ efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE); efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK; @@ -566,6 +568,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev) disabled_sa = tmp; WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa); + break; + default: + break; } } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 38bb42727715..a2f8ed0e6a64 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -1140,6 +1140,9 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3)) + return; + adev->mmhub.funcs->get_clockgating(adev, flags); if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0)) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e8e4749e9c79..f0638db57111 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2057,6 +2057,10 @@ static int sdma_v4_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* SMU saves SDMA state for us */ + if (adev->in_s0ix) + return 0; + return sdma_v4_0_hw_fini(adev); } @@ -2064,6 +2068,10 @@ static int sdma_v4_0_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* SMU restores SDMA state for us */ + if (adev->in_s0ix) + return 0; + return sdma_v4_0_hw_init(adev); } diff --git 
a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0fc1747e4a70..12f80fdc1fbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -619,8 +619,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev) static int soc15_asic_reset(struct amdgpu_device *adev) { /* original raven doesn't have full asic reset */ - if ((adev->apu_flags & AMD_APU_IS_RAVEN) && - !(adev->apu_flags & AMD_APU_IS_RAVEN2)) + if ((adev->apu_flags & AMD_APU_IS_RAVEN) || + (adev->apu_flags & AMD_APU_IS_RAVEN2)) return 0; switch (soc15_asic_reset_method(adev)) { @@ -1114,8 +1114,11 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; + /* + * MMHUB PG needs to be disabled for Picasso for + * stability reasons. + */ adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_MMHUB | AMD_PG_SUPPORT_VCN; } else { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7f9773f8dab6..075429bea427 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3653,7 +3653,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) /* Use GRPH_PFLIP interrupt */ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; - i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; + i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; i++) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); if (r) { @@ -4256,6 +4256,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } #endif + /* Disable vblank IRQs aggressively for power-saving. */ + adev_to_drm(adev)->vblank_disable_immediate = true; + /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; @@ -4301,19 +4304,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) update_connector_ext_caps(aconnector); if (psr_feature_enabled) amdgpu_dm_set_psr_caps(link); + + /* TODO: Fix vblank control helpers to delay PSR entry to allow this when + * PSR is also supported. + */ + if (link->psr_settings.psr_feature_enabled) + adev_to_drm(adev)->vblank_disable_immediate = false; } } - /* - * Disable vblank IRQs aggressively for power-saving. - * - * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR - * is also supported. - */ - adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled; - /* Software is initialized. Now we can register interrupt handlers. 
*/ switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index f977f29907df..10c7be40dfb0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -473,8 +473,10 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK); /* Refresh bounding box */ + DC_FP_START(); clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box( clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); + DC_FP_END(); } static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index 48005def1164..bc4ddc36fe58 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -570,32 +570,32 @@ static struct wm_table lpddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 7.95, - .sr_enter_plus_exit_time_us = 9, + .sr_exit_time_us = 13.5, + .sr_enter_plus_exit_time_us = 16.5, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 9.82, - .sr_enter_plus_exit_time_us = 11.196, + .sr_exit_time_us = 13.5, + .sr_enter_plus_exit_time_us = 16.5, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 9.89, - .sr_enter_plus_exit_time_us = 11.24, + .sr_exit_time_us = 13.5, + .sr_enter_plus_exit_time_us = 16.5, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 9.748, - .sr_enter_plus_exit_time_us = 11.102, + .sr_exit_time_us = 13.5, + .sr_enter_plus_exit_time_us = 16.5, .valid = true, }, } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index 4162ce40089b..9d17c5a5ae01 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -329,38 +329,38 @@ static struct clk_bw_params dcn31_bw_params = { }; -static struct wm_table ddr4_wm_table = { +static struct wm_table ddr5_wm_table = { .entries = { { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 6.09, - .sr_enter_plus_exit_time_us = 7.14, + .sr_exit_time_us = 9, + .sr_enter_plus_exit_time_us = 11, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 10.12, - .sr_enter_plus_exit_time_us = 11.48, + .sr_exit_time_us = 9, + .sr_enter_plus_exit_time_us = 11, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 10.12, - .sr_enter_plus_exit_time_us = 11.48, + .sr_exit_time_us = 9, + .sr_enter_plus_exit_time_us = 11, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 10.12, - .sr_enter_plus_exit_time_us = 11.48, + .sr_exit_time_us = 9, + .sr_enter_plus_exit_time_us = 11, .valid = true, }, } @@ -687,7 +687,7 @@ void dcn31_clk_mgr_construct( if 
(ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { dcn31_bw_params.wm_table = lpddr5_wm_table; } else { - dcn31_bw_params.wm_table = ddr4_wm_table; + dcn31_bw_params.wm_table = ddr5_wm_table; } /* Saved clocks configured at boot for debug purposes */ dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index a1011f3273f3..de3f4643eeef 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -120,7 +120,11 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000); if (result == VBIOSSMC_Result_Failed) { - ASSERT(0); + if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu && + param == TABLE_WATERMARKS) + DC_LOG_WARNING("Watermarks table not configured properly by SMU"); + else + ASSERT(0); REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); return -1; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 6f5528d34093..ba1aa994db4b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -985,10 +985,13 @@ static bool dc_construct(struct dc *dc, goto fail; #ifdef CONFIG_DRM_AMD_DC_DCN dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; -#endif - if (dc->res_pool->funcs->update_bw_bounding_box) + if (dc->res_pool->funcs->update_bw_bounding_box) { + DC_FP_START(); dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); + DC_FP_END(); + } +#endif /* Creation of current_state must occur after dc->dml * is initialized in dc_create_resource_pool because @@ -1220,6 +1223,8 @@ struct dc *dc_create(const struct dc_init_data *init_params) dc->caps.max_dp_protocol_version = DP_VERSION_1_4; + dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator; + if (dc->res_pool->dmcu != NULL) dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 4c3ab2575e4b..61b8f29a0c30 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -5597,6 +5597,26 @@ static bool retrieve_link_cap(struct dc_link *link) dp_hw_fw_revision.ieee_fw_rev, sizeof(dp_hw_fw_revision.ieee_fw_rev)); + /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */ + { + uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 }; + uint8_t fwrev_mbp_2018[] = { 7, 4 }; + uint8_t fwrev_mbp_2018_vega[] = { 8, 4 }; + + /* We also check for the firmware revision as 16,1 models have an + * identical device id and are incorrectly quirked otherwise. 
+ */ + if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && + !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018, + sizeof(str_mbp_2018)) && + (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018, + sizeof(fwrev_mbp_2018)) || + !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega, + sizeof(fwrev_mbp_2018_vega)))) { + link->reported_link_cap.link_rate = LINK_RATE_RBR2; + } + } + memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index b3912ff9dc91..18757c158523 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1964,10 +1964,6 @@ enum dc_status dc_remove_stream_from_ctx( dc->res_pool, del_pipe->stream_res.stream_enc, false); - /* Release link encoder from stream in new dc_state. */ - if (dc->res_pool->funcs->link_enc_unassign) - dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream); - #if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(del_pipe)) { update_hpo_dp_stream_engine_usage( diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 288e7b01f561..b51864890621 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -202,6 +202,7 @@ struct dc_caps { bool edp_dsc_support; bool vbios_lttpr_aware; bool vbios_lttpr_enable; + uint32_t max_otg_num; }; struct dc_bug_wa { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index f3ff141b706a..eb2755bdb30e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1608,11 +1608,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst); - if (dc_is_embedded_signal(pipe_ctx->stream->signal) && - pipe_ctx->stream_res.stream_enc->funcs->reset_fifo) - pipe_ctx->stream_res.stream_enc->funcs->reset_fifo( - pipe_ctx->stream_res.stream_enc); - if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); @@ -1839,9 +1834,29 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) break; } } - // We are trying to enable eDP, don't power down VDD - if (can_apply_edp_fast_boot) + + /* + * TO-DO: So far the code logic below only addresses single eDP case. + * For dual eDP case, there are a few things that need to be + * implemented first: + * + * 1. Change the fastboot logic above, so eDP link[0 or 1]'s + * stream[0 or 1] will all be checked. + * + * 2. Change keep_edp_vdd_on to an array, and maintain keep_edp_vdd_on + * for each eDP. + * + * Once above 2 things are completed, we can then change the logic below + * correspondingly, so dual eDP case will be fully covered. 
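The Apple MBP 2018 quirk added to retrieve_link_cap() above matches the sink branding string first and then the firmware revision, since the 16,1 models share the same device id. If more panels ever need the same clamp, the match data could move into a table; a hedged sketch (the struct and helper below are invented, not part of the patch):

/* Invented illustration: table-driven form of the DPCD link-rate quirk. */
struct dp_sink_quirk {
	uint32_t dev_id;
	uint8_t dev_id_str[6];
	uint8_t fw_rev[2];
	enum dc_link_rate max_rate;
};

static const struct dp_sink_quirk dp_sink_quirks[] = {
	/* MBP 2018 15" Retina, non-Vega and Vega firmware */
	{ 0x0010fa, { 101, 68, 21, 103, 98, 97 }, { 7, 4 }, LINK_RATE_RBR2 },
	{ 0x0010fa, { 101, 68, 21, 103, 98, 97 }, { 8, 4 }, LINK_RATE_RBR2 },
};

static void apply_dp_sink_quirks(struct dc_link *link)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(dp_sink_quirks); i++) {
		const struct dp_sink_quirk *q = &dp_sink_quirks[i];

		if (link->dpcd_caps.sink_dev_id == q->dev_id &&
		    !memcmp(link->dpcd_caps.sink_dev_id_str, q->dev_id_str,
			    sizeof(q->dev_id_str)) &&
		    !memcmp(link->dpcd_caps.sink_fw_revision, q->fw_rev,
			    sizeof(q->fw_rev)))
			link->reported_link_cap.link_rate = q->max_rate;
	}
}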
+ */ + + // We are trying to enable eDP, don't power down VDD if an eDP stream already exists + if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) { keep_edp_vdd_on = true; + DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n"); + } else { + DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n"); + } } // Check seamless boot support diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index bf4436d7aaab..b0c08ee6bc2c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -902,19 +902,6 @@ void enc1_stream_encoder_stop_dp_info_packets( } -void enc1_stream_encoder_reset_fifo( - struct stream_encoder *enc) -{ - struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - - /* set DIG_START to 0x1 to reset FIFO */ - REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); - udelay(100); - - /* write 0 to take the FIFO out of reset */ - REG_UPDATE(DIG_FE_CNTL, DIG_START, 0); -} - void enc1_stream_encoder_dp_blank( struct dc_link *link, struct stream_encoder *enc) @@ -1600,8 +1587,6 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = { enc1_stream_encoder_send_immediate_sdp_message, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, - .reset_fifo = - enc1_stream_encoder_reset_fifo, .dp_blank = enc1_stream_encoder_dp_blank, .dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index a146a41f68e9..687d7e4bf7ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -626,9 +626,6 @@ void enc1_stream_encoder_send_immediate_sdp_message( void enc1_stream_encoder_stop_dp_info_packets( struct stream_encoder *enc); -void enc1_stream_encoder_reset_fifo( - struct stream_encoder *enc); - void enc1_stream_encoder_dp_blank( struct dc_link *link, struct stream_encoder *enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 2bc93df023ad..2a72517e2b28 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = { .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index 8a70f92795c2..aab25ca8343a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -593,8 +593,6 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { enc1_stream_encoder_send_immediate_sdp_message, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, - .reset_fifo = - enc1_stream_encoder_reset_fifo, .dp_blank = enc1_stream_encoder_dp_blank, .dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index 8daa12730bc1..a04ca4a98392 100644 ---
a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -789,8 +789,6 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = { enc3_stream_encoder_update_dp_info_packets, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, - .reset_fifo = - enc1_stream_encoder_reset_fifo, .dp_blank = enc1_stream_encoder_dp_blank, .dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 90c73a1cb986..5e3bcaf12cac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -138,8 +138,11 @@ static uint32_t convert_and_clamp( ret_val = wm_ns * refclk_mhz; ret_val /= 1000; - if (ret_val > clamp_value) + if (ret_val > clamp_value) { + /* clamping WMs is abnormal, unexpected and may lead to underflow*/ + ASSERT(0); ret_val = clamp_value; + } return ret_val; } @@ -159,7 +162,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) { hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); @@ -193,7 +196,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) { hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns) @@ -203,7 +206,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) { hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); @@ -237,7 +240,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) { hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns) @@ -247,7 +250,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) { hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); @@ -281,7 +284,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) { 
hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns) @@ -291,7 +294,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) { hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); @@ -325,7 +328,7 @@ static bool hubbub31_program_urgent_watermarks( if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) { hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0x3fff); REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns) @@ -351,7 +354,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" @@ -367,7 +370,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" @@ -383,7 +386,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n" @@ -399,7 +402,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->a.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n" @@ -416,7 +419,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" @@ -432,7 +435,7 @@ static bool 
hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" @@ -448,7 +451,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n" @@ -464,7 +467,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->b.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n" @@ -481,7 +484,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" @@ -497,7 +500,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" @@ -513,7 +516,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n" @@ -529,7 +532,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->c.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n" @@ -546,7 +549,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" @@ -562,7 +565,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_exit_ns; prog_wm_value = 
convert_and_clamp( watermarks->d.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" @@ -578,7 +581,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n" @@ -594,7 +597,7 @@ static bool hubbub31_program_stutter_watermarks( watermarks->d.cstate_pstate.cstate_exit_z8_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n" @@ -625,7 +628,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->a.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" @@ -642,7 +645,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->b.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" @@ -659,7 +662,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->c.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" @@ -676,7 +679,7 @@ static bool hubbub31_program_pstate_watermarks( watermarks->d.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); + refclk_mhz, 0xffff); REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index 073f8b667eff..c88e113b94d1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -164,10 +164,6 @@ struct stream_encoder_funcs { void (*stop_dp_info_packets)( struct stream_encoder *enc); - void (*reset_fifo)( - struct stream_encoder *enc - ); - void (*dp_blank)( struct dc_link *link, struct stream_encoder *enc); diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index e2cae97f4ff1..48cc009d9bdf 100644 --- 
a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -3462,8 +3462,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr || attr == &sensor_dev_attr_power2_cap.dev_attr.attr || attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr || - attr == &sensor_dev_attr_power2_label.dev_attr.attr || - attr == &sensor_dev_attr_power1_label.dev_attr.attr)) + attr == &sensor_dev_attr_power2_label.dev_attr.attr)) return 0; return effective_mode; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 777f717c37ae..5488a0edb942 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -421,6 +421,36 @@ static int sienna_cichlid_store_powerplay_table(struct smu_context *smu) return 0; } +static int sienna_cichlid_patch_pptable_quirk(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t *board_reserved; + uint16_t *freq_table_gfx; + uint32_t i; + + /* Fix some OEM SKU specific stability issues */ + GET_PPTABLE_MEMBER(BoardReserved, &board_reserved); + if ((adev->pdev->device == 0x73DF) && + (adev->pdev->revision == 0XC3) && + (adev->pdev->subsystem_device == 0x16C2) && + (adev->pdev->subsystem_vendor == 0x1043)) + board_reserved[0] = 1387; + + GET_PPTABLE_MEMBER(FreqTableGfx, &freq_table_gfx); + if ((adev->pdev->device == 0x73DF) && + (adev->pdev->revision == 0XC3) && + ((adev->pdev->subsystem_device == 0x16C2) || + (adev->pdev->subsystem_device == 0x133C)) && + (adev->pdev->subsystem_vendor == 0x1043)) { + for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) { + if (freq_table_gfx[i] > 2500) + freq_table_gfx[i] = 2500; + } + } + + return 0; +} + static int sienna_cichlid_setup_pptable(struct smu_context *smu) { int ret = 0; @@ -441,7 +471,7 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu) if (ret) return ret; - return ret; + return sienna_cichlid_patch_pptable_quirk(smu); } static int sienna_cichlid_tables_init(struct smu_context *smu) @@ -1238,21 +1268,37 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu) &dpm_context->dpm_tables.soc_table; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + struct amdgpu_device *adev = smu->adev; pstate_table->gfxclk_pstate.min = gfx_table->min; pstate_table->gfxclk_pstate.peak = gfx_table->max; - if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK) - pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK; pstate_table->uclk_pstate.min = mem_table->min; pstate_table->uclk_pstate.peak = mem_table->max; - if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK) - pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK; pstate_table->socclk_pstate.min = soc_table->min; pstate_table->socclk_pstate.peak = soc_table->max; - if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK) + + switch (adev->asic_type) { + case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: + pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK; + pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK; pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK; + break; + case CHIP_DIMGREY_CAVEFISH: + pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK; + pstate_table->uclk_pstate.standard = 
DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK; + pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK; + break; + case CHIP_BEIGE_GOBY: + pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK; + pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK; + pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK; + break; + default: + break; + } return 0; } @@ -3696,14 +3742,14 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu) { - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *smc_pptable = table_context->driver_pptable; + uint16_t *mgpu_fan_boost_limit_rpm; + GET_PPTABLE_MEMBER(MGpuFanBoostLimitRpm, &mgpu_fan_boost_limit_rpm); /* * Skip the MGpuFanBoost setting for those ASICs * which do not support it */ - if (!smc_pptable->MGpuFanBoostLimitRpm) + if (*mgpu_fan_boost_limit_rpm == 0) return 0; return smu_cmn_send_smc_msg_with_param(smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h index 38cd0ece24f6..42f705c7a36f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h @@ -33,6 +33,14 @@ typedef enum { #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960 #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000 +#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950 +#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960 +#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676 + +#define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200 +#define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960 +#define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000 + extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index caf1775d48ef..0bc84b709a93 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -282,14 +282,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu) static int yellow_carp_mode_reset(struct smu_context *smu, int type) { - int ret = 0, index = 0; - - index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, - SMU_MSG_GfxDeviceDriverReset); - if (index < 0) - return index == -EACCES ? 0 : index; + int ret = 0; - ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL); if (ret) dev_err(smu->adev->dev, "Failed to mode reset!\n"); diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig index 58a242871b28..6e3f1d600541 100644 --- a/drivers/gpu/drm/arm/Kconfig +++ b/drivers/gpu/drm/arm/Kconfig @@ -6,6 +6,7 @@ config DRM_HDLCD depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) depends on COMMON_CLK select DRM_KMS_HELPER + select DRM_GEM_CMA_HELPER help Choose this option if you have an ARM High Definition Colour LCD controller. 
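sienna_cichlid_patch_pptable_quirk() above pins its workaround to one OEM board by checking the full PCI identity (device id, revision, subsystem vendor and device) before editing the pptable. The quirk's match-and-clamp shape in isolation, as a sketch (the helper name is invented):

/* Invented helper: clamp GFX DPM levels for one OEM SKU, as the quirk above does. */
static void clamp_gfx_dpm_for_sku(struct amdgpu_device *adev,
				  uint16_t *freq_table_gfx, uint32_t num_levels)
{
	uint32_t i;

	/* Match the exact board before touching anything. */
	if (adev->pdev->device != 0x73DF ||
	    adev->pdev->revision != 0xC3 ||
	    adev->pdev->subsystem_vendor != 0x1043)
		return;

	for (i = 0; i < num_levels; i++)
		if (freq_table_gfx[i] > 2500)
			freq_table_gfx[i] = 2500;	/* MHz ceiling for stability */
}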
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 61db5a66b493..44ad70939663 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -8,7 +8,6 @@ config DRM_BRIDGE config DRM_PANEL_BRIDGE def_bool y depends on DRM_BRIDGE - depends on DRM_KMS_HELPER select DRM_PANEL help DRM bridge wrapper of DRM panels @@ -30,6 +29,7 @@ config DRM_CDNS_DSI config DRM_CHIPONE_ICN6211 tristate "Chipone ICN6211 MIPI-DSI/RGB Converter bridge" depends on OF + select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL_BRIDGE help diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index a7389a0facfb..af07eeb47ca0 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -7,6 +7,7 @@ */ #include <linux/bitfield.h> +#include <linux/bits.h> #include <linux/clk.h> #include <linux/irq.h> #include <linux/math64.h> @@ -196,12 +197,9 @@ static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps) /* * ui2bc - UI time periods to byte clock cycles */ -static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui) +static u32 ui2bc(unsigned int ui) { - u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); - - return DIV64_U64_ROUND_UP(ui * dsi->lanes, - dsi->mode.clock * 1000 * bpp); + return DIV_ROUND_UP(ui, BITS_PER_BYTE); } /* @@ -232,12 +230,12 @@ static int nwl_dsi_config_host(struct nwl_dsi *dsi) } /* values in byte clock cycles */ - cycles = ui2bc(dsi, cfg->clk_pre); + cycles = ui2bc(cfg->clk_pre); DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles); nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles); cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero); DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles); - cycles += ui2bc(dsi, cfg->clk_pre); + cycles += ui2bc(cfg->clk_pre); DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles); nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles); cycles = ps2bc(dsi, cfg->hs_exit); diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index dab8f76618f3..68d8415e6c28 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -1802,6 +1802,7 @@ static inline void ti_sn_gpio_unregister(void) {} static void ti_sn65dsi86_runtime_disable(void *data) { + pm_runtime_dont_use_autosuspend(data); pm_runtime_disable(data); } @@ -1861,11 +1862,11 @@ static int ti_sn65dsi86_probe(struct i2c_client *client, "failed to get reference clock\n"); pm_runtime_enable(dev); + pm_runtime_set_autosuspend_delay(pdata->dev, 500); + pm_runtime_use_autosuspend(pdata->dev); ret = devm_add_action_or_reset(dev, ti_sn65dsi86_runtime_disable, dev); if (ret) return ret; - pm_runtime_set_autosuspend_delay(pdata->dev, 500); - pm_runtime_use_autosuspend(pdata->dev); ti_sn65dsi86_debugfs_init(pdata); diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 9781722519c3..54d62fdb4ef9 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -76,15 +76,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, state->mode_blob = NULL; if (mode) { + struct drm_property_blob *blob; + drm_mode_convert_to_umode(&umode, mode); - state->mode_blob = - drm_property_create_blob(state->crtc->dev, - sizeof(umode), - &umode); - if (IS_ERR(state->mode_blob)) - return PTR_ERR(state->mode_blob); + blob = drm_property_create_blob(crtc->dev, + sizeof(umode), &umode); + if (IS_ERR(blob)) + return PTR_ERR(blob); drm_mode_copy(&state->mode, mode); 
+ + state->mode_blob = blob; state->enable = true; drm_dbg_atomic(crtc->dev, "Set [MODE:%s] for [CRTC:%d:%s] state %p\n", diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index a50c82bc2b2f..76a8c707c34b 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -2330,6 +2330,9 @@ EXPORT_SYMBOL(drm_connector_atomic_hdr_metadata_equal); void drm_connector_set_vrr_capable_property( struct drm_connector *connector, bool capable) { + if (!connector->vrr_capable_property) + return; + drm_object_property_set_value(&connector->base, connector->vrr_capable_property, capable); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 12893e7be89b..f5f5de362ff2 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -5345,6 +5345,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) return quirks; + info->color_formats |= DRM_COLOR_FORMAT_RGB444; drm_parse_cea_ext(connector, edid); /* @@ -5393,7 +5394,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n", connector->name, info->bpc); - info->color_formats |= DRM_COLOR_FORMAT_RGB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index cefd0cbf9deb..dc275c466c9c 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -512,6 +512,7 @@ int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct * */ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_DONTEXPAND; if (cma_obj->map_noncoherent) { vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c index beaf99e9120a..b688841c18e4 100644 --- a/drivers/gpu/drm/drm_privacy_screen.c +++ b/drivers/gpu/drm/drm_privacy_screen.c @@ -269,7 +269,7 @@ EXPORT_SYMBOL(drm_privacy_screen_get_state); * * The notifier is called with no locks held. The new hw_state and sw_state * can be retrieved using the drm_privacy_screen_get_state() function. - * A pointer to the drm_privacy_screen's struct is passed as the void *data + * A pointer to the drm_privacy_screen's struct is passed as the ``void *data`` * argument of the notifier_block's notifier_call. * * The notifier will NOT be called when changes are made through diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 12571ac45540..c04264f70ad1 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -678,7 +678,6 @@ static int decon_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct decon_context *ctx; struct device_node *i80_if_timings; - struct resource *res; int ret; if (!dev->of_node) @@ -728,16 +727,11 @@ static int decon_probe(struct platform_device *pdev) goto err_iounmap; } - res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, - ctx->i80_if ? "lcd_sys" : "vsync"); - if (!res) { - dev_err(dev, "irq request failed.\n"); - ret = -ENXIO; + ret = platform_get_irq_byname(pdev, ctx->i80_if ? 
"lcd_sys" : "vsync"); + if (ret < 0) goto err_iounmap; - } - ret = devm_request_irq(dev, res->start, decon_irq_handler, - 0, "drm_decon", ctx); + ret = devm_request_irq(dev, ret, decon_irq_handler, 0, "drm_decon", ctx); if (ret) { dev_err(dev, "irq request failed.\n"); goto err_iounmap; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 32a36572b894..d13f5e3a030d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1334,8 +1334,10 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi, int ret; int te_gpio_irq; - dsi->te_gpio = devm_gpiod_get_optional(dsi->dev, "te", GPIOD_IN); - if (IS_ERR(dsi->te_gpio)) { + dsi->te_gpio = gpiod_get_optional(panel, "te", GPIOD_IN); + if (!dsi->te_gpio) { + return 0; + } else if (IS_ERR(dsi->te_gpio)) { dev_err(dsi->dev, "gpio request failed with %ld\n", PTR_ERR(dsi->te_gpio)); return PTR_ERR(dsi->te_gpio); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 023f54ee61a8..0ee32e4b1e43 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1267,7 +1267,6 @@ static int fimc_probe(struct platform_device *pdev) struct exynos_drm_ipp_formats *formats; struct device *dev = &pdev->dev; struct fimc_context *ctx; - struct resource *res; int ret; int i, j, num_limits, num_formats; @@ -1330,14 +1329,12 @@ static int fimc_probe(struct platform_device *pdev) return PTR_ERR(ctx->regs); /* resource irq */ - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) { - dev_err(dev, "failed to request irq resource.\n"); - return -ENOENT; - } + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; - ret = devm_request_irq(dev, res->start, fimc_irq_handler, - 0, dev_name(dev), ctx); + ret = devm_request_irq(dev, ret, fimc_irq_handler, + 0, dev_name(dev), ctx); if (ret < 0) { dev_err(dev, "failed to request irq.\n"); return ret; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index c735e53939d8..7d5a483a54de 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -1133,7 +1133,6 @@ static int fimd_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct fimd_context *ctx; struct device_node *i80_if_timings; - struct resource *res; int ret; if (!dev->of_node) @@ -1206,15 +1205,11 @@ static int fimd_probe(struct platform_device *pdev) if (IS_ERR(ctx->regs)) return PTR_ERR(ctx->regs); - res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, - ctx->i80_if ? "lcd_sys" : "vsync"); - if (!res) { - dev_err(dev, "irq request failed.\n"); - return -ENXIO; - } + ret = platform_get_irq_byname(pdev, ctx->i80_if ? 
"lcd_sys" : "vsync"); + if (ret < 0) + return ret; - ret = devm_request_irq(dev, res->start, fimd_irq_handler, - 0, "drm_fimd", ctx); + ret = devm_request_irq(dev, ret, fimd_irq_handler, 0, "drm_fimd", ctx); if (ret) { dev_err(dev, "irq request failed.\n"); return ret; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 166a80262896..964dceb28c1e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1220,7 +1220,6 @@ static int gsc_probe(struct platform_device *pdev) struct gsc_driverdata *driver_data; struct exynos_drm_ipp_formats *formats; struct gsc_context *ctx; - struct resource *res; int num_formats, ret, i, j; ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); @@ -1275,13 +1274,10 @@ static int gsc_probe(struct platform_device *pdev) return PTR_ERR(ctx->regs); /* resource irq */ - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) { - dev_err(dev, "failed to request irq resource.\n"); - return -ENOENT; - } + ctx->irq = platform_get_irq(pdev, 0); + if (ctx->irq < 0) + return ctx->irq; - ctx->irq = res->start; ret = devm_request_irq(dev, ctx->irq, gsc_irq_handler, 0, dev_name(dev), ctx); if (ret < 0) { diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 41c54f1f60bc..e5204be86093 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -809,19 +809,17 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx) return -ENXIO; } - res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_IRQ, 0); - if (res == NULL) { - dev_err(dev, "get interrupt resource failed.\n"); - return -ENXIO; - } + ret = platform_get_irq(mixer_ctx->pdev, 0); + if (ret < 0) + return ret; + mixer_ctx->irq = ret; - ret = devm_request_irq(dev, res->start, mixer_irq_handler, - 0, "drm_mixer", mixer_ctx); + ret = devm_request_irq(dev, mixer_ctx->irq, mixer_irq_handler, + 0, "drm_mixer", mixer_ctx); if (ret) { dev_err(dev, "request interrupt failed.\n"); return ret; } - mixer_ctx->irq = res->start; return 0; } diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index a4c94dc2e216..cfd932514da2 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -101,6 +101,7 @@ config DRM_I915_USERPTR config DRM_I915_GVT bool "Enable Intel GVT-g graphics virtualization host support" depends on DRM_I915 + depends on X86 depends on 64BIT default n help diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 2da4aacc956b..8ac196e814d5 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -825,6 +825,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) unsigned int max_bw_point = 0, max_bw = 0; unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points; unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points; + bool changed = false; u32 mask = 0; /* FIXME earlier gens need some checks too */ @@ -868,6 +869,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) new_bw_state->data_rate[crtc->pipe] = new_data_rate; new_bw_state->num_active_planes[crtc->pipe] = new_active_planes; + changed = true; + drm_dbg_kms(&dev_priv->drm, "pipe %c data rate %u num active planes %u\n", pipe_name(crtc->pipe), @@ -875,7 +878,19 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) new_bw_state->num_active_planes[crtc->pipe]); } - if (!new_bw_state) + old_bw_state = 
intel_atomic_get_old_bw_state(state); + new_bw_state = intel_atomic_get_new_bw_state(state); + + if (new_bw_state && + intel_can_enable_sagv(dev_priv, old_bw_state) != + intel_can_enable_sagv(dev_priv, new_bw_state)) + changed = true; + + /* + * If none of our inputs (data rates, number of active + * planes, SAGV yes/no) changed then nothing to do here. + */ + if (!changed) return 0; ret = intel_atomic_lock_global_state(&new_bw_state->base); @@ -961,7 +976,6 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) */ new_bw_state->qgv_points_mask = ~allowed_points & mask; - old_bw_state = intel_atomic_get_old_bw_state(state); /* * If the actual mask had changed we need to make sure that * the commits are serialized(in case this is a nomodeset, nonblocking) diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index 46c6eecbd917..0ceaed1c9656 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -30,19 +30,19 @@ struct intel_bw_state { */ u8 pipe_sagv_reject; + /* bitmask of active pipes */ + u8 active_pipes; + /* * Current QGV points mask, which restricts * some particular SAGV states, not to confuse * with pipe_sagv_mask. */ - u8 qgv_points_mask; + u16 qgv_points_mask; unsigned int data_rate[I915_MAX_PIPES]; u8 num_active_planes[I915_MAX_PIPES]; - /* bitmask of active pipes */ - u8 active_pipes; - int min_cdclk; }; diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index bf7ce684dd8e..bb4a85445fc6 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -10673,6 +10673,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, vlv_wm_sanitize(dev_priv); } else if (DISPLAY_VER(dev_priv) >= 9) { skl_wm_get_hw_state(dev_priv); + skl_wm_sanitize(dev_priv); } else if (HAS_PCH_SPLIT(dev_priv)) { ilk_wm_get_hw_state(dev_priv); } diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c index c1439fcb5a95..3ff149df4a77 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -405,6 +405,7 @@ intel_drrs_init(struct intel_connector *connector, struct drm_display_mode *fixed_mode) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_encoder *encoder = connector->encoder; struct drm_display_mode *downclock_mode = NULL; INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work); @@ -416,6 +417,13 @@ intel_drrs_init(struct intel_connector *connector, return NULL; } + if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) && + encoder->port != PORT_A) { + drm_dbg_kms(&dev_priv->drm, + "DRRS only supported on eDP port A\n"); + return NULL; + } + if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); return NULL; diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 160fd2bdafe5..957feeccff3f 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1115,7 +1115,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ if (DISPLAY_VER(i915) >= 11 && - (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) { + (plane_state->view.color_plane[0].y + + (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) { plane_state->no_fbc_reason = "plane 
end Y offset misaligned"; return false; } diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 0065111593a6..4a2662838cd8 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -360,6 +360,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, port++; } + /* + * The port numbering and mapping here is bizarre. The now-obsolete + * swsci spec supports ports numbered [0..4]. Port E is handled as a + * special case, but port F and beyond are not. The functionality is + * supposed to be obsolete for new platforms. Just bail out if the port + * number is out of bounds after mapping. + */ + if (port > 4) { + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n", + intel_encoder->base.base.id, intel_encoder->base.name, + port_name(intel_encoder->port), port); + return -EINVAL; + } + if (!enable) parm |= 4 << 8; diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 1a376e9a1ff3..d610e48cab94 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -959,6 +959,9 @@ static int check_overlay_dst(struct intel_overlay *overlay, const struct intel_crtc_state *pipe_config = overlay->crtc->config; + if (rec->dst_height == 0 || rec->dst_width == 0) + return -EINVAL; + if (rec->dst_x < pipe_config->pipe_src_w && rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w && rec->dst_y < pipe_config->pipe_src_h && diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index a1a663f362e7..00279e8c2775 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -1406,6 +1406,13 @@ static inline u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME; } +static inline u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv) +{ + return IS_ALDERLAKE_P(dev_priv) ? 
+ ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE : + PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE; +} + static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -1510,7 +1517,13 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 val = PSR2_MAN_TRK_CTL_ENABLE; + u32 val = 0; + + if (!IS_ALDERLAKE_P(dev_priv)) + val = PSR2_MAN_TRK_CTL_ENABLE; + + /* SF partial frame enable has to be set even on full update */ + val |= man_trk_ctl_partial_frame_bit_get(dev_priv); if (full_update) { /* @@ -1530,7 +1543,6 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, } else { drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4); - val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE; val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1); val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1); } diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index 09f405e4d363..92ff654f54f5 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -34,7 +34,7 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv) if (intel_de_wait_for_clear(dev_priv, ICL_PHY_MISC(phy), DG2_PHY_DP_TX_ACK_MASK, 25)) DRM_ERROR("SNPS PHY %c failed to calibrate after 25ms.\n", - phy); + phy_name(phy)); } } diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 40faa18947c9..7784c30fe893 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -345,10 +345,11 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); struct intel_uncore *uncore = &i915->uncore; u32 val; - val = intel_uncore_read(uncore, TCSS_DDI_STATUS(dig_port->tc_phy_fia_idx)); + val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port)); if (val == 0xffffffff) { drm_dbg_kms(&i915->drm, "Port %s: PHY in TCCOLD, assuming not complete\n", @@ -690,6 +691,8 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_encoder *encoder = &dig_port->base; + intel_wakeref_t tc_cold_wref; + enum intel_display_power_domain domain; int active_links = 0; mutex_lock(&dig_port->tc_lock); @@ -701,12 +704,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED); drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref); - if (active_links) { - enum intel_display_power_domain domain; - intel_wakeref_t tc_cold_wref = tc_cold_block(dig_port, &domain); - dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); + tc_cold_wref = tc_cold_block(dig_port, &domain); + dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); + if (active_links) { if (!icl_tc_phy_is_connected(dig_port)) drm_dbg_kms(&i915->drm, "Port %s: PHY disconnected with %d active link(s)\n", @@ -715,10 +717,23 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) dig_port->tc_lock_wakeref = tc_cold_block(dig_port, 
&dig_port->tc_lock_power_domain); - - tc_cold_unblock(dig_port, domain, tc_cold_wref); + } else { + /* + * TBT-alt is the default mode whenever the PHY ownership is not + * held (regardless of the sink's connected live state), so + * we'll just switch to disconnected mode from it here without + * a note. + */ + if (dig_port->tc_mode != TC_PORT_TBT_ALT) + drm_dbg_kms(&i915->drm, + "Port %s: PHY left in %s mode on disabled port, disconnecting it\n", + dig_port->tc_port_name, + tc_port_mode_name(dig_port->tc_mode)); + icl_tc_phy_disconnect(dig_port); } + tc_cold_unblock(dig_port, domain, tc_cold_wref); + drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n", dig_port->tc_port_name, tc_port_mode_name(dig_port->tc_mode)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 3a5b247be738..1736efa43339 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2505,9 +2505,14 @@ static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce, timeout) < 0) { i915_request_put(rq); - tl = intel_context_timeline_lock(ce); + /* + * Error path, cannot use intel_context_timeline_lock as + * that is user interruptible and this cleanup step + * must be done. + */ + mutex_lock(&ce->timeline->mutex); intel_context_exit(ce); - intel_context_timeline_unlock(tl); + mutex_unlock(&ce->timeline->mutex); if (nonblock) return -EWOULDBLOCK; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index de3fe79b665a..1f880c8c66e7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -842,11 +842,9 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) } else if (obj->mm.madv != I915_MADV_WILLNEED) { bo->priority = I915_TTM_PRIO_PURGE; } else if (!i915_gem_object_has_pages(obj)) { - if (bo->priority < I915_TTM_PRIO_HAS_PAGES) - bo->priority = I915_TTM_PRIO_HAS_PAGES; + bo->priority = I915_TTM_PRIO_NO_PAGES; } else { - if (bo->priority > I915_TTM_PRIO_NO_PAGES) - bo->priority = I915_TTM_PRIO_NO_PAGES; + bo->priority = I915_TTM_PRIO_HAS_PAGES; } ttm_bo_move_to_lru_tail(bo, bo->resource, NULL); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c index ee9612a3ee5e..e130c820ae4e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c @@ -427,11 +427,17 @@ __i915_ttm_move(struct ttm_buffer_object *bo, if (!IS_ERR(fence)) goto out; - } else if (move_deps) { - int err = i915_deps_sync(move_deps, ctx); + } else { + int err = PTR_ERR(fence); + + if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN) + return fence; - if (err) - return ERR_PTR(err); + if (move_deps) { + err = i915_deps_sync(move_deps, ctx); + if (err) + return ERR_PTR(err); + } } /* Error intercept failed or no accelerated migration to start with */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index f9240d4baa69..3aabe164c329 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -206,6 +206,11 @@ struct intel_guc { * context usage for overflows.
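The eb_pin_timeline() hunk above trades intel_context_timeline_lock() for a bare mutex_lock() on its error path: the former honors signals, and a signal arriving mid-unwind would let the cleanup be skipped. The general rule, sketched with generic names (nothing below is i915 API):

#include <linux/mutex.h>

/* Acquisition on a user-driven path may honor signals... */
static int locked_op(struct mutex *lock)
{
	int err = mutex_lock_interruptible(lock);	/* may return -EINTR */

	if (err)
		return err;
	/* ... do the work ... */
	mutex_unlock(lock);
	return 0;
}

/* ...but an unwind path must always complete, so it may not fail. */
static void locked_cleanup(struct mutex *lock)
{
	mutex_lock(lock);	/* uninterruptible by design */
	/* ... cleanup that must not be skipped ... */
	mutex_unlock(lock);
}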
*/ struct delayed_work work; + + /** + * @shift: Right shift value for the gpm timestamp + */ + u32 shift; } timestamp; #ifdef CONFIG_DRM_I915_SELFTEST diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 13b27b8ff74e..ba21ace973da 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -110,7 +110,7 @@ static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id) { u32 request[] = { GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST, - SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 2), + SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1), id, }; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index e7517206af82..154ad726e266 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1113,6 +1113,19 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start) if (new_start == lower_32_bits(*prev_start)) return; + /* + * When gt is unparked, we update the gt timestamp and start the ping + * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt + * is unparked, all switched in contexts will have a start time that is + * within +/- POLL_TIME_CLKS of the most recent gt_stamp. + * + * If neither gt_stamp nor new_start has rolled over, then the + * gt_stamp_hi does not need to be adjusted, however if one of them has + * rolled over, we need to adjust gt_stamp_hi accordingly. + * + * The below conditions address the cases of new_start rollover and + * gt_stamp_last rollover respectively. + */ if (new_start < gt_stamp_last && (new_start - gt_stamp_last) <= POLL_TIME_CLKS) gt_stamp_hi++; @@ -1124,17 +1137,45 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start) *prev_start = ((u64)gt_stamp_hi << 32) | new_start; } -static void guc_update_engine_gt_clks(struct intel_engine_cs *engine) +/* + * GuC updates shared memory and KMD reads it. Since this is not synchronized, + * we run into a race where the value read is inconsistent. Sometimes the + * inconsistency is in reading the upper MSB bytes of the last_in value when + * this race occurs. 2 types of cases are seen - upper 8 bits are zero and upper + * 24 bits are zero. Since these are non-zero values, it is non-trivial to + * determine validity of these values. Instead we read the values multiple times + * until they are consistent. In test runs, 3 attempts result in consistent + * values. The upper bound is set to 6 attempts and may need to be tuned as per + * any new occurrences.
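__extend_last_switch() above widens the 32-bit timestamps GuC reports into the driver's 64-bit gt_stamp by comparing each new low word against the last known one, with POLL_TIME_CLKS as the plausible drift window. The rollover arithmetic in isolation, as a sketch (names are generic, not the driver's):

/* Sketch: extend a 32-bit counter sample to 64 bits near a recent reference. */
static u64 extend_counter32(u64 last_known, u32 new_lo, u32 max_delta)
{
	u32 last_lo = lower_32_bits(last_known);
	u32 hi = upper_32_bits(last_known);

	if (new_lo < last_lo && (new_lo - last_lo) <= max_delta)
		hi++;		/* sample wrapped past 2^32, reference did not */
	else if (new_lo > last_lo && (last_lo - new_lo) <= max_delta)
		hi--;		/* reference wrapped, sample is from before the wrap */

	return ((u64)hi << 32) | new_lo;
}

Both subtractions rely on unsigned wraparound: a "negative" distance shows up close to U32_MAX and fails the <= max_delta test unless a real rollover occurred.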
+ */ +static void __get_engine_usage_record(struct intel_engine_cs *engine, + u32 *last_in, u32 *id, u32 *total) { struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine); + int i = 0; + + do { + *last_in = READ_ONCE(rec->last_switch_in_stamp); + *id = READ_ONCE(rec->current_context_index); + *total = READ_ONCE(rec->total_runtime); + + if (READ_ONCE(rec->last_switch_in_stamp) == *last_in && + READ_ONCE(rec->current_context_index) == *id && + READ_ONCE(rec->total_runtime) == *total) + break; + } while (++i < 6); +} + +static void guc_update_engine_gt_clks(struct intel_engine_cs *engine) +{ struct intel_engine_guc_stats *stats = &engine->stats.guc; struct intel_guc *guc = &engine->gt->uc.guc; - u32 last_switch = rec->last_switch_in_stamp; - u32 ctx_id = rec->current_context_index; - u32 total = rec->total_runtime; + u32 last_switch, ctx_id, total; lockdep_assert_held(&guc->timestamp.lock); + __get_engine_usage_record(engine, &last_switch, &ctx_id, &total); + stats->running = ctx_id != ~0U && last_switch; if (stats->running) __extend_last_switch(guc, &stats->start_gt_clk, last_switch); @@ -1149,23 +1190,51 @@ static void guc_update_engine_gt_clks(struct intel_engine_cs *engine) } } -static void guc_update_pm_timestamp(struct intel_guc *guc, - struct intel_engine_cs *engine, - ktime_t *now) +static u32 gpm_timestamp_shift(struct intel_gt *gt) +{ + intel_wakeref_t wakeref; + u32 reg, shift; + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) + reg = intel_uncore_read(gt->uncore, RPM_CONFIG0); + + shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >> + GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT; + + return 3 - shift; +} + +static u64 gpm_timestamp(struct intel_gt *gt) +{ + u32 lo, hi, old_hi, loop = 0; + + hi = intel_uncore_read(gt->uncore, MISC_STATUS1); + do { + lo = intel_uncore_read(gt->uncore, MISC_STATUS0); + old_hi = hi; + hi = intel_uncore_read(gt->uncore, MISC_STATUS1); + } while (old_hi != hi && loop++ < 2); + + return ((u64)hi << 32) | lo; +} + +static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now) { - u32 gt_stamp_now, gt_stamp_hi; + struct intel_gt *gt = guc_to_gt(guc); + u32 gt_stamp_lo, gt_stamp_hi; + u64 gpm_ts; lockdep_assert_held(&guc->timestamp.lock); gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); - gt_stamp_now = intel_uncore_read(engine->uncore, - RING_TIMESTAMP(engine->mmio_base)); + gpm_ts = gpm_timestamp(gt) >> guc->timestamp.shift; + gt_stamp_lo = lower_32_bits(gpm_ts); *now = ktime_get(); - if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp)) + if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp)) gt_stamp_hi++; - guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now; + guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo; } /* @@ -1208,8 +1277,12 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) if (!in_reset && intel_gt_pm_get_if_awake(gt)) { stats_saved = *stats; gt_stamp_saved = guc->timestamp.gt_stamp; + /* + * Update gt_clks, then gt timestamp to simplify the 'gt_stamp - + * start_gt_clk' calculation below for active engines. 
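Two defensive read patterns land above: __get_engine_usage_record() rereads the GuC-shared fields until two passes agree, and gpm_timestamp() does the classic hi/lo/hi dance so a carry between the two 32-bit halves cannot produce a torn 64-bit value. The latter generalized, as a sketch (the accessor callbacks are placeholders):

/* Sketch: tear-free 64-bit read of a split counter (accessors are invented). */
static u64 read_split_counter(u32 (*read_lo)(void), u32 (*read_hi)(void))
{
	u32 lo, hi, old_hi;
	int loop = 0;

	hi = read_hi();
	do {
		lo = read_lo();
		old_hi = hi;
		hi = read_hi();	/* if this moved, lo may straddle the carry */
	} while (old_hi != hi && loop++ < 2);

	return ((u64)hi << 32) | lo;
}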
+ */ guc_update_engine_gt_clks(engine); - guc_update_pm_timestamp(guc, engine, now); + guc_update_pm_timestamp(guc, now); intel_gt_pm_put_async(gt); if (i915_reset_count(gpu_error) != reset_count) { *stats = stats_saved; guc->timestamp.gt_stamp = gt_stamp_saved; @@ -1241,8 +1314,8 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc) spin_lock_irqsave(&guc->timestamp.lock, flags); + guc_update_pm_timestamp(guc, &unused); for_each_engine(engine, gt, id) { - guc_update_pm_timestamp(guc, engine, &unused); guc_update_engine_gt_clks(engine); engine->stats.guc.prev_total = 0; } @@ -1259,10 +1332,11 @@ static void __update_guc_busyness_stats(struct intel_guc *guc) ktime_t unused; spin_lock_irqsave(&guc->timestamp.lock, flags); - for_each_engine(engine, gt, id) { - guc_update_pm_timestamp(guc, engine, &unused); + + guc_update_pm_timestamp(guc, &unused); + for_each_engine(engine, gt, id) guc_update_engine_gt_clks(engine); - } + spin_unlock_irqrestore(&guc->timestamp.lock, flags); } @@ -1335,10 +1409,15 @@ void intel_guc_busyness_park(struct intel_gt *gt) void intel_guc_busyness_unpark(struct intel_gt *gt) { struct intel_guc *guc = &gt->uc.guc; + unsigned long flags; + ktime_t unused; if (!guc_submission_initialized(guc)) return; + spin_lock_irqsave(&guc->timestamp.lock, flags); + guc_update_pm_timestamp(guc, &unused); + spin_unlock_irqrestore(&guc->timestamp.lock, flags); mod_delayed_work(system_highpri_wq, &guc->timestamp.work, guc->timestamp.ping_delay); } @@ -1783,6 +1862,7 @@ int intel_guc_submission_init(struct intel_guc *guc) spin_lock_init(&guc->timestamp.lock); INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping); guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ; + guc->timestamp.shift = gpm_timestamp_shift(gt); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 99d1781fa5f0..af79b39048f7 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1148,7 +1148,7 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, ops->set_pfn(se, s->shadow_page.mfn); } -/** +/* * Check if can do 2M page * @vgpu: target vgpu * @entry: target pfn's gtt entry @@ -2193,7 +2193,7 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, } /** - * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read + * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read * @vgpu: a vGPU * @off: register offset * @p_data: data will be returned to guest diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 5ae812d60abe..0633888a411e 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1522,7 +1522,7 @@ capture_engine(struct intel_engine_cs *engine, struct i915_request *rq = NULL; unsigned long flags; - ee = intel_engine_coredump_alloc(engine, GFP_KERNEL); + ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL); if (!ee) return NULL; diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h index 76f1d53bdf34..3ad22bbe80eb 100644 --- a/drivers/gpu/drm/i915/i915_mm.h +++ b/drivers/gpu/drm/i915/i915_mm.h @@ -6,6 +6,7 @@ #ifndef __I915_MM_H__ #define __I915_MM_H__ +#include <linux/bug.h> #include <linux/types.h> struct vm_area_struct; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c32420cb8ed5..902e4c802a12 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2684,7 +2684,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) 
#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ -#define GUCPMTIMESTAMP _MMIO(0xC3E8) +#define MISC_STATUS0 _MMIO(0xA500) +#define MISC_STATUS1 _MMIO(0xA504) /* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */ #define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8) @@ -4828,6 +4829,7 @@ enum { #define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val) REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val) #define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK REG_GENMASK(12, 0) #define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val) REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val) +#define ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE REG_BIT(31) #define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14) #define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13) diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index da8f82c2342f..fc8a68f3a2ed 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -108,6 +108,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) /* Comet Lake V PCH is based on KBP, which is SPT compatible */ return PCH_SPT; case INTEL_PCH_ICP_DEVICE_ID_TYPE: + case INTEL_PCH_ICP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); return PCH_ICP; @@ -123,7 +124,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) !IS_GEN9_BC(dev_priv)); return PCH_TGP; case INTEL_PCH_JSP_DEVICE_ID_TYPE: - case INTEL_PCH_JSP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv)); return PCH_JSP; diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index 6bff77521094..4ba0f1967cca 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -50,11 +50,11 @@ enum intel_pch { #define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680 #define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 +#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00 #define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080 #define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380 #define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80 -#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80 #define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180 #define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 434b1f8b7fe3..fae4f7818d28 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4029,6 +4029,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) return ret; } + if (intel_can_enable_sagv(dev_priv, new_bw_state) != + intel_can_enable_sagv(dev_priv, old_bw_state)) { + ret = intel_atomic_serialize_global_state(&new_bw_state->base); + if (ret) + return ret; + } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; + } + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; @@ -4044,17 +4055,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) intel_can_enable_sagv(dev_priv, new_bw_state); } - if (intel_can_enable_sagv(dev_priv, 
new_bw_state) != - intel_can_enable_sagv(dev_priv, old_bw_state)) { - ret = intel_atomic_serialize_global_state(&new_bw_state->base); - if (ret) - return ret; - } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - return 0; } @@ -4717,6 +4717,10 @@ static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = { }; static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = { + /* + * Keep the join_mbus cases first so check_mbus_joined() + * will prefer them over the !join_mbus cases. + */ { .active_pipes = BIT(PIPE_A), .dbuf_mask = { @@ -4732,6 +4736,20 @@ static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = { .join_mbus = true, }, { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + .join_mbus = false, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + .join_mbus = false, + }, + { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), @@ -4835,7 +4853,7 @@ static bool check_mbus_joined(u8 active_pipes, { int i; - for (i = 0; i < dbuf_slices[i].active_pipes; i++) { + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { if (dbuf_slices[i].active_pipes == active_pipes) return dbuf_slices[i].join_mbus; } @@ -4847,13 +4865,14 @@ static bool adlp_check_mbus_joined(u8 active_pipes) return check_mbus_joined(active_pipes, adlp_allowed_dbufs); } -static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, +static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus, const struct dbuf_slice_conf_entry *dbuf_slices) { int i; - for (i = 0; i < dbuf_slices[i].active_pipes; i++) { - if (dbuf_slices[i].active_pipes == active_pipes) + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { + if (dbuf_slices[i].active_pipes == active_pipes && + dbuf_slices[i].join_mbus == join_mbus) return dbuf_slices[i].dbuf_mask[pipe]; } return 0; @@ -4864,7 +4883,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, * returns correspondent DBuf slice mask as stated in BSpec for particular * platform. */ -static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) +static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) { /* * FIXME: For ICL this is still a bit unclear as prev BSpec revision @@ -4878,37 +4897,41 @@ static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) * still here - we will need it once those additional constraints * pop up. 
*/ - return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs); + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + icl_allowed_dbufs); } -static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) +static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) { - return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs); + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + tgl_allowed_dbufs); } -static u32 adlp_compute_dbuf_slices(enum pipe pipe, u32 active_pipes) +static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) { - return compute_dbuf_slices(pipe, active_pipes, adlp_allowed_dbufs); + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + adlp_allowed_dbufs); } -static u32 dg2_compute_dbuf_slices(enum pipe pipe, u32 active_pipes) +static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) { - return compute_dbuf_slices(pipe, active_pipes, dg2_allowed_dbufs); + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + dg2_allowed_dbufs); } -static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes) +static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; if (IS_DG2(dev_priv)) - return dg2_compute_dbuf_slices(pipe, active_pipes); + return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus); else if (IS_ALDERLAKE_P(dev_priv)) - return adlp_compute_dbuf_slices(pipe, active_pipes); + return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus); else if (DISPLAY_VER(dev_priv) == 12) - return tgl_compute_dbuf_slices(pipe, active_pipes); + return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus); else if (DISPLAY_VER(dev_priv) == 11) - return icl_compute_dbuf_slices(pipe, active_pipes); + return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus); /* * For anything else just return one slice yet. * Should be extended for other platforms. 
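Two details of the compute_dbuf_slices()/check_mbus_joined() fixes above are easy to miss in the churn: the dbuf config tables are terminated by an all-zero sentinel entry, and the old loop condition "i < dbuf_slices[i].active_pipes" only terminated by accident when an entry's pipe mask happened to exceed its index; the lookup also now has to match join_mbus in addition to active_pipes. A minimal stand-alone sketch of the corrected lookup pattern follows; the table values are invented for illustration and are not real ADL-P data.

/* sketch: sentinel-terminated config table lookup (illustrative only) */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct slice_conf {
	uint8_t active_pipes;	/* bitmask of active pipes; 0 ends the table */
	bool join_mbus;
	uint8_t dbuf_mask;
};

/* Keep the join_mbus entries first so a joined config is preferred. */
static const struct slice_conf table[] = {
	{ .active_pipes = 0x1, .join_mbus = true,  .dbuf_mask = 0xf },
	{ .active_pipes = 0x1, .join_mbus = false, .dbuf_mask = 0x3 },
	{ .active_pipes = 0x3, .join_mbus = false, .dbuf_mask = 0x3 },
	{ 0 }	/* sentinel: active_pipes == 0 terminates the walk */
};

static uint8_t lookup_slices(uint8_t active_pipes, bool join_mbus)
{
	int i;

	/* Walk to the sentinel; match on both keys, as in the fix. */
	for (i = 0; table[i].active_pipes != 0; i++) {
		if (table[i].active_pipes == active_pipes &&
		    table[i].join_mbus == join_mbus)
			return table[i].dbuf_mask;
	}
	return 0;	/* no entry for this combination */
}

int main(void)
{
	printf("0x%x\n", lookup_slices(0x1, false));	/* prints 0x3 */
	printf("0x%x\n", lookup_slices(0x1, true));	/* prints 0xf */
	return 0;
}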
@@ -6127,11 +6150,16 @@ skl_compute_ddb(struct intel_atomic_state *state) return ret; } + if (IS_ALDERLAKE_P(dev_priv)) + new_dbuf_state->joined_mbus = + adlp_check_mbus_joined(new_dbuf_state->active_pipes); + for_each_intel_crtc(&dev_priv->drm, crtc) { enum pipe pipe = crtc->pipe; new_dbuf_state->slices[pipe] = - skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes); + skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes, + new_dbuf_state->joined_mbus); if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe]) continue; @@ -6143,9 +6171,6 @@ skl_compute_ddb(struct intel_atomic_state *state) new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state); - if (IS_ALDERLAKE_P(dev_priv)) - new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes); - if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices || old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { ret = intel_atomic_serialize_global_state(&new_dbuf_state->base); @@ -6626,6 +6651,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) enum pipe pipe = crtc->pipe; unsigned int mbus_offset; enum plane_id plane_id; + u8 slices; skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal; @@ -6645,19 +6671,22 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv); } - dbuf_state->slices[pipe] = - skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes); - dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state); /* * Used for checking overlaps, so we need absolute * offsets instead of MBUS relative offsets. */ - mbus_offset = mbus_ddb_offset(dev_priv, dbuf_state->slices[pipe]); + slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, + dbuf_state->joined_mbus); + mbus_offset = mbus_ddb_offset(dev_priv, slices); crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start; crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end; + /* The slices actually used by the planes on the pipe */ + dbuf_state->slices[pipe] = + skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb); + drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n", crtc->base.base.id, crtc->base.name, @@ -6669,6 +6698,74 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices; } +static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) +{ + const struct intel_dbuf_state *dbuf_state = + to_intel_dbuf_state(i915->dbuf.obj.state); + struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; + struct intel_crtc *crtc; + + for_each_intel_crtc(&i915->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + entries[crtc->pipe] = crtc_state->wm.skl.ddb; + } + + for_each_intel_crtc(&i915->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + u8 slices; + + slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, + dbuf_state->joined_mbus); + if (dbuf_state->slices[crtc->pipe] & ~slices) + return true; + + if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries, + I915_MAX_PIPES, crtc->pipe)) + return true; + } + + return false; +} + +void skl_wm_sanitize(struct drm_i915_private *i915) +{ + struct intel_crtc *crtc; + + /* + * On TGL/RKL (at least) the BIOS likes to assign the planes + * to the wrong DBUF 
slices. This will cause an infinite loop + * in skl_commit_modeset_enables() as it can't find a way to + * transition between the old bogus DBUF layout to the new + * proper DBUF layout without DBUF allocation overlaps between + * the planes (which cannot be allowed or else the hardware + * may hang). If we detect a bogus DBUF layout just turn off + * all the planes so that skl_commit_modeset_enables() can + * simply ignore them. + */ + if (!skl_dbuf_is_misconfigured(i915)) + return; + + drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n"); + + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + if (plane_state->uapi.visible) + intel_plane_disable_noatomic(crtc, plane); + + drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0); + + memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb)); + } +} + static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index 990cdcaf85ce..d2243653a893 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -47,6 +47,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, struct skl_pipe_wm *out); void g4x_wm_sanitize(struct drm_i915_private *dev_priv); void vlv_wm_sanitize(struct drm_i915_private *dev_priv); +void skl_wm_sanitize(struct drm_i915_private *dev_priv); bool intel_can_enable_sagv(struct drm_i915_private *dev_priv, const struct intel_bw_state *bw_state); void intel_sagv_pre_plane_update(struct intel_atomic_state *state); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 53f1ccb78849..64c2708efc9e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -68,9 +68,7 @@ static noinline depot_stack_handle_t __save_depot_stack(void) static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { spin_lock_init(&rpm->debug.lock); - - if (rpm->available) - stack_depot_init(); + stack_depot_init(); } static noinline depot_stack_handle_t diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig index 7374f1952762..5c2b2277afbf 100644 --- a/drivers/gpu/drm/imx/dcss/Kconfig +++ b/drivers/gpu/drm/imx/dcss/Kconfig @@ -2,6 +2,7 @@ config DRM_IMX_DCSS tristate "i.MX8MQ DCSS" select IMX_IRQSTEER select DRM_KMS_HELPER + select DRM_GEM_CMA_HELPER select VIDEOMODE_HELPERS depends on DRM && ARCH_MXC && ARM64 help diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index a8aba0141ce7..06cb1a59b9bc 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -217,14 +217,6 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, if (!imx_pd_format_supported(bus_fmt)) return -EINVAL; - if (bus_flags & - ~(DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_DE_HIGH | - DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | - DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)) { - dev_warn(imxpd->dev, "invalid bus_flags (%x)\n", bus_flags); - return -EINVAL; - } - bridge_state->output_bus_cfg.flags = bus_flags; bridge_state->input_bus_cfg.flags = bus_flags; imx_crtc_state->bus_flags = bus_flags; diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c 
index 00404ba4126d..2735b8eb3537 100644 --- a/drivers/gpu/drm/kmb/kmb_plane.c +++ b/drivers/gpu/drm/kmb/kmb_plane.c @@ -158,12 +158,6 @@ static void kmb_plane_atomic_disable(struct drm_plane *plane, case LAYER_1: kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL2_ENABLE; break; - case LAYER_2: - kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL1_ENABLE; - break; - case LAYER_3: - kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL2_ENABLE; - break; } kmb->plane_status[plane_id].disable = true; diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 5d90d2eb0019..bced4c7d668e 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -786,18 +786,101 @@ void mtk_dsi_ddp_stop(struct device *dev) mtk_dsi_poweroff(dsi); } +static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi) +{ + int ret; + + ret = drm_simple_encoder_init(drm, &dsi->encoder, + DRM_MODE_ENCODER_DSI); + if (ret) { + DRM_ERROR("Failed to encoder init to drm\n"); + return ret; + } + + dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev); + + ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + goto err_cleanup_encoder; + + dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder); + if (IS_ERR(dsi->connector)) { + DRM_ERROR("Unable to create bridge connector\n"); + ret = PTR_ERR(dsi->connector); + goto err_cleanup_encoder; + } + drm_connector_attach_encoder(dsi->connector, &dsi->encoder); + + return 0; + +err_cleanup_encoder: + drm_encoder_cleanup(&dsi->encoder); + return ret; +} + +static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) +{ + int ret; + struct drm_device *drm = data; + struct mtk_dsi *dsi = dev_get_drvdata(dev); + + ret = mtk_dsi_encoder_init(drm, dsi); + if (ret) + return ret; + + return device_reset_optional(dev); +} + +static void mtk_dsi_unbind(struct device *dev, struct device *master, + void *data) +{ + struct mtk_dsi *dsi = dev_get_drvdata(dev); + + drm_encoder_cleanup(&dsi->encoder); +} + +static const struct component_ops mtk_dsi_component_ops = { + .bind = mtk_dsi_bind, + .unbind = mtk_dsi_unbind, +}; + static int mtk_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *device) { struct mtk_dsi *dsi = host_to_dsi(host); + struct device *dev = host->dev; + int ret; dsi->lanes = device->lanes; dsi->format = device->format; dsi->mode_flags = device->mode_flags; + dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0); + if (IS_ERR(dsi->next_bridge)) + return PTR_ERR(dsi->next_bridge); + + drm_bridge_add(&dsi->bridge); + + ret = component_add(host->dev, &mtk_dsi_component_ops); + if (ret) { + DRM_ERROR("failed to add dsi_host component: %d\n", ret); + drm_bridge_remove(&dsi->bridge); + return ret; + } return 0; } +static int mtk_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct mtk_dsi *dsi = host_to_dsi(host); + + component_del(host->dev, &mtk_dsi_component_ops); + drm_bridge_remove(&dsi->bridge); + return 0; +} + static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi) { int ret; @@ -938,73 +1021,14 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host, static const struct mipi_dsi_host_ops mtk_dsi_ops = { .attach = mtk_dsi_host_attach, + .detach = mtk_dsi_host_detach, .transfer = mtk_dsi_host_transfer, }; -static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi) -{ - int ret; - - ret = drm_simple_encoder_init(drm, 
&dsi->encoder, - DRM_MODE_ENCODER_DSI); - if (ret) { - DRM_ERROR("Failed to encoder init to drm\n"); - return ret; - } - - dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev); - - ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL, - DRM_BRIDGE_ATTACH_NO_CONNECTOR); - if (ret) - goto err_cleanup_encoder; - - dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder); - if (IS_ERR(dsi->connector)) { - DRM_ERROR("Unable to create bridge connector\n"); - ret = PTR_ERR(dsi->connector); - goto err_cleanup_encoder; - } - drm_connector_attach_encoder(dsi->connector, &dsi->encoder); - - return 0; - -err_cleanup_encoder: - drm_encoder_cleanup(&dsi->encoder); - return ret; -} - -static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) -{ - int ret; - struct drm_device *drm = data; - struct mtk_dsi *dsi = dev_get_drvdata(dev); - - ret = mtk_dsi_encoder_init(drm, dsi); - if (ret) - return ret; - - return device_reset_optional(dev); -} - -static void mtk_dsi_unbind(struct device *dev, struct device *master, - void *data) -{ - struct mtk_dsi *dsi = dev_get_drvdata(dev); - - drm_encoder_cleanup(&dsi->encoder); -} - -static const struct component_ops mtk_dsi_component_ops = { - .bind = mtk_dsi_bind, - .unbind = mtk_dsi_unbind, -}; - static int mtk_dsi_probe(struct platform_device *pdev) { struct mtk_dsi *dsi; struct device *dev = &pdev->dev; - struct drm_panel *panel; struct resource *regs; int irq_num; int ret; @@ -1021,19 +1045,6 @@ static int mtk_dsi_probe(struct platform_device *pdev) return ret; } - ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, - &panel, &dsi->next_bridge); - if (ret) - goto err_unregister_host; - - if (panel) { - dsi->next_bridge = devm_drm_panel_bridge_add(dev, panel); - if (IS_ERR(dsi->next_bridge)) { - ret = PTR_ERR(dsi->next_bridge); - goto err_unregister_host; - } - } - dsi->driver_data = of_device_get_match_data(dev); dsi->engine_clk = devm_clk_get(dev, "engine"); @@ -1098,14 +1109,6 @@ static int mtk_dsi_probe(struct platform_device *pdev) dsi->bridge.of_node = dev->of_node; dsi->bridge.type = DRM_MODE_CONNECTOR_DSI; - drm_bridge_add(&dsi->bridge); - - ret = component_add(&pdev->dev, &mtk_dsi_component_ops); - if (ret) { - dev_err(&pdev->dev, "failed to add component: %d\n", ret); - goto err_unregister_host; - } - return 0; err_unregister_host: @@ -1118,8 +1121,6 @@ static int mtk_dsi_remove(struct platform_device *pdev) struct mtk_dsi *dsi = platform_get_drvdata(pdev); mtk_output_dsi_disable(dsi); - drm_bridge_remove(&dsi->bridge); - component_del(&pdev->dev, &mtk_dsi_component_ops); mipi_dsi_host_unregister(&dsi->host); return 0; diff --git a/drivers/gpu/drm/mgag200/mgag200_pll.c b/drivers/gpu/drm/mgag200/mgag200_pll.c index e9ae22b4f813..52be08b744ad 100644 --- a/drivers/gpu/drm/mgag200/mgag200_pll.c +++ b/drivers/gpu/drm/mgag200/mgag200_pll.c @@ -404,9 +404,9 @@ mgag200_pixpll_update_g200wb(struct mgag200_pll *pixpll, const struct mgag200_pl udelay(50); /* program pixel pll register */ - WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn); - WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm); - WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp); + WREG_DAC(MGA1064_WB_PIX_PLLC_N, xpixpllcn); + WREG_DAC(MGA1064_WB_PIX_PLLC_M, xpixpllcm); + WREG_DAC(MGA1064_WB_PIX_PLLC_P, xpixpllcp); udelay(50); diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index 0655582ae8ed..4cfb6c001679 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -361,7 +361,11 @@ static void 
mxsfb_crtc_atomic_enable(struct drm_crtc *crtc, bridge_state = drm_atomic_get_new_bridge_state(state, mxsfb->bridge); - bus_format = bridge_state->input_bus_cfg.format; + if (!bridge_state) + bus_format = MEDIA_BUS_FMT_FIXED; + else + bus_format = bridge_state->input_bus_cfg.format; + if (bus_format == MEDIA_BUS_FMT_FIXED) { dev_warn_once(drm->dev, "Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n" diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c index d0f52d59fc2f..64e423dddd9e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c @@ -38,7 +38,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size) *addr += bios->imaged_addr; } - if (unlikely(*addr + size >= bios->size)) { + if (unlikely(*addr + size > bios->size)) { nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr); return false; } diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 434c2861bb40..9989a316fe88 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -106,6 +106,8 @@ config DRM_PANEL_EDP depends on PM select VIDEOMODE_HELPERS select DRM_DP_AUX_BUS + select DRM_DP_HELPER + select DRM_KMS_HELPER help DRM panel driver for dumb eDP panels that need at most a regulator and a GPIO to be powered up. Optionally a backlight can be attached so diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 9e46db5e359c..b42c1d816e79 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -588,6 +588,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) err = panel_dpi_probe(dev, panel); if (err) goto free_ddc; + desc = panel->desc; } else { if (!of_get_display_timing(dev->of_node, "panel-timing", &dt)) panel_simple_parse_panel_timing_node(dev, panel, &dt); @@ -2016,7 +2017,7 @@ static const struct display_timing innolux_g070y2_l01_timing = { static const struct panel_desc innolux_g070y2_l01 = { .timings = &innolux_g070y2_l01_timing, .num_timings = 1, - .bpc = 6, + .bpc = 8, .size = { .width = 152, .height = 91, diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 0fce73b9a646..70bd84b7ef2b 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -198,7 +198,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, * so don't register a backlight device */ if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && - (rdev->pdev->device == 0x6741)) + (rdev->pdev->device == 0x6741) && + !dmi_match(DMI_PRODUCT_NAME, "iMac12,1")) return; if (!radeon_encoder->enc_priv) diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 377f9cdb5b53..84013faa4756 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -470,8 +470,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, int32_t *msg, msg_type, handle; unsigned img_size = 0; void *ptr; - - int i, r; + long r; + int i; if (offset & 0x3F) { DRM_ERROR("UVD messages must be 64 byte aligned!\n"); @@ -481,13 +481,13 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, MAX_SCHEDULE_TIMEOUT); if (r <= 0) { - DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); + 
DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r); return r ? r : -ETIME; } r = radeon_bo_kmap(bo, &ptr); if (r) { - DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); + DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r); return r; } diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 830bdd5e9b7c..8677c8271678 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, return ret; } - ret = clk_prepare_enable(hdmi->vpll_clk); - if (ret) { - DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n", - ret); - return ret; - } - hdmi->phy = devm_phy_optional_get(dev, "hdmi"); if (IS_ERR(hdmi->phy)) { ret = PTR_ERR(hdmi->phy); @@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, return ret; } + ret = clk_prepare_enable(hdmi->vpll_clk); + if (ret) { + DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n", + ret); + return ret; + } + drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs); drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index 1f7353f0684a..798b542e5916 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -902,6 +902,7 @@ static const struct vop_win_phy rk3399_win01_data = { .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0), .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1), .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12), + .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21), .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22), .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0), .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0), @@ -912,6 +913,7 @@ static const struct vop_win_phy rk3399_win01_data = { .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16), .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0), .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0), + .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0), }; /* @@ -922,11 +924,11 @@ static const struct vop_win_phy rk3399_win01_data = { static const struct vop_win_data rk3399_vop_win_data[] = { { .base = 0x00, .phy = &rk3399_win01_data, .type = DRM_PLANE_TYPE_PRIMARY }, - { .base = 0x40, .phy = &rk3288_win01_data, + { .base = 0x40, .phy = &rk3368_win01_data, .type = DRM_PLANE_TYPE_OVERLAY }, - { .base = 0x00, .phy = &rk3288_win23_data, + { .base = 0x00, .phy = &rk3368_win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, - { .base = 0x50, .phy = &rk3288_win23_data, + { .base = 0x50, .phy = &rk3368_win23_data, .type = DRM_PLANE_TYPE_CURSOR }, }; diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h index 145833a9d82d..5b3fbee18671 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h @@ -111,10 +111,10 @@ /* format 13 is semi-planar YUV411 VUVU */ #define SUN8I_MIXER_FBFMT_YUV411 14 /* format 15 doesn't exist */ -/* format 16 is P010 YVU */ -#define SUN8I_MIXER_FBFMT_P010_YUV 17 -/* format 18 is P210 YVU */ -#define SUN8I_MIXER_FBFMT_P210_YUV 19 +#define SUN8I_MIXER_FBFMT_P010_YUV 16 +/* format 17 is P010 YVU */ +#define SUN8I_MIXER_FBFMT_P210_YUV 18 +/* format 19 is P210 YVU */ /* format 20 is packed YVU444 10-bit */ /* format 21 is packed YUV444 10-bit */ diff --git a/drivers/gpu/drm/tegra/Kconfig 
b/drivers/gpu/drm/tegra/Kconfig index 8cf5aeb9db6c..201f5175ecfe 100644 --- a/drivers/gpu/drm/tegra/Kconfig +++ b/drivers/gpu/drm/tegra/Kconfig @@ -5,6 +5,7 @@ config DRM_TEGRA depends on COMMON_CLK depends on DRM depends on OF + select DRM_DP_AUX_BUS select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c index 1f96e416fa08..d7a731d287d2 100644 --- a/drivers/gpu/drm/tegra/dpaux.c +++ b/drivers/gpu/drm/tegra/dpaux.c @@ -19,6 +19,7 @@ #include <linux/workqueue.h> #include <drm/drm_dp_helper.h> +#include <drm/drm_dp_aux_bus.h> #include <drm/drm_panel.h> #include "dp.h" @@ -570,6 +571,12 @@ static int tegra_dpaux_probe(struct platform_device *pdev) list_add_tail(&dpaux->list, &dpaux_list); mutex_unlock(&dpaux_lock); + err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux); + if (err < 0) { + dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err); + return err; + } + return 0; } diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c index 223ab2ceb7e6..3762d87759d9 100644 --- a/drivers/gpu/drm/tegra/falcon.c +++ b/drivers/gpu/drm/tegra/falcon.c @@ -63,7 +63,7 @@ static void falcon_copy_firmware_image(struct falcon *falcon, /* copy the whole thing taking into account endianness */ for (i = 0; i < firmware->size / sizeof(u32); i++) - virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]); + virt[i] = le32_to_cpu(((__le32 *)firmware->data)[i]); } static int falcon_parse_firmware_image(struct falcon *falcon) diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 287dbc89ad64..783890e8d43a 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -525,9 +525,11 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc) if (ret) return ret; - ret = pm_runtime_put(&vc4_hdmi->pdev->dev); - if (ret) - return ret; + /* + * post_crtc_powerdown will have called pm_runtime_put, so we + * don't need it here; otherwise we'll get the reference counting + * wrong. 
+ */ return 0; } @@ -671,7 +673,6 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc, const struct drm_display_mode *mode = &crtc_state->adjusted_mode; struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); - mode = &crtc_state->adjusted_mode; if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) { vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000, mode->clock * 9 / 10) * 1000; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 053fbaf765ca..3a1626f261e5 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -196,14 +196,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force) if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) connected = true; } else { - unsigned long flags; - u32 hotplug; - - spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); - hotplug = HDMI_READ(HDMI_HOTPLUG); - spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); - - if (hotplug & VC4_HDMI_HOTPLUG_CONNECTED) + if (vc4_hdmi->variant->hp_detect && + vc4_hdmi->variant->hp_detect(vc4_hdmi)) connected = true; } @@ -1251,6 +1245,7 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder, unsigned long long tmds_rate; if (vc4_hdmi->variant->unsupported_odd_h_timings && + !(mode->flags & DRM_MODE_FLAG_DBLCLK) && ((mode->hdisplay % 2) || (mode->hsync_start % 2) || (mode->hsync_end % 2) || (mode->htotal % 2))) return -EINVAL; @@ -1298,6 +1293,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder, struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); if (vc4_hdmi->variant->unsupported_odd_h_timings && + !(mode->flags & DRM_MODE_FLAG_DBLCLK) && ((mode->hdisplay % 2) || (mode->hsync_start % 2) || (mode->hsync_end % 2) || (mode->htotal % 2))) return MODE_H_ILLEGAL; @@ -1343,6 +1339,18 @@ static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask) return channel_map; } +static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi) +{ + unsigned long flags; + u32 hotplug; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + hotplug = HDMI_READ(HDMI_HOTPLUG); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + + return !!(hotplug & VC4_HDMI_HOTPLUG_CONNECTED); +} + /* HDMI audio codec callbacks */ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate) @@ -1741,6 +1749,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) dev_err(dev, "Couldn't register the HDMI codec: %ld\n", PTR_ERR(codec_pdev)); return PTR_ERR(codec_pdev); } + vc4_hdmi->audio.codec_pdev = codec_pdev; dai_link->cpus = &vc4_hdmi->audio.cpu; dai_link->codecs = &vc4_hdmi->audio.codec; @@ -1780,6 +1789,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) } +static void vc4_hdmi_audio_exit(struct vc4_hdmi *vc4_hdmi) +{ + platform_device_unregister(vc4_hdmi->audio.codec_pdev); + vc4_hdmi->audio.codec_pdev = NULL; +} + static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv) { struct vc4_hdmi *vc4_hdmi = priv; @@ -2504,7 +2519,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) * vc4_hdmi_disable_scrambling() will thus run at boot, make * sure it's disabled, and avoid any inconsistency. 
*/ - vc4_hdmi->scdc_enabled = true; + if (variant->max_pixel_clock > HDMI_14_MAX_TMDS_CLK) + vc4_hdmi->scdc_enabled = true; ret = variant->init_resources(vc4_hdmi); if (ret) @@ -2651,6 +2667,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master, kfree(vc4_hdmi->hdmi_regset.regs); kfree(vc4_hdmi->hd_regset.regs); + vc4_hdmi_audio_exit(vc4_hdmi); vc4_hdmi_cec_exit(vc4_hdmi); vc4_hdmi_hotplug_exit(vc4_hdmi); vc4_hdmi_connector_destroy(&vc4_hdmi->connector); @@ -2723,6 +2740,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = { .phy_rng_disable = vc5_hdmi_phy_rng_disable, .channel_map = vc5_hdmi_channel_map, .supports_hdr = true, + .hp_detect = vc5_hdmi_hp_detect, }; static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = { @@ -2751,6 +2769,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = { .phy_rng_disable = vc5_hdmi_phy_rng_disable, .channel_map = vc5_hdmi_channel_map, .supports_hdr = true, + .hp_detect = vc5_hdmi_hp_detect, }; static const struct of_device_id vc4_hdmi_dt_match[] = { diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index 36c0b082a43b..6ffdd4ec5fb6 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -102,6 +102,9 @@ struct vc4_hdmi_variant { /* Enables HDR metadata */ bool supports_hdr; + + /* Callback for hardware specific hotplug detect */ + bool (*hp_detect)(struct vc4_hdmi *vc4_hdmi); }; /* HDMI audio information */ @@ -113,6 +116,7 @@ struct vc4_hdmi_audio { struct snd_soc_dai_link_component platform; struct snd_dmaengine_dai_dma_data dma_data; struct hdmi_audio_infoframe infoframe; + struct platform_device *codec_pdev; bool streaming; }; diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c index e08e331e46ae..f87a8705f518 100644 --- a/drivers/gpu/host1x/syncpt.c +++ b/drivers/gpu/host1x/syncpt.c @@ -137,8 +137,15 @@ void host1x_syncpt_restore(struct host1x *host) struct host1x_syncpt *sp_base = host->syncpt; unsigned int i; - for (i = 0; i < host1x_syncpt_nb_pts(host); i++) + for (i = 0; i < host1x_syncpt_nb_pts(host); i++) { + /* + * Unassign syncpt from channels for purposes of Tegra186 + * syncpoint protection. This prevents any channel from + * accessing it until it is reassigned. + */ + host1x_hw_syncpt_assign_to_channel(host, sp_base + i, NULL); host1x_hw_syncpt_restore(host, sp_base + i); + } for (i = 0; i < host1x_syncpt_nb_bases(host); i++) host1x_hw_syncpt_restore_wait_base(host, sp_base + i); @@ -227,27 +234,12 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, void *ref; struct host1x_waitlist *waiter; int err = 0, check_count = 0; - u32 val; if (value) - *value = 0; - - /* first check cache */ - if (host1x_syncpt_is_expired(sp, thresh)) { - if (value) - *value = host1x_syncpt_load(sp); + *value = host1x_syncpt_load(sp); + if (host1x_syncpt_is_expired(sp, thresh)) return 0; - } - - /* try to read from register */ - val = host1x_hw_syncpt_load(sp->host, sp); - if (host1x_syncpt_is_expired(sp, thresh)) { - if (value) - *value = val; - - goto done; - } if (!timeout) { err = -EAGAIN; @@ -352,13 +344,6 @@ int host1x_syncpt_init(struct host1x *host) for (i = 0; i < host->info->nb_pts; i++) { syncpt[i].id = i; syncpt[i].host = host; - - /* - * Unassign syncpt from channels for purposes of Tegra186 - * syncpoint protection. This prevents any channel from - * accessing it until it is reassigned. 
- */ - host1x_hw_syncpt_assign_to_channel(host, &syncpt[i], NULL); } for (i = 0; i < host->info->nb_bases; i++) diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c index 2503be0253d3..19fa734a9a79 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c @@ -37,11 +37,11 @@ static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_ { union cmd_response cmd_resp; - /* Get response with status within a max of 800 ms timeout */ + /* Get response with status within a max of 1600 ms timeout */ if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp, (cmd_resp.response_v2.response == sensor_sts && cmd_resp.response_v2.status == 0 && (sid == 0xff || - cmd_resp.response_v2.sensor_id == sid)), 500, 800000)) + cmd_resp.response_v2.sensor_id == sid)), 500, 1600000)) return cmd_resp.response_v2.response; return SENSOR_DISABLED; @@ -53,6 +53,7 @@ static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sen cmd_base.ul = 0; cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR; + cmd_base.cmd_v2.intr_disable = 1; cmd_base.cmd_v2.period = info.period; cmd_base.cmd_v2.sensor_id = info.sensor_idx; cmd_base.cmd_v2.length = 16; @@ -70,6 +71,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx) cmd_base.ul = 0; cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR; + cmd_base.cmd_v2.intr_disable = 1; cmd_base.cmd_v2.period = 0; cmd_base.cmd_v2.sensor_id = sensor_idx; cmd_base.cmd_v2.length = 16; @@ -83,12 +85,51 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata) union sfh_cmd_base cmd_base; cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS; + cmd_base.cmd_v2.intr_disable = 1; cmd_base.cmd_v2.period = 0; cmd_base.cmd_v2.sensor_id = 0; writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0); } +static void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata) +{ + if (readl(privdata->mmio + AMD_P2C_MSG(4))) { + writel(0, privdata->mmio + AMD_P2C_MSG(4)); + writel(0xf, privdata->mmio + AMD_P2C_MSG(5)); + } +} + +static void amd_sfh_clear_intr(struct amd_mp2_dev *privdata) +{ + if (privdata->mp2_ops->clear_intr) + privdata->mp2_ops->clear_intr(privdata); +} + +static irqreturn_t amd_sfh_irq_handler(int irq, void *data) +{ + amd_sfh_clear_intr(data); + + return IRQ_HANDLED; +} + +static int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata) +{ + int rc; + + pci_intx(privdata->pdev, true); + + rc = devm_request_irq(&privdata->pdev->dev, privdata->pdev->irq, + amd_sfh_irq_handler, 0, DRIVER_NAME, privdata); + if (rc) { + dev_err(&privdata->pdev->dev, "failed to request irq %d err=%d\n", + privdata->pdev->irq, rc); + return rc; + } + + return 0; +} + void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info) { union sfh_cmd_param cmd_param; @@ -193,6 +234,8 @@ static void amd_mp2_pci_remove(void *privdata) struct amd_mp2_dev *mp2 = privdata; amd_sfh_hid_client_deinit(privdata); mp2->mp2_ops->stop_all(mp2); + pci_intx(mp2->pdev, false); + amd_sfh_clear_intr(mp2); } static const struct amd_mp2_ops amd_sfh_ops_v2 = { @@ -200,6 +243,8 @@ static const struct amd_mp2_ops amd_sfh_ops_v2 = { .stop = amd_stop_sensor_v2, .stop_all = amd_stop_all_sensor_v2, .response = amd_sfh_wait_response_v2, + .clear_intr = amd_sfh_clear_intr_v2, + .init_intr = amd_sfh_irq_init_v2, }; static const struct amd_mp2_ops amd_sfh_ops = { @@ -225,6 +270,14 @@ static void mp2_select_ops(struct amd_mp2_dev *privdata) } } +static int amd_sfh_irq_init(struct amd_mp2_dev *privdata) +{ + if 
(privdata->mp2_ops->init_intr) + return privdata->mp2_ops->init_intr(privdata); + + return 0; +} + static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct amd_mp2_dev *privdata; @@ -261,9 +314,20 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i mp2_select_ops(privdata); + rc = amd_sfh_irq_init(privdata); + if (rc) { + dev_err(&pdev->dev, "amd_sfh_irq_init failed\n"); + return rc; + } + rc = amd_sfh_hid_client_init(privdata); - if (rc) + if (rc) { + amd_sfh_clear_intr(privdata); + dev_err(&pdev->dev, "amd_sfh_hid_client_init failed\n"); return rc; + } + + amd_sfh_clear_intr(privdata); return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata); } @@ -290,6 +354,9 @@ static int __maybe_unused amd_mp2_pci_resume(struct device *dev) } } + schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP)); + amd_sfh_clear_intr(mp2); + return 0; } @@ -312,6 +379,9 @@ static int __maybe_unused amd_mp2_pci_suspend(struct device *dev) } } + cancel_delayed_work_sync(&cl_data->work_buffer); + amd_sfh_clear_intr(mp2); + return 0; } diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h index ae30e059f847..97b99861fae2 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h @@ -49,7 +49,7 @@ union sfh_cmd_base { } s; struct { u32 cmd_id : 4; - u32 intr_enable : 1; + u32 intr_disable : 1; u32 rsvd1 : 3; u32 length : 7; u32 mem_type : 1; @@ -141,5 +141,7 @@ struct amd_mp2_ops { void (*stop)(struct amd_mp2_dev *privdata, u16 sensor_idx); void (*stop_all)(struct amd_mp2_dev *privdata); int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts); + void (*clear_intr)(struct amd_mp2_dev *privdata); + int (*init_intr)(struct amd_mp2_dev *privdata); }; #endif diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c index be41f83b0289..76095bd53c65 100644 --- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c +++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c @@ -27,6 +27,7 @@ #define HID_USAGE_SENSOR_STATE_READY_ENUM 0x02 #define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM 0x05 #define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04 +#define ILLUMINANCE_MASK GENMASK(14, 0) int get_report_descriptor(int sensor_idx, u8 *rep_desc) { @@ -246,7 +247,8 @@ u8 get_input_report(u8 current_index, int sensor_idx, int report_id, struct amd_ get_common_inputs(&als_input.common_property, report_id); /* For ALS ,V2 Platforms uses C2P_MSG5 register instead of DRAM access method */ if (supported_input == V2_STATUS) - als_input.illuminance_value = (int)readl(privdata->mmio + AMD_C2P_MSG(5)); + als_input.illuminance_value = + readl(privdata->mmio + AMD_C2P_MSG(5)) & ILLUMINANCE_MASK; else als_input.illuminance_value = (int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER; diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 24802a4a636e..7dc89dc6b0f0 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -691,49 +691,49 @@ static const struct hid_device_id apple_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), .driver_data = 
APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO), - .driver_data = APPLE_HAS_FN }, + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 26c31d759914..81e7e404a5fc 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -860,7 +860,9 @@ static const char *keys[KEY_MAX + 1] = { [KEY_F22] = "F22", [KEY_F23] = "F23", [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD", [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3", - [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend", + [KEY_PROG4] = "Prog4", + [KEY_ALL_APPLICATIONS] = "AllApplications", + [KEY_SUSPEND] = "Suspend", [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play", [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost", [KEY_PRINT] 
= "Print", [KEY_HP] = "HP", @@ -969,6 +971,7 @@ static const char *keys[KEY_MAX + 1] = { [KEY_ASSISTANT] = "Assistant", [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext", [KEY_EMOJI_PICKER] = "EmojiPicker", + [KEY_DICTATE] = "Dictate", [KEY_BRIGHTNESS_MIN] = "BrightnessMin", [KEY_BRIGHTNESS_MAX] = "BrightnessMax", [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index 8e960d7b233b..2876cb6a7dca 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c @@ -228,7 +228,6 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct elo_priv *priv; int ret; - struct usb_device *udev; if (!hid_is_usb(hdev)) return -EINVAL; @@ -238,8 +237,7 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) return -ENOMEM; INIT_DELAYED_WORK(&priv->work, elo_work); - udev = interface_to_usbdev(to_usb_interface(hdev->dev.parent)); - priv->usbdev = usb_get_dev(udev); + priv->usbdev = interface_to_usbdev(to_usb_interface(hdev->dev.parent)); hid_set_drvdata(hdev, priv); @@ -270,8 +268,6 @@ static void elo_remove(struct hid_device *hdev) { struct elo_priv *priv = hid_get_drvdata(hdev); - usb_put_dev(priv->usbdev); - hid_hw_stop(hdev); cancel_delayed_work_sync(&priv->work); kfree(priv); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 85975031389b..78bd3ddda442 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -1370,6 +1370,7 @@ #define USB_VENDOR_ID_UGTIZER 0x2179 #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053 #define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077 +#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540 0x0004 #define USB_VENDOR_ID_VIEWSONIC 0x0543 #define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 112901d2d8d2..56ec27398a00 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -992,6 +992,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break; case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break; + case 0x0d8: map_key_clear(KEY_DICTATE); break; case 0x0d9: map_key_clear(KEY_EMOJI_PICKER); break; case 0x0e0: map_abs_clear(ABS_VOLUME); break; @@ -1083,6 +1084,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break; + case 0x2a2: map_key_clear(KEY_ALL_APPLICATIONS); break; + case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break; case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break; case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break; diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 7106b921b53c..c358778e070b 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -1068,6 +1068,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev, workitem.reports_supported |= STD_KEYBOARD; break; case 0x0f: + case 0x11: device_type = "eQUAD Lightspeed 1.2"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); workitem.reports_supported |= STD_KEYBOARD; diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c index b6a9a0f3966e..2204de889739 100644 --- a/drivers/hid/hid-nintendo.c +++ b/drivers/hid/hid-nintendo.c @@ -2128,6 +2128,10 @@ static int nintendo_hid_probe(struct hid_device *hdev, spin_lock_init(&ctlr->lock); ctlr->rumble_queue = alloc_workqueue("hid-nintendo-rumble_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, 0); + if (!ctlr->rumble_queue) { + 
ret = -ENOMEM; + goto err; + } INIT_WORK(&ctlr->rumble_worker, joycon_rumble_worker); ret = hid_parse(hdev); diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 9af1dc8ae3a2..c066ba901867 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -187,6 +187,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT }, diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c index 03b935ff02d5..c3e6d69fdfbd 100644 --- a/drivers/hid/hid-thrustmaster.c +++ b/drivers/hid/hid-thrustmaster.c @@ -64,7 +64,9 @@ struct tm_wheel_info { */ static const struct tm_wheel_info tm_wheels_infos[] = { {0x0306, 0x0006, "Thrustmaster T150RS"}, + {0x0200, 0x0005, "Thrustmaster T300RS (Missing Attachment)"}, {0x0206, 0x0005, "Thrustmaster T300RS"}, + {0x0209, 0x0005, "Thrustmaster T300RS (Open Wheel Attachment)"}, {0x0204, 0x0005, "Thrustmaster T300 Ferrari Alcantara Edition"}, {0x0002, 0x0002, "Thrustmaster T500RS"} //{0x0407, 0x0001, "Thrustmaster TMX"} @@ -158,6 +160,12 @@ static void thrustmaster_interrupts(struct hid_device *hdev) return; } + if (usbif->cur_altsetting->desc.bNumEndpoints < 2) { + kfree(send_buf); + hid_err(hdev, "Wrong number of endpoints?\n"); + return; + } + ep = &usbif->cur_altsetting->endpoint[1]; b_ep = ep->desc.bEndpointAddress; diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c index efa6140915f4..42ceb2058a09 100644 --- a/drivers/hid/hid-vivaldi.c +++ b/drivers/hid/hid-vivaldi.c @@ -144,7 +144,7 @@ out: static int vivaldi_input_configured(struct hid_device *hdev, struct hid_input *hidinput) { - return sysfs_create_group(&hdev->dev.kobj, &input_attribute_group); + return devm_device_add_group(&hdev->dev, &input_attribute_group); } static const struct hid_device_id vivaldi_table[] = { diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c index b4dad66fa954..ec6c73f75ffe 100644 --- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c +++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c @@ -27,7 +27,6 @@ struct i2c_hid_of_goodix { struct regulator *vdd; struct notifier_block nb; - struct mutex regulator_mutex; struct gpio_desc *reset_gpio; const struct goodix_i2c_hid_timing_data *timings; }; @@ -67,8 +66,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb, container_of(nb, struct i2c_hid_of_goodix, nb); int ret = NOTIFY_OK; - mutex_lock(&ihid_goodix->regulator_mutex); - switch (event) { case REGULATOR_EVENT_PRE_DISABLE: gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1); @@ -87,8 +84,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb, break; } - mutex_unlock(&ihid_goodix->regulator_mutex); - return ret; } @@ -102,8 +97,6 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client, if (!ihid_goodix) return -ENOMEM; - 
mutex_init(&ihid_goodix->regulator_mutex); - ihid_goodix->ops.power_up = goodix_i2c_hid_power_up; ihid_goodix->ops.power_down = goodix_i2c_hid_power_down; @@ -130,25 +123,28 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client, * long. Holding the controller in reset apparently draws extra * power. */ - mutex_lock(&ihid_goodix->regulator_mutex); ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify; ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb); - if (ret) { - mutex_unlock(&ihid_goodix->regulator_mutex); + if (ret) return dev_err_probe(&client->dev, ret, "regulator notifier request failed\n"); - } /* * If someone else is holding the regulator on (or the regulator is * an always-on one) we might never be told to deassert reset. Do it - * now. Here we'll assume that someone else might have _just - * barely_ turned the regulator on so we'll do the full - * "post_power_delay" just in case. + * now... and temporarily bump the regulator reference count just to + * make sure it is impossible for this to race with our own notifier! + * We also assume that someone else might have _just barely_ turned + * the regulator on so we'll do the full "post_power_delay" just in + * case. */ - if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) + if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) { + ret = regulator_enable(ihid_goodix->vdd); + if (ret) + return ret; goodix_i2c_hid_deassert_reset(ihid_goodix, true); - mutex_unlock(&ihid_goodix->regulator_mutex); + regulator_disable(ihid_goodix->vdd); + } return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0); } diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c index eb2833d2b5d0..832885198643 100644 --- a/drivers/hv/hv_utils_transport.c +++ b/drivers/hv/hv_utils_transport.c @@ -13,7 +13,7 @@ #include "hv_utils_transport.h" static DEFINE_SPINLOCK(hvt_list_lock); -static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list); +static LIST_HEAD(hvt_list); static void hvt_reset(struct hvutil_transport *hvt) { diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 17bf55fe3169..12a2b37e87f3 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -2028,8 +2028,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel) kobj->kset = dev->channels_kset; ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL, "%u", relid); - if (ret) + if (ret) { + kobject_put(kobj); return ret; + } ret = sysfs_create_group(kobj, &vmbus_chan_group); @@ -2038,6 +2040,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel) * The calling functions' error handling paths will cleanup the * empty channel directory. 
*/ + kobject_put(kobj); dev_err(device, "Unable to set up channel sysfs files\n"); return ret; } @@ -2079,7 +2082,6 @@ struct hv_device *vmbus_device_create(const guid_t *type, return child_device_obj; } -static u64 vmbus_dma_mask = DMA_BIT_MASK(64); /* * vmbus_device_register - Register the child device */ @@ -2120,8 +2122,9 @@ int vmbus_device_register(struct hv_device *child_device_obj) } hv_debug_add_dev_dir(child_device_obj); - child_device_obj->device.dma_mask = &vmbus_dma_mask; child_device_obj->device.dma_parms = &child_device_obj->dma_parms; + child_device_obj->device.dma_mask = &child_device_obj->dma_mask; + dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64)); return 0; err_kset_unregister: diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 3501a3ead4ba..3ae961986fc3 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -214,12 +214,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index) tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata, &hwmon_thermal_ops); - /* - * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV, - * so ignore that error but forward any other error. - */ - if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV)) - return PTR_ERR(tzd); + if (IS_ERR(tzd)) { + if (PTR_ERR(tzd) != -ENODEV) + return PTR_ERR(tzd); + dev_info(dev, "temp%d_input not attached to any thermal zone\n", + index + 1); + devm_kfree(dev, tdata); + return 0; + } err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node); if (err) diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index 414204f5704c..9c9e9f4ccb9e 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -59,7 +59,7 @@ static const struct platform_device_id ntc_thermistor_id[] = { [NTC_NCP15XH103] = { "ncp15xh103", TYPE_NCPXXXH103 }, [NTC_NCP18WB473] = { "ncp18wb473", TYPE_NCPXXWB473 }, [NTC_NCP21WB473] = { "ncp21wb473", TYPE_NCPXXWB473 }, - [NTC_SSG1404001221] = { "ssg1404-001221", TYPE_NCPXXWB473 }, + [NTC_SSG1404001221] = { "ssg1404_001221", TYPE_NCPXXWB473 }, [NTC_LAST] = { }, }; diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 776ee2237be2..ac2fbee1ba9c 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -911,6 +911,11 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b, pmbus_update_sensor_data(client, s2); regval = status & mask; + if (regval) { + ret = pmbus_write_byte_data(client, page, reg, regval); + if (ret) + goto unlock; + } if (s1 && s2) { s64 v1, v2; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 42da31c1ab70..8a6c6ee28556 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -488,7 +488,7 @@ config I2C_BRCMSTB config I2C_CADENCE tristate "Cadence I2C Controller" - depends on ARCH_ZYNQ || ARM64 || XTENSA + depends on ARCH_ZYNQ || ARM64 || XTENSA || COMPILE_TEST help Say yes here to select Cadence I2C Host Controller. This controller is e.g. used by Xilinx Zynq. 
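The vmbus_add_channel_kobj() fix near the top of this hunk follows the standard kobject ownership rule: once kobject_init_and_add() has been called, the kobject holds a reference even when the call fails, so every error path must drop it with kobject_put() rather than freeing the memory directly. A minimal sketch of the pattern, with hypothetical demo_ktype and demo_group (not names from the patches above):

	#include <linux/kobject.h>
	#include <linux/sysfs.h>

	static struct kobj_type demo_ktype;             /* assumed: .release frees the kobject */
	static const struct attribute_group demo_group; /* assumed attribute group */

	static int demo_add_kobj(struct kobject *parent, struct kobject *kobj)
	{
		int ret;

		ret = kobject_init_and_add(kobj, &demo_ktype, parent, "demo");
		if (ret) {
			/* kobj is live now; release via ktype->release, never kfree() */
			kobject_put(kobj);
			return ret;
		}

		ret = sysfs_create_group(kobj, &demo_group);
		if (ret) {
			/* drop the reference taken by kobject_init_and_add() */
			kobject_put(kobj);
			return ret;
		}

		return 0;
	}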
@@ -680,7 +680,7 @@ config I2C_IMG config I2C_IMX tristate "IMX I2C interface" - depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE + depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE || COMPILE_TEST select I2C_SLAVE help Say Y here if you want to use the IIC bus controller on @@ -935,7 +935,7 @@ config I2C_QCOM_GENI config I2C_QUP tristate "Qualcomm QUP based I2C controller" - depends on ARCH_QCOM + depends on ARCH_QCOM || COMPILE_TEST help If you say yes to this option, support will be included for the built-in I2C interface on the Qualcomm SoCs. diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index dfc534065595..5149454eef4a 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -23,6 +23,11 @@ #define BCM2835_I2C_FIFO 0x10 #define BCM2835_I2C_DIV 0x14 #define BCM2835_I2C_DEL 0x18 +/* + * 16-bit field for the number of SCL cycles to wait after rising SCL + * before deciding the slave is not responding. 0 disables the + * timeout detection. + */ #define BCM2835_I2C_CLKT 0x1c #define BCM2835_I2C_C_READ BIT(0) @@ -474,6 +479,12 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) adap->dev.of_node = pdev->dev.of_node; adap->quirks = of_device_get_match_data(&pdev->dev); + /* + * Disable the hardware clock stretching timeout. SMBUS + * specifies a limit for how long the device can stretch the + * clock, but core I2C doesn't. + */ + bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_CLKT, 0); bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0); ret = i2c_add_adapter(adap); diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 490ee3962645..b00f35c0b066 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -673,7 +673,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev) /* set the data in/out register size for compatible SoCs */ if (of_device_is_compatible(dev->device->of_node, - "brcmstb,brcmper-i2c")) + "brcm,brcmper-i2c")) dev->data_regsz = sizeof(u8); else dev->data_regsz = sizeof(u32); diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c index c1de8eb66169..cf54f1cb4c57 100644 --- a/drivers/i2c/busses/i2c-qcom-cci.c +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev) cci->master[idx].adap.quirks = &cci->data->quirks; cci->master[idx].adap.algo = &cci_algo; cci->master[idx].adap.dev.parent = dev; - cci->master[idx].adap.dev.of_node = child; + cci->master[idx].adap.dev.of_node = of_node_get(child); cci->master[idx].master = idx; cci->master[idx].cci = cci; @@ -643,8 +643,10 @@ static int cci_probe(struct platform_device *pdev) continue; ret = i2c_add_adapter(&cci->master[i].adap); - if (ret < 0) + if (ret < 0) { + of_node_put(cci->master[i].adap.dev.of_node); goto error_i2c; + } } pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); @@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev) return 0; error_i2c: - for (; i >= 0; i--) { - if (cci->master[i].cci) + for (--i ; i >= 0; i--) { + if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); + of_node_put(cci->master[i].adap.dev.of_node); + } } error: disable_irq(cci->irq); @@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev) int i; for (i = 0; i < cci->data->num_masters; i++) { - if (cci->master[i].cci) + if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); + of_node_put(cci->master[i].adap.dev.of_node); + } cci_halt(cci, i); } diff --git 
a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 0b66e25c0e2d..b7640cfe0020 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -64,6 +64,7 @@ static struct cpuidle_driver intel_idle_driver = { /* intel_idle.max_cstate=0 disables driver */ static int max_cstate = CPUIDLE_STATE_MAX - 1; static unsigned int disabled_states_mask; +static unsigned int preferred_states_mask; static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; @@ -121,9 +122,6 @@ static unsigned int mwait_substates __initdata; * If the local APIC timer is not known to be reliable in the target idle state, * enable one-shot tick broadcasting for the target CPU before executing MWAIT. * - * Optionally call leave_mm() for the target CPU upfront to avoid wakeups due to - * flushing user TLBs. - * * Must be called under local_irq_disable(). */ static __cpuidle int intel_idle(struct cpuidle_device *dev, @@ -761,6 +759,46 @@ static struct cpuidle_state icx_cstates[] __initdata = { .enter = NULL } }; +/* + * On Sapphire Rapids Xeon C1 has to be disabled if C1E is enabled, and vice + * versa. On SPR C1E is enabled only if "C1E promotion" bit is set in + * MSR_IA32_POWER_CTL. But in this case there is effectively no C1, because C1 + * requests are promoted to C1E. If the "C1E promotion" bit is cleared, then + * both C1 and C1E requests end up with C1, so there is effectively no C1E. + * + * By default we enable C1 and disable C1E by marking it with + * 'CPUIDLE_FLAG_UNUSABLE'. + */ +static struct cpuidle_state spr_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE | + CPUIDLE_FLAG_UNUSABLE, + .exit_latency = 2, + .target_residency = 4, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 290, + .target_residency = 800, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static struct cpuidle_state atom_cstates[] __initdata = { { .name = "C1E", @@ -1104,6 +1142,12 @@ static const struct idle_cpu idle_cpu_icx __initconst = { .use_acpi = true, }; +static const struct idle_cpu idle_cpu_spr __initconst = { + .state_table = spr_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_avn __initconst = { .state_table = avn_cstates, .disable_promotion_to_c1e = true, @@ -1166,6 +1210,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &idle_cpu_skx), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &idle_cpu_icx), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx), + X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt), @@ -1353,6 +1398,8 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { } static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; } #endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */ +static void c1e_promotion_enable(void); + /** * ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
* @@ -1523,6 +1570,41 @@ static void __init skx_idle_state_table_update(void) } } +/** + * spr_idle_state_table_update - Adjust Sapphire Rapids idle states table. + */ +static void __init spr_idle_state_table_update(void) +{ + unsigned long long msr; + + /* Check if user prefers C1E over C1. */ + if (preferred_states_mask & BIT(2)) { + if (preferred_states_mask & BIT(1)) + /* Both can't be enabled, stick to the defaults. */ + return; + + spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE; + spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE; + + /* Enable C1E using the "C1E promotion" bit. */ + c1e_promotion_enable(); + disable_promotion_to_c1e = false; + } + + /* + * By default, the C6 state assumes the worst-case scenario of package + * C6. However, if PC6 is disabled, we update the numbers to match + * core C6. + */ + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); + + /* Limit value 2 and above allow for PC6. */ + if ((msr & 0x7) < 2) { + spr_cstates[2].exit_latency = 190; + spr_cstates[2].target_residency = 600; + } +} + static bool __init intel_idle_verify_cstate(unsigned int mwait_hint) { unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1; @@ -1557,6 +1639,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) case INTEL_FAM6_SKYLAKE_X: skx_idle_state_table_update(); break; + case INTEL_FAM6_SAPPHIRERAPIDS_X: + spr_idle_state_table_update(); + break; } for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) { @@ -1629,6 +1714,15 @@ static void auto_demotion_disable(void) wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); } +static void c1e_promotion_enable(void) +{ + unsigned long long msr_bits; + + rdmsrl(MSR_IA32_POWER_CTL, msr_bits); + msr_bits |= 0x2; + wrmsrl(MSR_IA32_POWER_CTL, msr_bits); +} + static void c1e_promotion_disable(void) { unsigned long long msr_bits; @@ -1798,3 +1892,14 @@ module_param(max_cstate, int, 0444); */ module_param_named(states_off, disabled_states_mask, uint, 0444); MODULE_PARM_DESC(states_off, "Mask of disabled idle states"); +/* + * Some platforms come with mutually exclusive C-states, so that if one is + * enabled, the other C-states must not be used. Example: C1 and C1E on + * Sapphire Rapids platform. This parameter allows for selecting the + * preferred C-states among the groups of mutually exclusive C-states - the + * selected C-states will be registered, the other C-states from the mutually + * exclusive group won't be registered. If the platform has no mutually + * exclusive C-states, this parameter has no effect. 
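+ * + * For example, booting with "intel_idle.preferred_cstates=4" on Sapphire + * Rapids selects C1E over C1: spr_idle_state_table_update() above treats + * bit 1 as C1 and bit 2 as C1E, so it marks C1 unusable, clears the + * unusable flag on C1E and sets the "C1E promotion" bit.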
+ */ +module_param_named(preferred_cstates, preferred_states_mask, uint, 0444); +MODULE_PARM_DESC(preferred_cstates, "Mask of preferred idle states"); diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index e6081dd0a880..d11f668016a6 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -1783,11 +1783,14 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "Unable to register iio device\n"); - goto err_trigger_unregister; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); err_trigger_unregister: bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1); err_buffer_cleanup: diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c index 32989d91b982..f7fd9e046588 100644 --- a/drivers/iio/accel/fxls8962af-core.c +++ b/drivers/iio/accel/fxls8962af-core.c @@ -173,12 +173,20 @@ struct fxls8962af_data { u16 upper_thres; }; -const struct regmap_config fxls8962af_regmap_conf = { +const struct regmap_config fxls8962af_i2c_regmap_conf = { .reg_bits = 8, .val_bits = 8, .max_register = FXLS8962AF_MAX_REG, }; -EXPORT_SYMBOL_GPL(fxls8962af_regmap_conf); +EXPORT_SYMBOL_GPL(fxls8962af_i2c_regmap_conf); + +const struct regmap_config fxls8962af_spi_regmap_conf = { + .reg_bits = 8, + .pad_bits = 8, + .val_bits = 8, + .max_register = FXLS8962AF_MAX_REG, +}; +EXPORT_SYMBOL_GPL(fxls8962af_spi_regmap_conf); enum { fxls8962af_idx_x, diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c index cfb004b20455..6bde9891effb 100644 --- a/drivers/iio/accel/fxls8962af-i2c.c +++ b/drivers/iio/accel/fxls8962af-i2c.c @@ -18,7 +18,7 @@ static int fxls8962af_probe(struct i2c_client *client) { struct regmap *regmap; - regmap = devm_regmap_init_i2c(client, &fxls8962af_regmap_conf); + regmap = devm_regmap_init_i2c(client, &fxls8962af_i2c_regmap_conf); if (IS_ERR(regmap)) { dev_err(&client->dev, "Failed to initialize i2c regmap\n"); return PTR_ERR(regmap); diff --git a/drivers/iio/accel/fxls8962af-spi.c b/drivers/iio/accel/fxls8962af-spi.c index 57108d3d480b..6f4dff3238d3 100644 --- a/drivers/iio/accel/fxls8962af-spi.c +++ b/drivers/iio/accel/fxls8962af-spi.c @@ -18,7 +18,7 @@ static int fxls8962af_probe(struct spi_device *spi) { struct regmap *regmap; - regmap = devm_regmap_init_spi(spi, &fxls8962af_regmap_conf); + regmap = devm_regmap_init_spi(spi, &fxls8962af_spi_regmap_conf); if (IS_ERR(regmap)) { dev_err(&spi->dev, "Failed to initialize spi regmap\n"); return PTR_ERR(regmap); diff --git a/drivers/iio/accel/fxls8962af.h b/drivers/iio/accel/fxls8962af.h index b67572c3ef06..9cbe98c3ba9a 100644 --- a/drivers/iio/accel/fxls8962af.h +++ b/drivers/iio/accel/fxls8962af.h @@ -17,6 +17,7 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq); int fxls8962af_core_remove(struct device *dev); extern const struct dev_pm_ops fxls8962af_pm_ops; -extern const struct regmap_config fxls8962af_regmap_conf; +extern const struct regmap_config fxls8962af_i2c_regmap_conf; +extern const struct regmap_config fxls8962af_spi_regmap_conf; #endif /* _FXLS8962AF_H_ */ diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 0fe570316848..ac74cdcd2bc8 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1590,11 +1590,14 @@ static int kxcjk1013_probe(struct 
i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto err_buffer_cleanup; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c index 4c359fb05480..c53a3398b14c 100644 --- a/drivers/iio/accel/mma9551.c +++ b/drivers/iio/accel/mma9551.c @@ -495,11 +495,14 @@ static int mma9551_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto out_poweroff; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); out_poweroff: mma9551_set_device_state(client, false); diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c index 0570ab1cc064..5ff6bc70708b 100644 --- a/drivers/iio/accel/mma9553.c +++ b/drivers/iio/accel/mma9553.c @@ -1134,12 +1134,15 @@ static int mma9553_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto out_poweroff; + goto err_pm_cleanup; } dev_dbg(&indio_dev->dev, "Registered device %s\n", name); return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); out_poweroff: mma9551_set_device_state(client, false); return ret; diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index bc2cfa5f9592..b400bbe291aa 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -76,7 +76,7 @@ #define AD7124_CONFIG_REF_SEL(x) FIELD_PREP(AD7124_CONFIG_REF_SEL_MSK, x) #define AD7124_CONFIG_PGA_MSK GENMASK(2, 0) #define AD7124_CONFIG_PGA(x) FIELD_PREP(AD7124_CONFIG_PGA_MSK, x) -#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(7, 6) +#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(6, 5) #define AD7124_CONFIG_IN_BUFF(x) FIELD_PREP(AD7124_CONFIG_IN_BUFF_MSK, x) /* AD7124_FILTER_X */ diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c index 42ea8bc7e780..adc5ceaef8c9 100644 --- a/drivers/iio/adc/men_z188_adc.c +++ b/drivers/iio/adc/men_z188_adc.c @@ -103,6 +103,7 @@ static int men_z188_probe(struct mcb_device *dev, struct z188_adc *adc; struct iio_dev *indio_dev; struct resource *mem; + int ret; indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc)); if (!indio_dev) @@ -128,8 +129,14 @@ static int men_z188_probe(struct mcb_device *dev, adc->mem = mem; mcb_set_drvdata(dev, indio_dev); - return iio_device_register(indio_dev); + ret = iio_device_register(indio_dev); + if (ret) + goto err_unmap; + + return 0; +err_unmap: + iounmap(adc->base); err: mcb_release_mem(mem); return -ENXIO; diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c index d84ae6b008c1..e8fc4d01f30b 100644 --- a/drivers/iio/adc/ti-tsc2046.c +++ b/drivers/iio/adc/ti-tsc2046.c @@ -388,7 +388,7 @@ static int tsc2046_adc_update_scan_mode(struct iio_dev *indio_dev, mutex_lock(&priv->slock); size = 0; - for_each_set_bit(ch_idx, active_scan_mask, indio_dev->num_channels) { + for_each_set_bit(ch_idx, active_scan_mask, ARRAY_SIZE(priv->l)) { size += tsc2046_adc_group_set_layout(priv, group, ch_idx); tsc2046_adc_group_set_cmd(priv, group, ch_idx); group++; @@ -548,7 +548,7 @@ static int tsc2046_adc_setup_spi_msg(struct 
tsc2046_adc_priv *priv) * enabled. */ size = 0; - for (ch_idx = 0; ch_idx < priv->dcfg->num_channels; ch_idx++) + for (ch_idx = 0; ch_idx < ARRAY_SIZE(priv->l); ch_idx++) size += tsc2046_adc_group_set_layout(priv, ch_idx, ch_idx); priv->tx = devm_kzalloc(&priv->spi->dev, size, GFP_KERNEL); diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c index 5271073bb74e..acd230a6af35 100644 --- a/drivers/iio/addac/ad74413r.c +++ b/drivers/iio/addac/ad74413r.c @@ -134,7 +134,6 @@ struct ad74413r_state { #define AD74413R_CH_EN_MASK(x) BIT(x) #define AD74413R_REG_DIN_COMP_OUT 0x25 -#define AD74413R_DIN_COMP_OUT_SHIFT_X(x) x #define AD74413R_REG_ADC_RESULT_X(x) (0x26 + (x)) #define AD74413R_ADC_RESULT_MAX GENMASK(15, 0) @@ -288,7 +287,7 @@ static void ad74413r_gpio_set_multiple(struct gpio_chip *chip, unsigned int offset = 0; int ret; - for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) { + for_each_set_bit_from(offset, mask, chip->ngpio) { unsigned int real_offset = st->gpo_gpio_offsets[offset]; ret = ad74413r_set_gpo_config(st, real_offset, @@ -316,7 +315,7 @@ static int ad74413r_gpio_get(struct gpio_chip *chip, unsigned int offset) if (ret) return ret; - status &= AD74413R_DIN_COMP_OUT_SHIFT_X(real_offset); + status &= BIT(real_offset); return status ? 1 : 0; } @@ -334,11 +333,10 @@ static int ad74413r_gpio_get_multiple(struct gpio_chip *chip, if (ret) return ret; - for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) { + for_each_set_bit_from(offset, mask, chip->ngpio) { unsigned int real_offset = st->comp_gpio_offsets[offset]; - if (val & BIT(real_offset)) - *bits |= offset; + __assign_bit(offset, bits, val & BIT(real_offset)); } return ret; @@ -840,7 +838,7 @@ static int ad74413r_update_scan_mode(struct iio_dev *indio_dev, { struct ad74413r_state *st = iio_priv(indio_dev); struct spi_transfer *xfer = st->adc_samples_xfer; - u8 *rx_buf = &st->adc_samples_buf.rx_buf[-1 * AD74413R_FRAME_SIZE]; + u8 *rx_buf = st->adc_samples_buf.rx_buf; u8 *tx_buf = st->adc_samples_tx_buf; unsigned int channel; int ret = -EINVAL; @@ -894,9 +892,10 @@ static int ad74413r_update_scan_mode(struct iio_dev *indio_dev, spi_message_add_tail(xfer, &st->adc_samples_msg); - xfer++; tx_buf += AD74413R_FRAME_SIZE; - rx_buf += AD74413R_FRAME_SIZE; + if (xfer != st->adc_samples_xfer) + rx_buf += AD74413R_FRAME_SIZE; + xfer++; } xfer->rx_buf = rx_buf; diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c index 6cdeb50143af..3f3c478e9baa 100644 --- a/drivers/iio/frequency/admv1013.c +++ b/drivers/iio/frequency/admv1013.c @@ -348,7 +348,7 @@ static int admv1013_update_mixer_vgate(struct admv1013_state *st) vcm = regulator_get_voltage(st->reg); - if (vcm >= 0 && vcm < 1800000) + if (vcm < 1800000) mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100; else if (vcm > 1800000 && vcm < 2600000) mixer_vgate = (2375 * vcm / 1000000 + 125) / 100; diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index 17b939a367ad..81a6d09788bd 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -1188,11 +1188,14 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "unable to register iio device\n"); - goto err_buffer_cleanup; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: diff --git 
a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index ed129321a14d..f9b4540db1f4 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -1403,6 +1403,7 @@ static int adis16480_probe(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); const struct adis_data *adis16480_data; + irq_handler_t trigger_handler = NULL; struct iio_dev *indio_dev; struct adis16480 *st; int ret; @@ -1474,8 +1475,12 @@ static int adis16480_probe(struct spi_device *spi) st->clk_freq = st->chip_info->int_clk; } + /* Only use our trigger handler if burst mode is supported */ + if (adis16480_data->burst_len) + trigger_handler = adis16480_trigger_handler; + ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev, - adis16480_trigger_handler); + trigger_handler); if (ret) return ret; diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c index 1dabfd615dab..f89724481df9 100644 --- a/drivers/iio/imu/kmx61.c +++ b/drivers/iio/imu/kmx61.c @@ -1385,7 +1385,7 @@ static int kmx61_probe(struct i2c_client *client, ret = iio_device_register(data->acc_indio_dev); if (ret < 0) { dev_err(&client->dev, "Failed to register acc iio device\n"); - goto err_buffer_cleanup_mag; + goto err_pm_cleanup; } ret = iio_device_register(data->mag_indio_dev); @@ -1398,6 +1398,9 @@ static int kmx61_probe(struct i2c_client *client, err_iio_unregister_acc: iio_device_unregister(data->acc_indio_dev); +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); err_buffer_cleanup_mag: if (client->irq > 0) iio_triggered_buffer_cleanup(data->mag_indio_dev); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index 727b4b6ac696..93f0c6bce502 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -1374,8 +1374,12 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor, if (err < 0) return err; + /* + * we need to wait for sensor settling time before + * reading data in order to avoid corrupted samples + */ delay = 1000000000 / sensor->odr; - usleep_range(delay, 2 * delay); + usleep_range(3 * delay, 4 * delay); err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data)); if (err < 0) diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 94eb9f6cf128..208b5193c621 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -1569,9 +1569,17 @@ static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg } if (copy_to_user(ival, &fd, sizeof(fd))) { - put_unused_fd(fd); - ret = -EFAULT; - goto error_free_ib; + /* + * "Leak" the fd, as there's not much we can do about this + * anyway. 'fd' might have been closed already, as + * anon_inode_getfd() called fd_install() on it, which made + * it reachable by userland. + * + * Instead of allowing a malicious user to play tricks with + * us, rely on the process exit path to do any necessary + * cleanup, as in releasing the file, if still needed. 
+ */ + return -EFAULT; } return 0; diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c index f96f53175349..3d4d21f979fa 100644 --- a/drivers/iio/magnetometer/bmc150_magn.c +++ b/drivers/iio/magnetometer/bmc150_magn.c @@ -962,13 +962,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "unable to register iio device\n"); - goto err_disable_runtime_pm; + goto err_pm_cleanup; } dev_dbg(dev, "Registered device %s\n", name); return 0; -err_disable_runtime_pm: +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index c903b74f46a4..35f0d5e7533d 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3322,7 +3322,7 @@ static int cm_lap_handler(struct cm_work *work) ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av); if (ret) { rdma_destroy_ah_attr(&ah_attr); - return -EINVAL; + goto deref; } spin_lock_irq(&cm_id_priv->lock); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 27a00ce2e101..50c53409ceb6 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -67,8 +67,8 @@ static const char * const cma_events[] = { [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", }; -static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr, - union ib_gid *mgid); +static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, + enum ib_gid_type gid_type); const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) { @@ -1846,17 +1846,19 @@ static void destroy_mc(struct rdma_id_private *id_priv, if (dev_addr->bound_dev_if) ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); - if (ndev) { + if (ndev && !send_only) { + enum ib_gid_type gid_type; union ib_gid mgid; - cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr, - &mgid); - - if (!send_only) - cma_igmp_send(ndev, &mgid, false); - - dev_put(ndev); + gid_type = id_priv->cma_dev->default_gid_type + [id_priv->id.port_num - + rdma_start_port( + id_priv->cma_dev->device)]; + cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid, + gid_type); + cma_igmp_send(ndev, &mgid, false); } + dev_put(ndev); cancel_work_sync(&mc->iboe_join.work); } @@ -3368,22 +3370,30 @@ err: static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, const struct sockaddr *dst_addr) { - if (!src_addr || !src_addr->sa_family) { - src_addr = (struct sockaddr *) &id->route.addr.src_addr; - src_addr->sa_family = dst_addr->sa_family; - if (IS_ENABLED(CONFIG_IPV6) && - dst_addr->sa_family == AF_INET6) { - struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; - struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; - src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; - if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) - id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; - } else if (dst_addr->sa_family == AF_IB) { - ((struct sockaddr_ib *) src_addr)->sib_pkey = - ((struct sockaddr_ib *) dst_addr)->sib_pkey; - } - } - return rdma_bind_addr(id, src_addr); + struct sockaddr_storage zero_sock = {}; + + if (src_addr && src_addr->sa_family) + return rdma_bind_addr(id, src_addr); + + /* + * When the src_addr is not specified, automatically supply an any addr + */ + zero_sock.ss_family = 
dst_addr->sa_family; + if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { + struct sockaddr_in6 *src_addr6 = + (struct sockaddr_in6 *)&zero_sock; + struct sockaddr_in6 *dst_addr6 = + (struct sockaddr_in6 *)dst_addr; + + src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; + if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) + id->route.addr.dev_addr.bound_dev_if = + dst_addr6->sin6_scope_id; + } else if (dst_addr->sa_family == AF_IB) { + ((struct sockaddr_ib *)&zero_sock)->sib_pkey = + ((struct sockaddr_ib *)dst_addr)->sib_pkey; + } + return rdma_bind_addr(id, (struct sockaddr *)&zero_sock); } /* diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 2b72c4fa9550..9d6ac9dff39a 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -95,6 +95,7 @@ struct ucma_context { u64 uid; struct list_head list; + struct list_head mc_list; struct work_struct close_work; }; @@ -105,6 +106,7 @@ struct ucma_multicast { u64 uid; u8 join_state; + struct list_head list; struct sockaddr_storage addr; }; @@ -198,6 +200,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) INIT_WORK(&ctx->close_work, ucma_close_id); init_completion(&ctx->comp); + INIT_LIST_HEAD(&ctx->mc_list); /* So list_del() will work if we don't do ucma_finish_ctx() */ INIT_LIST_HEAD(&ctx->list); ctx->file = file; @@ -484,19 +487,19 @@ err1: static void ucma_cleanup_multicast(struct ucma_context *ctx) { - struct ucma_multicast *mc; - unsigned long index; + struct ucma_multicast *mc, *tmp; - xa_for_each(&multicast_table, index, mc) { - if (mc->ctx != ctx) - continue; + xa_lock(&multicast_table); + list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { + list_del(&mc->list); /* * At this point mc->ctx->ref is 0 so the mc cannot leave the * lock on the reader and this is enough serialization */ - xa_erase(&multicast_table, index); + __xa_erase(&multicast_table, mc->id); kfree(mc); } + xa_unlock(&multicast_table); } static void ucma_cleanup_mc_events(struct ucma_multicast *mc) @@ -1469,12 +1472,16 @@ static ssize_t ucma_process_join(struct ucma_file *file, mc->uid = cmd->uid; memcpy(&mc->addr, addr, cmd->addr_size); - if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, + xa_lock(&multicast_table); + if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, GFP_KERNEL)) { ret = -ENOMEM; goto err_free_mc; } + list_add_tail(&mc->list, &ctx->mc_list); + xa_unlock(&multicast_table); + mutex_lock(&ctx->mutex); ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, join_state, mc); @@ -1500,8 +1507,11 @@ err_leave_multicast: mutex_unlock(&ctx->mutex); ucma_cleanup_mc_events(mc); err_xa_erase: - xa_erase(&multicast_table, mc->id); + xa_lock(&multicast_table); + list_del(&mc->list); + __xa_erase(&multicast_table, mc->id); err_free_mc: + xa_unlock(&multicast_table); kfree(mc); err_put_ctx: ucma_put_ctx(ctx); @@ -1569,15 +1579,17 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file, mc = ERR_PTR(-EINVAL); else if (!refcount_inc_not_zero(&mc->ctx->ref)) mc = ERR_PTR(-ENXIO); - else - __xa_erase(&multicast_table, mc->id); - xa_unlock(&multicast_table); if (IS_ERR(mc)) { + xa_unlock(&multicast_table); ret = PTR_ERR(mc); goto out; } + list_del(&mc->list); + __xa_erase(&multicast_table, mc->id); + xa_unlock(&multicast_table); + mutex_lock(&mc->ctx->mutex); rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); mutex_unlock(&mc->ctx->mutex); diff --git a/drivers/infiniband/hw/hfi1/ipoib.h 
b/drivers/infiniband/hw/hfi1/ipoib.h index 909122934246..aec60d4888eb 100644 --- a/drivers/infiniband/hw/hfi1/ipoib.h +++ b/drivers/infiniband/hw/hfi1/ipoib.h @@ -55,7 +55,7 @@ union hfi1_ipoib_flow { */ struct ipoib_txreq { struct sdma_txreq txreq; - struct hfi1_sdma_header sdma_hdr; + struct hfi1_sdma_header *sdma_hdr; int sdma_status; int complete; struct hfi1_ipoib_dev_priv *priv; diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c index e1a2b02bbd91..5d814afdf7f3 100644 --- a/drivers/infiniband/hw/hfi1/ipoib_main.c +++ b/drivers/infiniband/hw/hfi1/ipoib_main.c @@ -22,26 +22,35 @@ static int hfi1_ipoib_dev_init(struct net_device *dev) int ret; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; ret = priv->netdev_ops->ndo_init(dev); if (ret) - return ret; + goto out_ret; ret = hfi1_netdev_add_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr), dev); if (ret < 0) { priv->netdev_ops->ndo_uninit(dev); - return ret; + goto out_ret; } return 0; +out_ret: + free_percpu(dev->tstats); + dev->tstats = NULL; + return ret; } static void hfi1_ipoib_dev_uninit(struct net_device *dev) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); + free_percpu(dev->tstats); + dev->tstats = NULL; + hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr)); priv->netdev_ops->ndo_uninit(dev); @@ -166,12 +175,7 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev) hfi1_ipoib_rxq_deinit(priv->netdev); free_percpu(dev->tstats); -} - -static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev) -{ - hfi1_ipoib_netdev_dtor(dev); - free_netdev(dev); + dev->tstats = NULL; } static void hfi1_ipoib_set_id(struct net_device *dev, int id) @@ -211,24 +215,23 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device, priv->port_num = port_num; priv->netdev_ops = netdev->netdev_ops; - netdev->netdev_ops = &hfi1_ipoib_netdev_ops; - ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey); rc = hfi1_ipoib_txreq_init(priv); if (rc) { dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc); - hfi1_ipoib_free_rdma_netdev(netdev); return rc; } rc = hfi1_ipoib_rxq_init(netdev); if (rc) { dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc); - hfi1_ipoib_free_rdma_netdev(netdev); + hfi1_ipoib_txreq_deinit(priv); return rc; } + netdev->netdev_ops = &hfi1_ipoib_netdev_ops; + netdev->priv_destructor = hfi1_ipoib_netdev_dtor; netdev->needs_free_netdev = true; diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c index f4010890309f..d6bbdb8fcb50 100644 --- a/drivers/infiniband/hw/hfi1/ipoib_tx.c +++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c @@ -122,7 +122,7 @@ static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget) dd_dev_warn(priv->dd, "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n", __func__, tx->sdma_status, - le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx, + le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx, tx->txq->sde->this_idx); } @@ -231,7 +231,7 @@ static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx, { struct hfi1_devdata *dd = txp->dd; struct sdma_txreq *txreq = &tx->txreq; - struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr; + struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr; u16 pkt_bytes = sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len; int ret; @@ -256,7 +256,7 @@ static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx, struct ipoib_txparms *txp) { struct hfi1_ipoib_dev_priv *priv = tx->txq->priv; - struct 
hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr; + struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr; struct sk_buff *skb = tx->skb; struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp); struct rdma_ah_attr *ah_attr = txp->ah_attr; @@ -483,7 +483,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev, if (likely(!ret)) { tx_ok: trace_sdma_output_ibhdr(txq->priv->dd, - &tx->sdma_hdr.hdr, + &tx->sdma_hdr->hdr, ib_is_sc5(txp->flow.sc5)); hfi1_ipoib_check_queue_depth(txq); return NETDEV_TX_OK; @@ -547,7 +547,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev, hfi1_ipoib_check_queue_depth(txq); trace_sdma_output_ibhdr(txq->priv->dd, - &tx->sdma_hdr.hdr, + &tx->sdma_hdr->hdr, ib_is_sc5(txp->flow.sc5)); if (!netdev_xmit_more()) @@ -683,7 +683,8 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) { struct net_device *dev = priv->netdev; u32 tx_ring_size, tx_item_size; - int i; + struct hfi1_ipoib_circ_buf *tx_ring; + int i, j; /* * Ring holds 1 less than tx_ring_size @@ -701,7 +702,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) for (i = 0; i < dev->num_tx_queues; i++) { struct hfi1_ipoib_txq *txq = &priv->txqs[i]; + struct ipoib_txreq *tx; + tx_ring = &txq->tx_ring; iowait_init(&txq->wait, 0, hfi1_ipoib_flush_txq, @@ -725,14 +728,19 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) priv->dd->node); txq->tx_ring.items = - kcalloc_node(tx_ring_size, tx_item_size, - GFP_KERNEL, priv->dd->node); + kvzalloc_node(array_size(tx_ring_size, tx_item_size), + GFP_KERNEL, priv->dd->node); if (!txq->tx_ring.items) goto free_txqs; txq->tx_ring.max_items = tx_ring_size; - txq->tx_ring.shift = ilog2(tx_ring_size); + txq->tx_ring.shift = ilog2(tx_item_size); txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq); + tx_ring = &txq->tx_ring; + for (j = 0; j < tx_ring_size; j++) + hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr = + kzalloc_node(sizeof(*tx->sdma_hdr), + GFP_KERNEL, priv->dd->node); netif_tx_napi_add(dev, &txq->napi, hfi1_ipoib_poll_tx_ring, @@ -746,7 +754,10 @@ free_txqs: struct hfi1_ipoib_txq *txq = &priv->txqs[i]; netif_napi_del(&txq->napi); - kfree(txq->tx_ring.items); + tx_ring = &txq->tx_ring; + for (j = 0; j < tx_ring_size; j++) + kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr); + kvfree(tx_ring->items); } kfree(priv->txqs); @@ -780,17 +791,20 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq) void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv) { - int i; + int i, j; for (i = 0; i < priv->netdev->num_tx_queues; i++) { struct hfi1_ipoib_txq *txq = &priv->txqs[i]; + struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; iowait_cancel_work(&txq->wait); iowait_sdma_drain(&txq->wait); hfi1_ipoib_drain_tx_list(txq); netif_napi_del(&txq->napi); hfi1_ipoib_drain_tx_ring(txq); - kfree(txq->tx_ring.items); + for (j = 0; j < tx_ring->max_items; j++) + kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr); + kvfree(tx_ring->items); } kfree(priv->txqs); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 1c3d97229988..93b1650eacfa 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -3237,7 +3237,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: ew = kmalloc(sizeof *ew, GFP_ATOMIC); if (!ew) - break; + return; INIT_WORK(&ew->work, handle_port_mgmt_change_event); memcpy(&ew->ib_eqe, eqe, sizeof *eqe); diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 
0a3b28142c05..41c272980f91 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -541,7 +541,7 @@ static struct attribute *port_diagc_attributes[] = { }; static const struct attribute_group port_diagc_group = { - .name = "linkcontrol", + .name = "diag_counters", .attrs = port_diagc_attributes, }; diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 3305f2744bfa..ae50b56e8913 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -3073,6 +3073,8 @@ do_write: case IB_WR_ATOMIC_FETCH_AND_ADD: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) goto inv_err; + if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1))) + goto inv_err; if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), wqe->atomic_wr.remote_addr, wqe->atomic_wr.rkey, diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index 368959ae9a8c..df03d84c6868 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -644,14 +644,9 @@ static inline struct siw_sqe *orq_get_current(struct siw_qp *qp) return &qp->orq[qp->orq_get % qp->attrs.orq_size]; } -static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp) -{ - return &qp->orq[qp->orq_put % qp->attrs.orq_size]; -} - static inline struct siw_sqe *orq_get_free(struct siw_qp *qp) { - struct siw_sqe *orq_e = orq_get_tail(qp); + struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size]; if (READ_ONCE(orq_e->flags) == 0) return orq_e; diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 60116f20653c..875ea6f1b04a 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -1153,11 +1153,12 @@ static int siw_check_tx_fence(struct siw_qp *qp) spin_lock_irqsave(&qp->orq_lock, flags); - rreq = orq_get_current(qp); - /* free current orq entry */ + rreq = orq_get_current(qp); WRITE_ONCE(rreq->flags, 0); + qp->orq_get++; + if (qp->tx_ctx.orq_fence) { if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) { pr_warn("siw: [QP %u]: fence resume: bad status %d\n", @@ -1165,10 +1166,12 @@ static int siw_check_tx_fence(struct siw_qp *qp) rv = -EPROTO; goto out; } - /* resume SQ processing */ + /* resume SQ processing, if possible */ if (tx_waiting->sqe.opcode == SIW_OP_READ || tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) { + + /* SQ processing was stopped because of a full ORQ */ + rreq = orq_get_free(qp); if (unlikely(!rreq)) { pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp)); rv = -EPROTO; @@ -1181,15 +1184,14 @@ static int siw_check_tx_fence(struct siw_qp *qp) resume_tx = 1; } else if (siw_orq_empty(qp)) { + /* + * SQ processing was stopped by fenced work request. + * Resume since all previous Reads are now completed.
+ */ qp->tx_ctx.orq_fence = 0; resume_tx = 1; - } else { - pr_warn("siw: [QP %u]: fence resume: orq idx: %d:%d\n", - qp_id(qp), qp->orq_get, qp->orq_put); - rv = -EPROTO; } } - qp->orq_get++; out: spin_unlock_irqrestore(&qp->orq_lock, flags); diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index a3dd2cb6d5c9..54ef367b074a 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -313,7 +313,8 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) { siw_dbg(base_dev, "too many QP's\n"); - return -ENOMEM; + rv = -ENOMEM; + goto err_atomic; } if (attrs->qp_type != IB_QPT_RC) { siw_dbg(base_dev, "only RC QP's supported\n"); diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 7c3f98e57889..759b85f03331 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -2682,6 +2682,8 @@ static void rtrs_clt_dev_release(struct device *dev) struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess, dev); + mutex_destroy(&clt->paths_ev_mutex); + mutex_destroy(&clt->paths_mutex); kfree(clt); } @@ -2711,6 +2713,8 @@ static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num, return ERR_PTR(-ENOMEM); } + clt->dev.class = rtrs_clt_dev_class; + clt->dev.release = rtrs_clt_dev_release; uuid_gen(&clt->paths_uuid); INIT_LIST_HEAD_RCU(&clt->paths_list); clt->paths_num = paths_num; @@ -2727,53 +2731,51 @@ static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num, init_waitqueue_head(&clt->permits_wait); mutex_init(&clt->paths_ev_mutex); mutex_init(&clt->paths_mutex); + device_initialize(&clt->dev); - clt->dev.class = rtrs_clt_dev_class; - clt->dev.release = rtrs_clt_dev_release; err = dev_set_name(&clt->dev, "%s", sessname); if (err) - goto err; + goto err_put; + /* * Suppress user space notification until * sysfs files are created */ dev_set_uevent_suppress(&clt->dev, true); - err = device_register(&clt->dev); - if (err) { - put_device(&clt->dev); - goto err; - } + err = device_add(&clt->dev); + if (err) + goto err_put; clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj); if (!clt->kobj_paths) { err = -ENOMEM; - goto err_dev; + goto err_del; } err = rtrs_clt_create_sysfs_root_files(clt); if (err) { kobject_del(clt->kobj_paths); kobject_put(clt->kobj_paths); - goto err_dev; + goto err_del; } dev_set_uevent_suppress(&clt->dev, false); kobject_uevent(&clt->dev.kobj, KOBJ_ADD); return clt; -err_dev: - device_unregister(&clt->dev); -err: +err_del: + device_del(&clt->dev); +err_put: free_percpu(clt->pcpu_path); - kfree(clt); + put_device(&clt->dev); return ERR_PTR(err); } static void free_clt(struct rtrs_clt_sess *clt) { - free_permits(clt); free_percpu(clt->pcpu_path); - mutex_destroy(&clt->paths_ev_mutex); - mutex_destroy(&clt->paths_mutex); - /* release callback will free clt in last put */ + + /* + * release callback will free clt and destroy mutexes in last put + */ device_unregister(&clt->dev); } @@ -2890,6 +2892,7 @@ void rtrs_clt_close(struct rtrs_clt_sess *clt) rtrs_clt_destroy_path_files(clt_path, NULL); kobject_put(&clt_path->kobj); } + free_permits(clt); free_clt(clt); } EXPORT_SYMBOL(rtrs_clt_close); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index e174e853f8a4..285b766e4e70 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ 
-4047,9 +4047,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data) spin_unlock(&host->target_lock); /* - * Wait for tl_err and target port removal tasks. + * srp_queue_remove_work() queues a call to + * srp_remove_target(). The latter function cancels + * target->tl_err_work so waiting for the remove works to + * finish is sufficient. */ - flush_workqueue(system_long_wq); flush_workqueue(srp_remove_wq); kfree(host); diff --git a/drivers/input/input.c b/drivers/input/input.c index ccaeb2426385..c3139bc2aa0d 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -2285,6 +2285,12 @@ int input_register_device(struct input_dev *dev) /* KEY_RESERVED is not supposed to be transmitted to userspace. */ __clear_bit(KEY_RESERVED, dev->keybit); + /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */ + if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) { + __clear_bit(BTN_RIGHT, dev->keybit); + __clear_bit(BTN_MIDDLE, dev->keybit); + } + /* Make sure that bitmasks not mentioned in dev->evbit are clean. */ input_cleanse_bitmasks(dev); diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 0c607da9ee10..9417ee0b1eff 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -556,7 +556,7 @@ config KEYBOARD_PMIC8XXX config KEYBOARD_SAMSUNG tristate "Samsung keypad support" - depends on HAVE_CLK + depends on HAS_IOMEM && HAVE_CLK select INPUT_MATRIXKMAP help Say Y here if you want to use the keypad on your Samsung mobile diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 47af62c12267..e1758d5ffe42 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -186,55 +186,21 @@ static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count, return 0; } -static int elan_enable_power(struct elan_tp_data *data) +static int elan_set_power(struct elan_tp_data *data, bool on) { int repeat = ETP_RETRY_COUNT; int error; - error = regulator_enable(data->vcc); - if (error) { - dev_err(&data->client->dev, - "failed to enable regulator: %d\n", error); - return error; - } - do { - error = data->ops->power_control(data->client, true); + error = data->ops->power_control(data->client, on); if (error >= 0) return 0; msleep(30); } while (--repeat > 0); - dev_err(&data->client->dev, "failed to enable power: %d\n", error); - return error; -} - -static int elan_disable_power(struct elan_tp_data *data) -{ - int repeat = ETP_RETRY_COUNT; - int error; - - do { - error = data->ops->power_control(data->client, false); - if (!error) { - error = regulator_disable(data->vcc); - if (error) { - dev_err(&data->client->dev, - "failed to disable regulator: %d\n", - error); - /* Attempt to power the chip back up */ - data->ops->power_control(data->client, true); - break; - } - - return 0; - } - - msleep(30); - } while (--repeat > 0); - - dev_err(&data->client->dev, "failed to disable power: %d\n", error); + dev_err(&data->client->dev, "failed to set power %s: %d\n", + on ? 
"on" : "off", error); return error; } @@ -1399,9 +1365,19 @@ static int __maybe_unused elan_suspend(struct device *dev) /* Enable wake from IRQ */ data->irq_wake = (enable_irq_wake(client->irq) == 0); } else { - ret = elan_disable_power(data); + ret = elan_set_power(data, false); + if (ret) + goto err; + + ret = regulator_disable(data->vcc); + if (ret) { + dev_err(dev, "error %d disabling regulator\n", ret); + /* Attempt to power the chip back up */ + elan_set_power(data, true); + } } +err: mutex_unlock(&data->sysfs_mutex); return ret; } @@ -1412,12 +1388,18 @@ static int __maybe_unused elan_resume(struct device *dev) struct elan_tp_data *data = i2c_get_clientdata(client); int error; - if (device_may_wakeup(dev) && data->irq_wake) { + if (!device_may_wakeup(dev)) { + error = regulator_enable(data->vcc); + if (error) { + dev_err(dev, "error %d enabling regulator\n", error); + goto err; + } + } else if (data->irq_wake) { disable_irq_wake(client->irq); data->irq_wake = false; } - error = elan_enable_power(data); + error = elan_set_power(data, true); if (error) { dev_err(dev, "power up when resuming failed: %d\n", error); goto err; diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c index a472489ccbad..164f6c757f6b 100644 --- a/drivers/input/mouse/psmouse-smbus.c +++ b/drivers/input/mouse/psmouse-smbus.c @@ -75,6 +75,8 @@ static void psmouse_smbus_detach_i2c_client(struct i2c_client *client) "Marking SMBus companion %s as gone\n", dev_name(&smbdev->client->dev)); smbdev->dead = true; + device_link_remove(&smbdev->client->dev, + &smbdev->psmouse->ps2dev.serio->dev); serio_rescan(smbdev->psmouse->ps2dev.serio); } else { list_del(&smbdev->node); @@ -174,6 +176,8 @@ static void psmouse_smbus_disconnect(struct psmouse *psmouse) kfree(smbdev); } else { smbdev->dead = true; + device_link_remove(&smbdev->client->dev, + &psmouse->ps2dev.serio->dev); psmouse_dbg(smbdev->psmouse, "posting removal request for SMBus companion %s\n", dev_name(&smbdev->client->dev)); @@ -270,6 +274,12 @@ int psmouse_smbus_init(struct psmouse *psmouse, if (smbdev->client) { /* We have our companion device */ + if (!device_link_add(&smbdev->client->dev, + &psmouse->ps2dev.serio->dev, + DL_FLAG_STATELESS)) + psmouse_warn(psmouse, + "failed to set up link with iSMBus companion %s\n", + dev_name(&smbdev->client->dev)); return 0; } diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index fcb1b646436a..1581f6ef0927 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c @@ -1787,15 +1787,13 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0); input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); - /* Verify that a device really has an endpoint */ - if (intf->cur_altsetting->desc.bNumEndpoints < 1) { + err = usb_find_common_endpoints(intf->cur_altsetting, + NULL, NULL, &endpoint, NULL); + if (err) { dev_err(&intf->dev, - "interface has %d endpoints, but must have minimum 1\n", - intf->cur_altsetting->desc.bNumEndpoints); - err = -EINVAL; + "interface has no int in endpoints, but must have minimum 1\n"); goto fail3; } - endpoint = &intf->cur_altsetting->endpoint[0].desc; /* Go set up our URB, which is called when the tablet receives * input. 
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index a3bfc7a41679..752e8ba4fecb 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -18,6 +18,7 @@ #include <linux/delay.h> #include <linux/irq.h> #include <linux/interrupt.h> +#include <linux/platform_data/x86/soc.h> #include <linux/slab.h> #include <linux/acpi.h> #include <linux/of.h> @@ -805,21 +806,6 @@ static int goodix_reset(struct goodix_ts_data *ts) } #ifdef ACPI_GPIO_SUPPORT -#include <asm/cpu_device_id.h> -#include <asm/intel-family.h> - -static const struct x86_cpu_id baytrail_cpu_ids[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, X86_FEATURE_ANY, }, - {} -}; - -static inline bool is_byt(void) -{ - const struct x86_cpu_id *id = x86_match_cpu(baytrail_cpu_ids); - - return !!id; -} - static const struct acpi_gpio_params first_gpio = { 0, 0, false }; static const struct acpi_gpio_params second_gpio = { 1, 0, false }; @@ -878,7 +864,7 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts) const struct acpi_gpio_mapping *gpio_mapping = NULL; struct device *dev = &ts->client->dev; LIST_HEAD(resources); - int ret; + int irq, ret; ts->gpio_count = 0; ts->gpio_int_idx = -1; @@ -891,6 +877,20 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts) acpi_dev_free_resource_list(&resources); + /* + * CHT devices should have a GpioInt + a regular GPIO ACPI resource. + * Some CHT devices have a bug (where there is also a bogus Interrupt + * resource copied from a previous BYT-based generation). i2c-core-acpi + * will use the non-working Interrupt resource; fix this up. + */ + if (soc_intel_is_cht() && ts->gpio_count == 2 && ts->gpio_int_idx != -1) { + irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0); + if (irq > 0 && irq != ts->client->irq) { + dev_warn(dev, "Overriding IRQ %d -> %d\n", ts->client->irq, irq); + ts->client->irq = irq; + } + } + if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) { ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO; gpio_mapping = acpi_goodix_int_first_gpios; @@ -903,7 +903,7 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts) dev_info(dev, "Using ACPI INTI and INTO methods for IRQ pin access\n"); ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_METHOD; gpio_mapping = acpi_goodix_reset_only_gpios; - } else if (is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) { + } else if (soc_intel_is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) { dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n"); ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO; gpio_mapping = acpi_goodix_int_last_gpios; diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c index 78d2ee99f37a..1b58611c8084 100644 --- a/drivers/input/touchscreen/wm97xx-core.c +++ b/drivers/input/touchscreen/wm97xx-core.c @@ -615,10 +615,9 @@ static int wm97xx_register_touch(struct wm97xx *wm) * extensions) */ wm->touch_dev = platform_device_alloc("wm97xx-touch", -1); - if (!wm->touch_dev) { - ret = -ENOMEM; - goto touch_err; - } + if (!wm->touch_dev) + return -ENOMEM; + platform_set_drvdata(wm->touch_dev, wm); wm->touch_dev->dev.parent = wm->dev; wm->touch_dev->dev.platform_data = pdata; @@ -629,9 +628,6 @@ static int wm97xx_register_touch(struct wm97xx *wm) return 0; touch_reg_err: platform_device_put(wm->touch_dev); -touch_err: - input_unregister_device(wm->input_dev); - wm->input_dev = NULL; return ret; } @@ -639,8 +635,6 @@ touch_err:
static void wm97xx_unregister_touch(struct wm97xx *wm) { platform_device_unregister(wm->touch_dev); - input_unregister_device(wm->input_dev); - wm->input_dev = NULL; } static int _wm97xx_probe(struct wm97xx *wm) diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c index 7c82c4f5fa6b..8bd03278ad9a 100644 --- a/drivers/input/touchscreen/zinitix.c +++ b/drivers/input/touchscreen/zinitix.c @@ -135,7 +135,7 @@ struct point_coord { struct touch_event { __le16 status; - u8 finger_cnt; + u8 finger_mask; u8 time_stamp; struct point_coord point_coord[MAX_SUPPORTED_FINGER_NUM]; }; @@ -322,11 +322,32 @@ static int zinitix_send_power_on_sequence(struct bt541_ts_data *bt541) static void zinitix_report_finger(struct bt541_ts_data *bt541, int slot, const struct point_coord *p) { + u16 x, y; + + if (unlikely(!(p->sub_status & + (SUB_BIT_UP | SUB_BIT_DOWN | SUB_BIT_MOVE)))) { + dev_dbg(&bt541->client->dev, "unknown finger event %#02x\n", + p->sub_status); + return; + } + + x = le16_to_cpu(p->x); + y = le16_to_cpu(p->y); + input_mt_slot(bt541->input_dev, slot); - input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER, true); - touchscreen_report_pos(bt541->input_dev, &bt541->prop, - le16_to_cpu(p->x), le16_to_cpu(p->y), true); - input_report_abs(bt541->input_dev, ABS_MT_TOUCH_MAJOR, p->width); + if (input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER, + !(p->sub_status & SUB_BIT_UP))) { + touchscreen_report_pos(bt541->input_dev, + &bt541->prop, x, y, true); + input_report_abs(bt541->input_dev, + ABS_MT_TOUCH_MAJOR, p->width); + dev_dbg(&bt541->client->dev, "finger %d %s (%u, %u)\n", + slot, p->sub_status & SUB_BIT_DOWN ? "down" : "move", + x, y); + } else { + dev_dbg(&bt541->client->dev, "finger %d up (%u, %u)\n", + slot, x, y); + } } static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler) @@ -334,6 +355,7 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler) struct bt541_ts_data *bt541 = bt541_handler; struct i2c_client *client = bt541->client; struct touch_event touch_event; + unsigned long finger_mask; int error; int i; @@ -346,10 +368,14 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler) goto out; } - for (i = 0; i < MAX_SUPPORTED_FINGER_NUM; i++) - if (touch_event.point_coord[i].sub_status & SUB_BIT_EXIST) - zinitix_report_finger(bt541, i, - &touch_event.point_coord[i]); + finger_mask = touch_event.finger_mask; + for_each_set_bit(i, &finger_mask, MAX_SUPPORTED_FINGER_NUM) { + const struct point_coord *p = &touch_event.point_coord[i]; + + /* Only process contacts that are actually reported */ + if (p->sub_status & SUB_BIT_EXIST) + zinitix_report_finger(bt541, i, p); + } input_mt_sync_frame(bt541->input_dev); input_sync(bt541->input_dev); @@ -571,8 +597,20 @@ static SIMPLE_DEV_PM_OPS(zinitix_pm_ops, zinitix_suspend, zinitix_resume); #ifdef CONFIG_OF static const struct of_device_id zinitix_of_match[] = { + { .compatible = "zinitix,bt402" }, + { .compatible = "zinitix,bt403" }, + { .compatible = "zinitix,bt404" }, + { .compatible = "zinitix,bt412" }, + { .compatible = "zinitix,bt413" }, + { .compatible = "zinitix,bt431" }, + { .compatible = "zinitix,bt432" }, + { .compatible = "zinitix,bt531" }, { .compatible = "zinitix,bt532" }, + { .compatible = "zinitix,bt538" }, { .compatible = "zinitix,bt541" }, + { .compatible = "zinitix,bt548" }, + { .compatible = "zinitix,bt554" }, + { .compatible = "zinitix,at100" }, { } }; MODULE_DEVICE_TABLE(of, zinitix_of_match); diff --git a/drivers/iommu/Kconfig 
b/drivers/iommu/Kconfig index 3eb68fa1b8cc..c79a0df090c0 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -144,8 +144,8 @@ config IOMMU_DMA select IRQ_MSI_IOMMU select NEED_SG_DMA_LENGTH -# Shared Virtual Addressing library -config IOMMU_SVA_LIB +# Shared Virtual Addressing +config IOMMU_SVA bool select IOASID @@ -379,7 +379,7 @@ config ARM_SMMU_V3 config ARM_SMMU_V3_SVA bool "Shared Virtual Addressing support for the ARM SMMUv3" depends on ARM_SMMU_V3 - select IOMMU_SVA_LIB + select IOMMU_SVA select MMU_NOTIFIER help Support for sharing process address spaces with devices using the diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index bc7f730edbb0..44475a9b3eea 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -27,6 +27,6 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o obj-$(CONFIG_S390_IOMMU) += s390-iommu.o obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o -obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o io-pgfault.o +obj-$(CONFIG_IOMMU_SVA) += iommu-sva-lib.o io-pgfault.o obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o obj-$(CONFIG_APPLE_DART) += apple-dart.o diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 416815a525d6..bb95edf74415 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -14,6 +14,7 @@ extern irqreturn_t amd_iommu_int_thread(int irq, void *data); extern irqreturn_t amd_iommu_int_handler(int irq, void *data); extern void amd_iommu_apply_erratum_63(u16 devid); +extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu); extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); extern int amd_iommu_init_devices(void); extern void amd_iommu_uninit_devices(void); diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index ffc89c4fb120..47108ed44fbb 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -110,6 +110,7 @@ #define PASID_MASK 0x0000ffff /* MMIO status bits */ +#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK (1 << 0) #define MMIO_STATUS_EVT_INT_MASK (1 << 1) #define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2) #define MMIO_STATUS_PPR_INT_MASK (1 << 6) diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index dc338acf3338..7bfe37e52e21 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -21,6 +21,7 @@ #include <linux/export.h> #include <linux/kmemleak.h> #include <linux/cc_platform.h> +#include <linux/iopoll.h> #include <asm/pci-direct.h> #include <asm/iommu.h> #include <asm/apic.h> @@ -657,6 +658,16 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu) } /* + * This function restarts event logging in case the IOMMU experienced + * an event log buffer overflow. + */ +void amd_iommu_restart_event_logging(struct amd_iommu *iommu) +{ + iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); + iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); +} + +/* * This function resets the command buffer if the IOMMU stopped fetching * commands from it. 
*/ @@ -834,6 +845,7 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu) status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); if (status & (MMIO_STATUS_GALOG_RUN_MASK)) break; + udelay(10); } if (WARN_ON(i >= LOOP_TIMEOUT)) diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c index b1bf4125b0f7..6608d1717574 100644 --- a/drivers/iommu/amd/io_pgtable.c +++ b/drivers/iommu/amd/io_pgtable.c @@ -492,18 +492,18 @@ static void v1_free_pgtable(struct io_pgtable *iop) dom = container_of(pgtable, struct protection_domain, iop); - /* Update data structure */ - amd_iommu_domain_clr_pt_root(dom); - - /* Make changes visible to IOMMUs */ - amd_iommu_domain_update(dom); - /* Page-table is not visible to IOMMU anymore, so free it */ BUG_ON(pgtable->mode < PAGE_MODE_NONE || pgtable->mode > PAGE_MODE_6_LEVEL); free_sub_pt(pgtable->root, pgtable->mode, &freelist); + /* Update data structure */ + amd_iommu_domain_clr_pt_root(dom); + + /* Make changes visible to IOMMUs */ + amd_iommu_domain_update(dom); + put_pages_list(&freelist); } diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 461f1844ed1f..a18b549951bb 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -764,7 +764,8 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } #endif /* !CONFIG_IRQ_REMAP */ #define AMD_IOMMU_INT_MASK \ - (MMIO_STATUS_EVT_INT_MASK | \ + (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \ + MMIO_STATUS_EVT_INT_MASK | \ MMIO_STATUS_PPR_INT_MASK | \ MMIO_STATUS_GALOG_INT_MASK) @@ -774,7 +775,7 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); while (status & AMD_IOMMU_INT_MASK) { - /* Enable EVT and PPR and GA interrupts again */ + /* Enable interrupt sources again */ writel(AMD_IOMMU_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); @@ -795,6 +796,11 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) } #endif + if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) { + pr_info_ratelimited("IOMMU event log overflow\n"); + amd_iommu_restart_event_logging(iommu); + } + /* * Hardware bug: ERBT1312 * When re-enabling interrupt (by writing 1 diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index a737ba5f727e..22ddd05bbdcd 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -340,14 +340,12 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm) bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm); if (IS_ERR(bond->smmu_mn)) { ret = PTR_ERR(bond->smmu_mn); - goto err_free_pasid; + goto err_free_bond; } list_add(&bond->list, &master->bonds); return &bond->sva; -err_free_pasid: - iommu_sva_free_pasid(mm); err_free_bond: kfree(bond); return ERR_PTR(ret); @@ -377,7 +375,6 @@ void arm_smmu_sva_unbind(struct iommu_sva *handle) if (refcount_dec_and_test(&bond->refs)) { list_del(&bond->list); arm_smmu_mmu_notifier_put(bond->smmu_mn); - iommu_sva_free_pasid(bond->mm); kfree(bond); } mutex_unlock(&sva_lock); diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig index 247d0f2d5fdf..39a06d245f12 100644 --- a/drivers/iommu/intel/Kconfig +++ b/drivers/iommu/intel/Kconfig @@ -52,7 +52,7 @@ config INTEL_IOMMU_SVM select PCI_PRI select MMU_NOTIFIER select IOASID - select IOMMU_SVA_LIB + select IOMMU_SVA help Shared Virtual Memory (SVM) provides a facility for devices to access DMA resources through process address space by diff --git 
a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 92fea3fbbb11..1ce1741a7fa4 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2738,7 +2738,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, spin_unlock_irqrestore(&device_domain_lock, flags); /* PASID table is mandatory for a PCI device in scalable mode. */ - if (dev && dev_is_pci(dev) && sm_supported(iommu)) { + if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { ret = intel_pasid_alloc_table(dev); if (ret) { dev_err(dev, "PASID table allocation failed\n"); @@ -4781,7 +4781,7 @@ attach_failed: link_failed: spin_unlock_irqrestore(&device_domain_lock, flags); if (list_empty(&domain->subdevices) && domain->default_pasid > 0) - ioasid_put(domain->default_pasid); + ioasid_free(domain->default_pasid); return ret; } @@ -4811,7 +4811,7 @@ static void aux_domain_remove_dev(struct dmar_domain *domain, spin_unlock_irqrestore(&device_domain_lock, flags); if (list_empty(&domain->subdevices) && domain->default_pasid > 0) - ioasid_put(domain->default_pasid); + ioasid_free(domain->default_pasid); } static int prepare_domain_attach_device(struct iommu_domain *domain, diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index f912fe45bea2..a67319597884 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -569,9 +569,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) fn, &intel_ir_domain_ops, iommu); if (!iommu->ir_domain) { - irq_domain_free_fwnode(fn); pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); - goto out_free_bitmap; + goto out_free_fwnode; } iommu->ir_msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, @@ -595,7 +594,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) if (dmar_enable_qi(iommu)) { pr_err("Failed to enable queued invalidation\n"); - goto out_free_bitmap; + goto out_free_ir_domain; } } @@ -619,6 +618,14 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) return 0; +out_free_ir_domain: + if (iommu->ir_msi_domain) + irq_domain_remove(iommu->ir_msi_domain); + iommu->ir_msi_domain = NULL; + irq_domain_remove(iommu->ir_domain); + iommu->ir_domain = NULL; +out_free_fwnode: + irq_domain_free_fwnode(fn); out_free_bitmap: bitmap_free(bitmap); out_free_pages: diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 5b5d69b04fcc..51ac2096b3da 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -514,11 +514,6 @@ static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm, return iommu_sva_alloc_pasid(mm, PASID_MIN, max_pasid - 1); } -static void intel_svm_free_pasid(struct mm_struct *mm) -{ - iommu_sva_free_pasid(mm); -} - static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev, struct mm_struct *mm, @@ -662,8 +657,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid) kfree(svm); } } - /* Drop a PASID reference and free it if no reference. 
*/ - intel_svm_free_pasid(mm); } out: return ret; @@ -1047,8 +1040,6 @@ struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void } sva = intel_svm_bind_mm(iommu, dev, mm, flags); - if (IS_ERR_OR_NULL(sva)) - intel_svm_free_pasid(mm); mutex_unlock(&pasid_mutex); return sva; diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c index 50ee27bbd04e..a786c034907c 100644 --- a/drivers/iommu/ioasid.c +++ b/drivers/iommu/ioasid.c @@ -2,7 +2,7 @@ /* * I/O Address Space ID allocator. There is one global IOASID space, split into * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and - * free IOASIDs with ioasid_alloc and ioasid_put. + * free IOASIDs with ioasid_alloc() and ioasid_free(). */ #include <linux/ioasid.h> #include <linux/module.h> @@ -15,7 +15,6 @@ struct ioasid_data { struct ioasid_set *set; void *private; struct rcu_head rcu; - refcount_t refs; }; /* @@ -315,7 +314,6 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, data->set = set; data->private = private; - refcount_set(&data->refs, 1); /* * Custom allocator needs allocator data to perform platform specific @@ -348,34 +346,11 @@ exit_free: EXPORT_SYMBOL_GPL(ioasid_alloc); /** - * ioasid_get - obtain a reference to the IOASID - */ -void ioasid_get(ioasid_t ioasid) -{ - struct ioasid_data *ioasid_data; - - spin_lock(&ioasid_allocator_lock); - ioasid_data = xa_load(&active_allocator->xa, ioasid); - if (ioasid_data) - refcount_inc(&ioasid_data->refs); - else - WARN_ON(1); - spin_unlock(&ioasid_allocator_lock); -} -EXPORT_SYMBOL_GPL(ioasid_get); - -/** - * ioasid_put - Release a reference to an ioasid + * ioasid_free - Free an ioasid * @ioasid: the ID to remove - * - * Put a reference to the IOASID, free it when the number of references drops to - * zero. - * - * Return: %true if the IOASID was freed, %false otherwise. */ -bool ioasid_put(ioasid_t ioasid) +void ioasid_free(ioasid_t ioasid) { - bool free = false; struct ioasid_data *ioasid_data; spin_lock(&ioasid_allocator_lock); @@ -385,10 +360,6 @@ bool ioasid_put(ioasid_t ioasid) goto exit_unlock; } - free = refcount_dec_and_test(&ioasid_data->refs); - if (!free) - goto exit_unlock; - active_allocator->ops->free(ioasid, active_allocator->ops->pdata); /* Custom allocator needs additional steps to free the xa element */ if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) { @@ -398,9 +369,8 @@ bool ioasid_put(ioasid_t ioasid) exit_unlock: spin_unlock(&ioasid_allocator_lock); - return free; } -EXPORT_SYMBOL_GPL(ioasid_put); +EXPORT_SYMBOL_GPL(ioasid_free); /** * ioasid_find - Find IOASID data diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c index bd41405d34e9..106506143896 100644 --- a/drivers/iommu/iommu-sva-lib.c +++ b/drivers/iommu/iommu-sva-lib.c @@ -18,8 +18,7 @@ static DECLARE_IOASID_SET(iommu_sva_pasid); * * Try to allocate a PASID for this mm, or take a reference to the existing one * provided it fits within the [@min, @max] range. On success the PASID is - * available in mm->pasid, and must be released with iommu_sva_free_pasid(). - * @min must be greater than 0, because 0 indicates an unused mm->pasid. + * available in mm->pasid and will be available for the lifetime of the mm. * * Returns 0 on success and < 0 on error. 
*/ @@ -33,38 +32,24 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max) return -EINVAL; mutex_lock(&iommu_sva_lock); - if (mm->pasid) { - if (mm->pasid >= min && mm->pasid <= max) - ioasid_get(mm->pasid); - else + /* Is a PASID already associated with this mm? */ + if (pasid_valid(mm->pasid)) { + if (mm->pasid < min || mm->pasid >= max) ret = -EOVERFLOW; - } else { - pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm); - if (pasid == INVALID_IOASID) - ret = -ENOMEM; - else - mm->pasid = pasid; + goto out; } + + pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm); + if (!pasid_valid(pasid)) + ret = -ENOMEM; + else + mm_pasid_set(mm, pasid); +out: mutex_unlock(&iommu_sva_lock); return ret; } EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid); -/** - * iommu_sva_free_pasid - Release the mm's PASID - * @mm: the mm - * - * Drop one reference to a PASID allocated with iommu_sva_alloc_pasid() - */ -void iommu_sva_free_pasid(struct mm_struct *mm) -{ - mutex_lock(&iommu_sva_lock); - if (ioasid_put(mm->pasid)) - mm->pasid = 0; - mutex_unlock(&iommu_sva_lock); -} -EXPORT_SYMBOL_GPL(iommu_sva_free_pasid); - /* ioasid_find getter() requires a void * argument */ static bool __mmget_not_zero(void *mm) { diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva-lib.h index 031155010ca8..8909ea1094e3 100644 --- a/drivers/iommu/iommu-sva-lib.h +++ b/drivers/iommu/iommu-sva-lib.h @@ -9,7 +9,6 @@ #include <linux/mm_types.h> int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max); -void iommu_sva_free_pasid(struct mm_struct *mm); struct mm_struct *iommu_sva_find(ioasid_t pasid); /* I/O Page fault */ @@ -17,7 +16,7 @@ struct device; struct iommu_fault; struct iopf_queue; -#ifdef CONFIG_IOMMU_SVA_LIB +#ifdef CONFIG_IOMMU_SVA int iommu_queue_iopf(struct iommu_fault *fault, void *cookie); int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev); @@ -28,7 +27,7 @@ struct iopf_queue *iopf_queue_alloc(const char *name); void iopf_queue_free(struct iopf_queue *queue); int iopf_queue_discard_partial(struct iopf_queue *queue); -#else /* CONFIG_IOMMU_SVA_LIB */ +#else /* CONFIG_IOMMU_SVA */ static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie) { return -ENODEV; @@ -64,5 +63,5 @@ static inline int iopf_queue_discard_partial(struct iopf_queue *queue) { return -ENODEV; } -#endif /* CONFIG_IOMMU_SVA_LIB */ +#endif /* CONFIG_IOMMU_SVA */ #endif /* _IOMMU_SVA_LIB_H */ diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 8b86406b7162..107dcf5938d6 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -207,9 +207,14 @@ static struct dev_iommu *dev_iommu_get(struct device *dev) static void dev_iommu_free(struct device *dev) { - iommu_fwspec_free(dev); - kfree(dev->iommu); + struct dev_iommu *param = dev->iommu; + dev->iommu = NULL; + if (param->fwspec) { + fwnode_handle_put(param->fwspec->iommu_fwnode); + kfree(param->fwspec); + } + kfree(param); } static int __iommu_probe_device(struct device *dev, struct list_head *group_list) @@ -980,17 +985,6 @@ static int iommu_group_device_count(struct iommu_group *group) return ret; } -/** - * iommu_group_for_each_dev - iterate over each device in the group - * @group: the group - * @data: caller opaque data to be passed to callback function - * @fn: caller supplied callback function - * - * This function is called by group users to iterate over group devices. - * Callers should hold a reference count to the group during callback. 
- * The group->mutex is held across callbacks, which will block calls to - * iommu_group_add/remove_device. - */ static int __iommu_group_for_each_dev(struct iommu_group *group, void *data, int (*fn)(struct device *, void *)) { @@ -1005,7 +999,17 @@ static int __iommu_group_for_each_dev(struct iommu_group *group, void *data, return ret; } - +/** + * iommu_group_for_each_dev - iterate over each device in the group + * @group: the group + * @data: caller opaque data to be passed to callback function + * @fn: caller supplied callback function + * + * This function is called by group users to iterate over group devices. + * Callers should hold a reference count to the group during callback. + * The group->mutex is held across callbacks, which will block calls to + * iommu_group_add/remove_device. + */ int iommu_group_for_each_dev(struct iommu_group *group, void *data, int (*fn)(struct device *, void *)) { @@ -3032,6 +3036,7 @@ EXPORT_SYMBOL_GPL(iommu_aux_get_pasid); * iommu_sva_bind_device() - Bind a process address space to a device * @dev: the device * @mm: the mm to bind, caller must hold a reference to it + * @drvdata: opaque data pointer to pass to bind callback * * Create a bond between device and address space, allowing the device to access * the mm using the returned PASID. If a bond already exists between @device and diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 91749654fd49..980e4af3f06b 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1085,7 +1085,7 @@ static __maybe_unused int omap_iommu_runtime_resume(struct device *dev) } /** - * omap_iommu_suspend_prepare - prepare() dev_pm_ops implementation + * omap_iommu_prepare - prepare() dev_pm_ops implementation * @dev: iommu device * * This function performs the necessary checks to determine if the IOMMU diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index e900e3c46903..2561ce8a2ce8 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -808,8 +808,10 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np) return NULL; mc = platform_get_drvdata(pdev); - if (!mc) + if (!mc) { + put_device(&pdev->dev); return NULL; + } return mc->smmu; } diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 7038957f4a77..680d2fcf2686 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -430,6 +430,14 @@ config QCOM_PDC Power Domain Controller driver to manage and configure wakeup IRQs for Qualcomm Technologies Inc (QTI) mobile chips. +config QCOM_MPM + tristate "QCOM MPM" + depends on ARCH_QCOM + select IRQ_DOMAIN_HIERARCHY + help + MSM Power Manager driver to manage and configure wakeup + IRQs for Qualcomm Technologies Inc (QTI) mobile chips. 
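
[Editor's note] The new QCOM_MPM entry selects IRQ_DOMAIN_HIERARCHY because an MPM-style wakeup controller sits between wakeup-capable pins and the parent interrupt controller. As a rough sketch of that shape (all names below are hypothetical and are not taken from the irq-qcom-mpm driver itself), such a chip typically forwards mask/unmask to its parent domain and only latches which pins may wake the SoC:

#include <linux/bitops.h>
#include <linux/irq.h>

/* Hypothetical per-controller state: one wake bit per wakeup pin. */
struct example_mpm {
	unsigned long *wake_enabled;
};

static int example_mpm_set_wake(struct irq_data *d, unsigned int on)
{
	struct example_mpm *mpm = irq_data_get_irq_chip_data(d);

	/* Record the wake set now; hardware is programmed at suspend time. */
	if (on)
		set_bit(d->hwirq, mpm->wake_enabled);
	else
		clear_bit(d->hwirq, mpm->wake_enabled);

	return 0;
}

static struct irq_chip example_mpm_chip = {
	.name		= "example-mpm",
	.irq_mask	= irq_chip_mask_parent,
	.irq_unmask	= irq_chip_unmask_parent,
	.irq_set_wake	= example_mpm_set_wake,
};

Keeping the wake mask in software until suspend lets a driver of this shape program the wakeup hardware in one pass from a suspend hook, which matches the "manage and configure wakeup IRQs" role the help text describes.
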
+ config CSKY_MPINTC bool depends on CSKY diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index c1f611cbfbf8..1f8990f812f1 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -94,6 +94,7 @@ obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o obj-$(CONFIG_NDS32) += irq-ativic32.o obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o +obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c index 38091ebb9403..12dd48727a15 100644 --- a/drivers/irqchip/irq-apple-aic.c +++ b/drivers/irqchip/irq-apple-aic.c @@ -24,7 +24,7 @@ * - Default "this CPU" register view and explicit per-CPU views * * In addition, this driver also handles FIQs, as these are routed to the same - * IRQ vector. These are used for Fast IPIs (TODO), the ARMv8 timer IRQs, and + * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and * performance counters (TODO). * * Implementation notes: @@ -52,9 +52,12 @@ #include <linux/irqchip.h> #include <linux/irqchip/arm-vgic-info.h> #include <linux/irqdomain.h> +#include <linux/jump_label.h> #include <linux/limits.h> #include <linux/of_address.h> #include <linux/slab.h> +#include <asm/apple_m1_pmu.h> +#include <asm/cputype.h> #include <asm/exception.h> #include <asm/sysreg.h> #include <asm/virt.h> @@ -62,20 +65,22 @@ #include <dt-bindings/interrupt-controller/apple-aic.h> /* - * AIC registers (MMIO) + * AIC v1 registers (MMIO) */ #define AIC_INFO 0x0004 -#define AIC_INFO_NR_HW GENMASK(15, 0) +#define AIC_INFO_NR_IRQ GENMASK(15, 0) #define AIC_CONFIG 0x0010 #define AIC_WHOAMI 0x2000 #define AIC_EVENT 0x2004 -#define AIC_EVENT_TYPE GENMASK(31, 16) +#define AIC_EVENT_DIE GENMASK(31, 24) +#define AIC_EVENT_TYPE GENMASK(23, 16) #define AIC_EVENT_NUM GENMASK(15, 0) -#define AIC_EVENT_TYPE_HW 1 +#define AIC_EVENT_TYPE_FIQ 0 /* Software use */ +#define AIC_EVENT_TYPE_IRQ 1 #define AIC_EVENT_TYPE_IPI 4 #define AIC_EVENT_IPI_OTHER 1 #define AIC_EVENT_IPI_SELF 2 @@ -91,34 +96,73 @@ #define AIC_IPI_SELF BIT(31) #define AIC_TARGET_CPU 0x3000 -#define AIC_SW_SET 0x4000 -#define AIC_SW_CLR 0x4080 -#define AIC_MASK_SET 0x4100 -#define AIC_MASK_CLR 0x4180 #define AIC_CPU_IPI_SET(cpu) (0x5008 + ((cpu) << 7)) #define AIC_CPU_IPI_CLR(cpu) (0x500c + ((cpu) << 7)) #define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7)) #define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7)) +#define AIC_MAX_IRQ 0x400 + +/* + * AIC v2 registers (MMIO) + */ + +#define AIC2_VERSION 0x0000 +#define AIC2_VERSION_VER GENMASK(7, 0) + +#define AIC2_INFO1 0x0004 +#define AIC2_INFO1_NR_IRQ GENMASK(15, 0) +#define AIC2_INFO1_LAST_DIE GENMASK(27, 24) + +#define AIC2_INFO2 0x0008 + +#define AIC2_INFO3 0x000c +#define AIC2_INFO3_MAX_IRQ GENMASK(15, 0) +#define AIC2_INFO3_MAX_DIE GENMASK(27, 24) + +#define AIC2_RESET 0x0010 +#define AIC2_RESET_RESET BIT(0) + +#define AIC2_CONFIG 0x0014 +#define AIC2_CONFIG_ENABLE BIT(0) +#define AIC2_CONFIG_PREFER_PCPU BIT(28) + +#define AIC2_TIMEOUT 0x0028 +#define AIC2_CLUSTER_PRIO 0x0030 +#define AIC2_DELAY_GROUPS 0x0100 + +#define AIC2_IRQ_CFG 0x2000 + +/* + * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG: + * + * Repeat for each die: + * IRQ_CFG: u32 * MAX_IRQS + * SW_SET: u32 * (MAX_IRQS / 32) + * SW_CLR: u32 * (MAX_IRQS / 32) + * MASK_SET: u32 * (MAX_IRQS / 32) + * MASK_CLR: u32 * (MAX_IRQS / 32) 
+ * HW_STATE: u32 * (MAX_IRQS / 32) + * + * This is followed by a set of event registers, each 16K page aligned. + * The first one is the AP event register we will use. Unfortunately, + * the actual implemented die count is not specified anywhere in the + * capability registers, so we have to explicitly specify the event + * register as a second reg entry in the device tree to remain + * forward-compatible. + */ + +#define AIC2_IRQ_CFG_TARGET GENMASK(3, 0) +#define AIC2_IRQ_CFG_DELAY_IDX GENMASK(7, 5) + #define MASK_REG(x) (4 * ((x) >> 5)) #define MASK_BIT(x) BIT((x) & GENMASK(4, 0)) /* * IMP-DEF sysregs that control FIQ sources - * Note: sysreg-based IPIs are not supported yet. */ -/* Core PMC control register */ -#define SYS_IMP_APL_PMCR0_EL1 sys_reg(3, 1, 15, 0, 0) -#define PMCR0_IMODE GENMASK(10, 8) -#define PMCR0_IMODE_OFF 0 -#define PMCR0_IMODE_PMI 1 -#define PMCR0_IMODE_AIC 2 -#define PMCR0_IMODE_HALT 3 -#define PMCR0_IMODE_FIQ 4 -#define PMCR0_IACT BIT(11) - /* IPI request registers */ #define SYS_IMP_APL_IPI_RR_LOCAL_EL1 sys_reg(3, 5, 15, 0, 0) #define SYS_IMP_APL_IPI_RR_GLOBAL_EL1 sys_reg(3, 5, 15, 0, 1) @@ -155,7 +199,18 @@ #define SYS_IMP_APL_UPMSR_EL1 sys_reg(3, 7, 15, 6, 4) #define UPMSR_IACT BIT(0) -#define AIC_NR_FIQ 4 +/* MPIDR fields */ +#define MPIDR_CPU(x) MPIDR_AFFINITY_LEVEL(x, 0) +#define MPIDR_CLUSTER(x) MPIDR_AFFINITY_LEVEL(x, 1) + +#define AIC_IRQ_HWIRQ(die, irq) (FIELD_PREP(AIC_EVENT_DIE, die) | \ + FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \ + FIELD_PREP(AIC_EVENT_NUM, irq)) +#define AIC_FIQ_HWIRQ(x) (FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \ + FIELD_PREP(AIC_EVENT_NUM, x)) +#define AIC_HWIRQ_IRQ(x) FIELD_GET(AIC_EVENT_NUM, x) +#define AIC_HWIRQ_DIE(x) FIELD_GET(AIC_EVENT_DIE, x) +#define AIC_NR_FIQ 6 #define AIC_NR_SWIPI 32 /* @@ -173,11 +228,81 @@ #define AIC_TMR_EL02_PHYS AIC_TMR_GUEST_PHYS #define AIC_TMR_EL02_VIRT AIC_TMR_GUEST_VIRT +DEFINE_STATIC_KEY_TRUE(use_fast_ipi); + +struct aic_info { + int version; + + /* Register offsets */ + u32 event; + u32 target_cpu; + u32 irq_cfg; + u32 sw_set; + u32 sw_clr; + u32 mask_set; + u32 mask_clr; + + u32 die_stride; + + /* Features */ + bool fast_ipi; +}; + +static const struct aic_info aic1_info = { + .version = 1, + + .event = AIC_EVENT, + .target_cpu = AIC_TARGET_CPU, +}; + +static const struct aic_info aic1_fipi_info = { + .version = 1, + + .event = AIC_EVENT, + .target_cpu = AIC_TARGET_CPU, + + .fast_ipi = true, +}; + +static const struct aic_info aic2_info = { + .version = 2, + + .irq_cfg = AIC2_IRQ_CFG, + + .fast_ipi = true, +}; + +static const struct of_device_id aic_info_match[] = { + { + .compatible = "apple,t8103-aic", + .data = &aic1_fipi_info, + }, + { + .compatible = "apple,aic", + .data = &aic1_info, + }, + { + .compatible = "apple,aic2", + .data = &aic2_info, + }, + {} +}; + struct aic_irq_chip { void __iomem *base; + void __iomem *event; struct irq_domain *hw_domain; struct irq_domain *ipi_domain; - int nr_hw; + struct { + cpumask_t aff; + } *fiq_aff[AIC_NR_FIQ]; + + int nr_irq; + int max_irq; + int nr_die; + int max_die; + + struct aic_info info; }; static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked); @@ -205,18 +330,24 @@ static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val) static void aic_irq_mask(struct irq_data *d) { + irq_hw_number_t hwirq = irqd_to_hwirq(d); struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d); - aic_ic_write(ic, AIC_MASK_SET + MASK_REG(irqd_to_hwirq(d)), - MASK_BIT(irqd_to_hwirq(d))); + u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride; + 
u32 irq = AIC_HWIRQ_IRQ(hwirq); + + aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq)); } static void aic_irq_unmask(struct irq_data *d) { + irq_hw_number_t hwirq = irqd_to_hwirq(d); struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d); - aic_ic_write(ic, AIC_MASK_CLR + MASK_REG(d->hwirq), - MASK_BIT(irqd_to_hwirq(d))); + u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride; + u32 irq = AIC_HWIRQ_IRQ(hwirq); + + aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq)); } static void aic_irq_eoi(struct irq_data *d) @@ -239,12 +370,12 @@ static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs) * We cannot use a relaxed read here, as reads from DMA buffers * need to be ordered after the IRQ fires. */ - event = readl(ic->base + AIC_EVENT); + event = readl(ic->event + ic->info.event); type = FIELD_GET(AIC_EVENT_TYPE, event); irq = FIELD_GET(AIC_EVENT_NUM, event); - if (type == AIC_EVENT_TYPE_HW) - generic_handle_domain_irq(aic_irqc->hw_domain, irq); + if (type == AIC_EVENT_TYPE_IRQ) + generic_handle_domain_irq(aic_irqc->hw_domain, event); else if (type == AIC_EVENT_TYPE_IPI && irq == 1) aic_handle_ipi(regs); else if (event != 0) @@ -271,12 +402,14 @@ static int aic_irq_set_affinity(struct irq_data *d, struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d); int cpu; + BUG_ON(!ic->info.target_cpu); + if (force) cpu = cpumask_first(mask_val); else cpu = cpumask_any_and(mask_val, cpu_online_mask); - aic_ic_write(ic, AIC_TARGET_CPU + hwirq * 4, BIT(cpu)); + aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu)); irq_data_update_effective_affinity(d, cpumask_of(cpu)); return IRQ_SET_MASK_OK; @@ -300,15 +433,21 @@ static struct irq_chip aic_chip = { .irq_set_type = aic_irq_set_type, }; +static struct irq_chip aic2_chip = { + .name = "AIC2", + .irq_mask = aic_irq_mask, + .irq_unmask = aic_irq_unmask, + .irq_eoi = aic_irq_eoi, + .irq_set_type = aic_irq_set_type, +}; + /* * FIQ irqchip */ static unsigned long aic_fiq_get_idx(struct irq_data *d) { - struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d); - - return irqd_to_hwirq(d) - ic->nr_hw; + return AIC_HWIRQ_IRQ(irqd_to_hwirq(d)); } static void aic_fiq_set_mask(struct irq_data *d) @@ -386,17 +525,21 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs) */ if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) { - pr_err_ratelimited("Fast IPI fired. Acking.\n"); - write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1); + if (static_branch_likely(&use_fast_ipi)) { + aic_handle_ipi(regs); + } else { + pr_err_ratelimited("Fast IPI fired. 
Acking.\n"); + write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1); + } } if (TIMER_FIRING(read_sysreg(cntp_ctl_el0))) generic_handle_domain_irq(aic_irqc->hw_domain, - aic_irqc->nr_hw + AIC_TMR_EL0_PHYS); + AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS)); if (TIMER_FIRING(read_sysreg(cntv_ctl_el0))) generic_handle_domain_irq(aic_irqc->hw_domain, - aic_irqc->nr_hw + AIC_TMR_EL0_VIRT); + AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT)); if (is_kernel_in_hyp_mode()) { uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2); @@ -404,24 +547,23 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs) if ((enabled & VM_TMR_FIQ_ENABLE_P) && TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02))) generic_handle_domain_irq(aic_irqc->hw_domain, - aic_irqc->nr_hw + AIC_TMR_EL02_PHYS); + AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS)); if ((enabled & VM_TMR_FIQ_ENABLE_V) && TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02))) generic_handle_domain_irq(aic_irqc->hw_domain, - aic_irqc->nr_hw + AIC_TMR_EL02_VIRT); + AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT)); } - if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) == - (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) { - /* - * Not supported yet, let's figure out how to handle this when - * we implement these proprietary performance counters. For now, - * just mask it and move on. - */ - pr_err_ratelimited("PMC FIQ fired. Masking.\n"); - sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT, - FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF)); + if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) { + int irq; + if (cpumask_test_cpu(smp_processor_id(), + &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff)) + irq = AIC_CPU_PMU_P; + else + irq = AIC_CPU_PMU_E; + generic_handle_domain_irq(aic_irqc->hw_domain, + AIC_FIQ_HWIRQ(irq)); } if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ && @@ -455,13 +597,29 @@ static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq, irq_hw_number_t hw) { struct aic_irq_chip *ic = id->host_data; + u32 type = FIELD_GET(AIC_EVENT_TYPE, hw); + struct irq_chip *chip = &aic_chip; - if (hw < ic->nr_hw) { - irq_domain_set_info(id, irq, hw, &aic_chip, id->host_data, + if (ic->info.version == 2) + chip = &aic2_chip; + + if (type == AIC_EVENT_TYPE_IRQ) { + irq_domain_set_info(id, irq, hw, chip, id->host_data, handle_fasteoi_irq, NULL, NULL); irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); } else { - irq_set_percpu_devid(irq); + int fiq = FIELD_GET(AIC_EVENT_NUM, hw); + + switch (fiq) { + case AIC_CPU_PMU_P: + case AIC_CPU_PMU_E: + irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff); + break; + default: + irq_set_percpu_devid(irq); + break; + } + irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data, handle_percpu_devid_irq, NULL, NULL); } @@ -475,32 +633,46 @@ static int aic_irq_domain_translate(struct irq_domain *id, unsigned int *type) { struct aic_irq_chip *ic = id->host_data; + u32 *args; + u32 die = 0; - if (fwspec->param_count != 3 || !is_of_node(fwspec->fwnode)) + if (fwspec->param_count < 3 || fwspec->param_count > 4 || + !is_of_node(fwspec->fwnode)) return -EINVAL; + args = &fwspec->param[1]; + + if (fwspec->param_count == 4) { + die = args[0]; + args++; + } + switch (fwspec->param[0]) { case AIC_IRQ: - if (fwspec->param[1] >= ic->nr_hw) + if (die >= ic->nr_die) return -EINVAL; - *hwirq = fwspec->param[1]; + if (args[0] >= ic->nr_irq) + return -EINVAL; + *hwirq = AIC_IRQ_HWIRQ(die, args[0]); break; case AIC_FIQ: - if (fwspec->param[1] >= AIC_NR_FIQ) + if (die != 0) 
+ return -EINVAL; + if (args[0] >= AIC_NR_FIQ) return -EINVAL; - *hwirq = ic->nr_hw + fwspec->param[1]; + *hwirq = AIC_FIQ_HWIRQ(args[0]); /* * In EL1 the non-redirected registers are the guest's, * not EL2's, so remap the hwirqs to match. */ if (!is_kernel_in_hyp_mode()) { - switch (fwspec->param[1]) { + switch (args[0]) { case AIC_TMR_GUEST_PHYS: - *hwirq = ic->nr_hw + AIC_TMR_EL0_PHYS; + *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS); break; case AIC_TMR_GUEST_VIRT: - *hwirq = ic->nr_hw + AIC_TMR_EL0_VIRT; + *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT); break; case AIC_TMR_HV_PHYS: case AIC_TMR_HV_VIRT: @@ -514,7 +686,7 @@ static int aic_irq_domain_translate(struct irq_domain *id, return -EINVAL; } - *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + *type = args[1] & IRQ_TYPE_SENSE_MASK; return 0; } @@ -563,6 +735,22 @@ static const struct irq_domain_ops aic_irq_domain_ops = { * IPI irqchip */ +static void aic_ipi_send_fast(int cpu) +{ + u64 mpidr = cpu_logical_map(cpu); + u64 my_mpidr = read_cpuid_mpidr(); + u64 cluster = MPIDR_CLUSTER(mpidr); + u64 idx = MPIDR_CPU(mpidr); + + if (MPIDR_CLUSTER(my_mpidr) == cluster) + write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx), + SYS_IMP_APL_IPI_RR_LOCAL_EL1); + else + write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster), + SYS_IMP_APL_IPI_RR_GLOBAL_EL1); + isb(); +} + static void aic_ipi_mask(struct irq_data *d) { u32 irq_bit = BIT(irqd_to_hwirq(d)); @@ -588,8 +776,12 @@ static void aic_ipi_unmask(struct irq_data *d) * If a pending vIPI was unmasked, raise a HW IPI to ourselves. * No barriers needed here since this is a self-IPI. */ - if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) - aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id())); + if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) { + if (static_branch_likely(&use_fast_ipi)) + aic_ipi_send_fast(smp_processor_id()); + else + aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id())); + } } static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) @@ -617,8 +809,12 @@ static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) smp_mb__after_atomic(); if (!(pending & irq_bit) && - (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) - send |= AIC_IPI_SEND_CPU(cpu); + (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) { + if (static_branch_likely(&use_fast_ipi)) + aic_ipi_send_fast(cpu); + else + send |= AIC_IPI_SEND_CPU(cpu); + } } /* @@ -650,8 +846,16 @@ static void aic_handle_ipi(struct pt_regs *regs) /* * Ack the IPI. We need to order this after the AIC event read, but * that is enforced by normal MMIO ordering guarantees. + * + * For the Fast IPI case, this needs to be ordered before the vIPI + * handling below, so we need to isb(); */ - aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER); + if (static_branch_likely(&use_fast_ipi)) { + write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1); + isb(); + } else { + aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER); + } /* * The mask read does not need to be ordered. Only we can change @@ -679,7 +883,8 @@ static void aic_handle_ipi(struct pt_regs *regs) * No ordering needed here; at worst this just changes the timing of * when the next IPI will be delivered. 
*/ - aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER); + if (!static_branch_likely(&use_fast_ipi)) + aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER); } static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq, @@ -766,20 +971,27 @@ static int aic_init_cpu(unsigned int cpu) /* Commit all of the above */ isb(); - /* - * Make sure the kernel's idea of logical CPU order is the same as AIC's - * If we ever end up with a mismatch here, we will have to introduce - * a mapping table similar to what other irqchip drivers do. - */ - WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id()); + if (aic_irqc->info.version == 1) { + /* + * Make sure the kernel's idea of logical CPU order is the same as AIC's + * If we ever end up with a mismatch here, we will have to introduce + * a mapping table similar to what other irqchip drivers do. + */ + WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id()); - /* - * Always keep IPIs unmasked at the hardware level (except auto-masking - * by AIC during processing). We manage masks at the vIPI level. - */ - aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER); - aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF); - aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER); + /* + * Always keep IPIs unmasked at the hardware level (except auto-masking + * by AIC during processing). We manage masks at the vIPI level. + * These registers only exist on AICv1, AICv2 always uses fast IPIs. + */ + aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER); + if (static_branch_likely(&use_fast_ipi)) { + aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER); + } else { + aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF); + aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER); + } + } /* Initialize the local mask state */ __this_cpu_write(aic_fiq_unmasked, 0); @@ -793,68 +1005,193 @@ static struct gic_kvm_info vgic_info __initdata = { .no_hw_deactivation = true, }; +static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff) +{ + int i, n; + u32 fiq; + + if (of_property_read_u32(aff, "apple,fiq-index", &fiq) || + WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq]) + return; + + n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32)); + if (WARN_ON(n < 0)) + return; + + ic->fiq_aff[fiq] = kzalloc(sizeof(*ic->fiq_aff[fiq]), GFP_KERNEL); + if (!ic->fiq_aff[fiq]) + return; + + for (i = 0; i < n; i++) { + struct device_node *cpu_node; + u32 cpu_phandle; + int cpu; + + if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle)) + continue; + + cpu_node = of_find_node_by_phandle(cpu_phandle); + if (WARN_ON(!cpu_node)) + continue; + + cpu = of_cpu_node_to_id(cpu_node); + if (WARN_ON(cpu < 0)) + continue; + + cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff); + } +} + static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent) { - int i; + int i, die; + u32 off, start_off; void __iomem *regs; - u32 info; struct aic_irq_chip *irqc; + struct device_node *affs; + const struct of_device_id *match; regs = of_iomap(node, 0); if (WARN_ON(!regs)) return -EIO; irqc = kzalloc(sizeof(*irqc), GFP_KERNEL); - if (!irqc) + if (!irqc) { + iounmap(regs); return -ENOMEM; + } - aic_irqc = irqc; irqc->base = regs; - info = aic_ic_read(irqc, AIC_INFO); - irqc->nr_hw = FIELD_GET(AIC_INFO_NR_HW, info); + match = of_match_node(aic_info_match, node); + if (!match) + goto err_unmap; - irqc->hw_domain = irq_domain_create_linear(of_node_to_fwnode(node), - irqc->nr_hw + 
AIC_NR_FIQ, - &aic_irq_domain_ops, irqc); - if (WARN_ON(!irqc->hw_domain)) { - iounmap(irqc->base); - kfree(irqc); - return -ENODEV; + irqc->info = *(struct aic_info *)match->data; + + aic_irqc = irqc; + + switch (irqc->info.version) { + case 1: { + u32 info; + + info = aic_ic_read(irqc, AIC_INFO); + irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info); + irqc->max_irq = AIC_MAX_IRQ; + irqc->nr_die = irqc->max_die = 1; + + off = start_off = irqc->info.target_cpu; + off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */ + + irqc->event = irqc->base; + + break; } + case 2: { + u32 info1, info3; + + info1 = aic_ic_read(irqc, AIC2_INFO1); + info3 = aic_ic_read(irqc, AIC2_INFO3); + + irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1); + irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3); + irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1; + irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3); + + off = start_off = irqc->info.irq_cfg; + off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */ + + irqc->event = of_iomap(node, 1); + if (WARN_ON(!irqc->event)) + goto err_unmap; + + break; + } + } + + irqc->info.sw_set = off; + off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */ + irqc->info.sw_clr = off; + off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */ + irqc->info.mask_set = off; + off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_SET */ + irqc->info.mask_clr = off; + off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */ + off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */ + + if (irqc->info.fast_ipi) + static_branch_enable(&use_fast_ipi); + else + static_branch_disable(&use_fast_ipi); + + irqc->info.die_stride = off - start_off; + + irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node), + &aic_irq_domain_ops, irqc); + if (WARN_ON(!irqc->hw_domain)) + goto err_unmap; irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED); - if (aic_init_smp(irqc, node)) { - irq_domain_remove(irqc->hw_domain); - iounmap(irqc->base); - kfree(irqc); - return -ENODEV; + if (aic_init_smp(irqc, node)) + goto err_remove_domain; + + affs = of_get_child_by_name(node, "affinities"); + if (affs) { + struct device_node *chld; + + for_each_child_of_node(affs, chld) + build_fiq_affinity(irqc, chld); } set_handle_irq(aic_handle_irq); set_handle_fiq(aic_handle_fiq); - for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++) - aic_ic_write(irqc, AIC_MASK_SET + i * 4, U32_MAX); - for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++) - aic_ic_write(irqc, AIC_SW_CLR + i * 4, U32_MAX); - for (i = 0; i < irqc->nr_hw; i++) - aic_ic_write(irqc, AIC_TARGET_CPU + i * 4, 1); + off = 0; + for (die = 0; die < irqc->nr_die; die++) { + for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++) + aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX); + for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++) + aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX); + if (irqc->info.target_cpu) + for (i = 0; i < irqc->nr_irq; i++) + aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1); + off += irqc->info.die_stride; + } + + if (irqc->info.version == 2) { + u32 config = aic_ic_read(irqc, AIC2_CONFIG); + + config |= AIC2_CONFIG_ENABLE; + aic_ic_write(irqc, AIC2_CONFIG, config); + } if (!is_kernel_in_hyp_mode()) pr_info("Kernel running in EL1, mapping interrupts"); + if (static_branch_likely(&use_fast_ipi)) + pr_info("Using Fast IPIs"); + cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING, "irqchip/apple-aic/ipi:starting", aic_init_cpu, NULL); vgic_set_kvm_info(&vgic_info); - pr_info("Initialized with %d IRQs, %d FIQs, %d 
vIPIs\n", - irqc->nr_hw, AIC_NR_FIQ, AIC_NR_SWIPI); + pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs", + irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die, AIC_NR_FIQ, AIC_NR_SWIPI); return 0; + +err_remove_domain: + irq_domain_remove(irqc->hw_domain); +err_unmap: + if (irqc->event && irqc->event != irqc->base) + iounmap(irqc->event); + iounmap(irqc->base); + kfree(irqc); + return -ENODEV; } -IRQCHIP_DECLARE(apple_m1_aic, "apple,aic", aic_of_ic_init); +IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init); +IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init); diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c index 5cc268880f8e..46a3aa60e50e 100644 --- a/drivers/irqchip/irq-ftintc010.c +++ b/drivers/irqchip/irq-ftintc010.c @@ -11,7 +11,6 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/irqchip.h> -#include <linux/irqchip/versatile-fpga.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/of.h> diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 9e93ff2b6375..cd772973114a 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -5517,6 +5517,9 @@ int __init its_lpi_memreserve_init(void) if (!efi_enabled(EFI_CONFIG_TABLES)) return 0; + if (list_empty(&its_nodes)) + return 0; + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "irqchip/arm/gicv3/memreserve:online", diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 5e935d97207d..0efe1a9a9f3b 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1211,7 +1211,7 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) * Ensure that stores to Normal memory are visible to the * other CPUs before issuing the IPI. 
*/ - wmb(); + dsb(ishst); for_each_cpu(cpu, mask) { u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index b8bb46c65a97..58ba835bee1f 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -34,6 +34,7 @@ #include <linux/irqdomain.h> #include <linux/interrupt.h> #include <linux/percpu.h> +#include <linux/seq_file.h> #include <linux/slab.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> @@ -66,7 +67,6 @@ union gic_base { }; struct gic_chip_data { - struct irq_chip chip; union gic_base dist_base; union gic_base cpu_base; void __iomem *raw_dist_base; @@ -397,18 +397,15 @@ static void gic_handle_cascade_irq(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static const struct irq_chip gic_chip = { - .irq_mask = gic_mask_irq, - .irq_unmask = gic_unmask_irq, - .irq_eoi = gic_eoi_irq, - .irq_set_type = gic_set_type, - .irq_retrigger = gic_retrigger, - .irq_get_irqchip_state = gic_irq_get_irqchip_state, - .irq_set_irqchip_state = gic_irq_set_irqchip_state, - .flags = IRQCHIP_SET_TYPE_MASKED | - IRQCHIP_SKIP_SET_WAKE | - IRQCHIP_MASK_ON_SUSPEND, -}; +static void gic_irq_print_chip(struct irq_data *d, struct seq_file *p) +{ + struct gic_chip_data *gic = irq_data_get_irq_chip_data(d); + + if (gic->domain->dev) + seq_printf(p, gic->domain->dev->of_node->name); + else + seq_printf(p, "GIC-%d", (int)(gic - &gic_data[0])); +} void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) { @@ -799,8 +796,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d); + struct gic_chip_data *gic = irq_data_get_irq_chip_data(d); unsigned int cpu; + if (unlikely(gic != &gic_data[0])) + return -EINVAL; + if (!force) cpu = cpumask_any_and(mask_val, cpu_online_mask); else @@ -880,6 +881,39 @@ static __init void gic_smp_init(void) #define gic_ipi_send_mask NULL #endif +static const struct irq_chip gic_chip = { + .irq_mask = gic_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoi_irq, + .irq_set_type = gic_set_type, + .irq_retrigger = gic_retrigger, + .irq_set_affinity = gic_set_affinity, + .ipi_send_mask = gic_ipi_send_mask, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_print_chip = gic_irq_print_chip, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static const struct irq_chip gic_chip_mode1 = { + .name = "GICv2", + .irq_mask = gic_eoimode1_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoimode1_eoi_irq, + .irq_set_type = gic_set_type, + .irq_retrigger = gic_retrigger, + .irq_set_affinity = gic_set_affinity, + .ipi_send_mask = gic_ipi_send_mask, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + #ifdef CONFIG_BL_SWITCHER /* * gic_send_sgi - send a SGI directly to given CPU interface number @@ -1024,15 +1058,19 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, { struct gic_chip_data *gic = d->host_data; struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); + const struct irq_chip *chip; + + chip = (static_branch_likely(&supports_deactivate_key) && + gic == &gic_data[0]) ? 
&gic_chip_mode1 : &gic_chip; switch (hw) { case 0 ... 31: irq_set_percpu_devid(irq); - irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data, + irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_percpu_devid_irq, NULL, NULL); break; default: - irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data, + irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_fasteoi_irq, NULL, NULL); irq_set_probe(irq); irqd_set_single_target(irqd); @@ -1127,26 +1165,6 @@ static const struct irq_domain_ops gic_irq_domain_ops = { .unmap = gic_irq_domain_unmap, }; -static void gic_init_chip(struct gic_chip_data *gic, struct device *dev, - const char *name, bool use_eoimode1) -{ - /* Initialize irq_chip */ - gic->chip = gic_chip; - gic->chip.name = name; - gic->chip.parent_device = dev; - - if (use_eoimode1) { - gic->chip.irq_mask = gic_eoimode1_mask_irq; - gic->chip.irq_eoi = gic_eoimode1_eoi_irq; - gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity; - } - - if (gic == &gic_data[0]) { - gic->chip.irq_set_affinity = gic_set_affinity; - gic->chip.ipi_send_mask = gic_ipi_send_mask; - } -} - static int gic_init_bases(struct gic_chip_data *gic, struct fwnode_handle *handle) { @@ -1246,7 +1264,6 @@ error: static int __init __gic_init_bases(struct gic_chip_data *gic, struct fwnode_handle *handle) { - char *name; int i, ret; if (WARN_ON(!gic || gic->domain)) @@ -1266,18 +1283,8 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, pr_info("GIC: Using split EOI/Deactivate mode\n"); } - if (static_branch_likely(&supports_deactivate_key) && gic == &gic_data[0]) { - name = kasprintf(GFP_KERNEL, "GICv2"); - gic_init_chip(gic, NULL, name, true); - } else { - name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic-&gic_data[0])); - gic_init_chip(gic, NULL, name, false); - } - ret = gic_init_bases(gic, handle); - if (ret) - kfree(name); - else if (gic == &gic_data[0]) + if (gic == &gic_data[0]) gic_smp_init(); return ret; @@ -1460,8 +1467,6 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq) if (!*gic) return -ENOMEM; - gic_init_chip(*gic, dev, dev->of_node->name, false); - ret = gic_of_setup(*gic, dev->of_node); if (ret) return ret; @@ -1472,6 +1477,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq) return ret; } + irq_domain_set_pm_device((*gic)->domain, dev); irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic); return 0; diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c index e86ff743e98c..80aaea82468a 100644 --- a/drivers/irqchip/irq-imx-intmux.c +++ b/drivers/irqchip/irq-imx-intmux.c @@ -61,7 +61,6 @@ #define CHAN_MAX_NUM 0x8 struct intmux_irqchip_data { - struct irq_chip chip; u32 saved_reg; int chanidx; int irq; @@ -114,7 +113,7 @@ static void imx_intmux_irq_unmask(struct irq_data *d) raw_spin_unlock_irqrestore(&data->lock, flags); } -static struct irq_chip imx_intmux_irq_chip = { +static struct irq_chip imx_intmux_irq_chip __ro_after_init = { .name = "intmux", .irq_mask = imx_intmux_irq_mask, .irq_unmask = imx_intmux_irq_unmask, @@ -126,7 +125,7 @@ static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq, struct intmux_irqchip_data *data = h->host_data; irq_set_chip_data(irq, data); - irq_set_chip_and_handler(irq, &data->chip, handle_level_irq); + irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq); return 0; } @@ -241,8 +240,6 @@ static int imx_intmux_probe(struct platform_device *pdev) } for (i = 0; i < channum; i++) { - data->irqchip_data[i].chip = 
imx_intmux_irq_chip; - data->irqchip_data[i].chip.parent_device = &pdev->dev; data->irqchip_data[i].chanidx = i; data->irqchip_data[i].irq = irq_of_parse_and_map(np, i); @@ -260,6 +257,7 @@ static int imx_intmux_probe(struct platform_device *pdev) goto out; } data->irqchip_data[i].domain = domain; + irq_domain_set_pm_device(domain, &pdev->dev); /* disable all interrupt sources of this channel firstly */ writel_relaxed(0, data->regs + CHANIER(i)); diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c index a29357f39450..4d70a857133f 100644 --- a/drivers/irqchip/irq-lpc32xx.c +++ b/drivers/irqchip/irq-lpc32xx.c @@ -11,6 +11,7 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/seq_file.h> #include <linux/slab.h> #include <asm/exception.h> @@ -25,8 +26,8 @@ struct lpc32xx_irq_chip { void __iomem *base; + phys_addr_t addr; struct irq_domain *domain; - struct irq_chip chip; }; static struct lpc32xx_irq_chip *lpc32xx_mic_irqc; @@ -118,6 +119,24 @@ static int lpc32xx_irq_set_type(struct irq_data *d, unsigned int type) return 0; } +static void lpc32xx_irq_print_chip(struct irq_data *d, struct seq_file *p) +{ + struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d); + + if (ic == lpc32xx_mic_irqc) + seq_printf(p, "%08x.mic", ic->addr); + else + seq_printf(p, "%08x.sic", ic->addr); +} + +static const struct irq_chip lpc32xx_chip = { + .irq_ack = lpc32xx_irq_ack, + .irq_mask = lpc32xx_irq_mask, + .irq_unmask = lpc32xx_irq_unmask, + .irq_set_type = lpc32xx_irq_set_type, + .irq_print_chip = lpc32xx_irq_print_chip, +}; + static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs) { struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc; @@ -153,7 +172,7 @@ static int lpc32xx_irq_domain_map(struct irq_domain *id, unsigned int virq, struct lpc32xx_irq_chip *ic = id->host_data; irq_set_chip_data(virq, ic); - irq_set_chip_and_handler(virq, &ic->chip, handle_level_irq); + irq_set_chip_and_handler(virq, &lpc32xx_chip, handle_level_irq); irq_set_status_flags(virq, IRQ_LEVEL); irq_set_noprobe(virq); @@ -183,6 +202,7 @@ static int __init lpc32xx_of_ic_init(struct device_node *node, if (!irqc) return -ENOMEM; + irqc->addr = addr; irqc->base = of_iomap(node, 0); if (!irqc->base) { pr_err("%pOF: unable to map registers\n", node); @@ -190,21 +210,11 @@ static int __init lpc32xx_of_ic_init(struct device_node *node, return -EINVAL; } - irqc->chip.irq_ack = lpc32xx_irq_ack; - irqc->chip.irq_mask = lpc32xx_irq_mask; - irqc->chip.irq_unmask = lpc32xx_irq_unmask; - irqc->chip.irq_set_type = lpc32xx_irq_set_type; - if (is_mic) - irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.mic", addr); - else - irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.sic", addr); - irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS, &lpc32xx_irq_domain_ops, irqc); if (!irqc->domain) { pr_err("unable to add irq domain\n"); iounmap(irqc->base); - kfree(irqc->chip.name); kfree(irqc); return -ENODEV; } diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c index d90ff0b92480..2aaa9aad3e87 100644 --- a/drivers/irqchip/irq-meson-gpio.c +++ b/drivers/irqchip/irq-meson-gpio.c @@ -16,7 +16,7 @@ #include <linux/of.h> #include <linux/of_address.h> -#define NUM_CHANNEL 8 +#define MAX_NUM_CHANNEL 64 #define MAX_INPUT_MUX 256 #define REG_EDGE_POL 0x00 @@ -26,6 +26,8 @@ /* use for A1 like chips */ #define REG_PIN_A1_SEL 0x04 +/* Used for s4 chips */ +#define REG_EDGE_POL_S4 0x1c /* * Note: The S905X3 datasheet reports that BOTH_EDGE is 
controlled by @@ -51,15 +53,22 @@ static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, unsigned int channel, unsigned long hwirq); static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl); +static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl, + unsigned int type, u32 *channel_hwirq); +static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl, + unsigned int type, u32 *channel_hwirq); struct irq_ctl_ops { void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl, unsigned int channel, unsigned long hwirq); void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl); + int (*gpio_irq_set_type)(struct meson_gpio_irq_controller *ctl, + unsigned int type, u32 *channel_hwirq); }; struct meson_gpio_irq_params { unsigned int nr_hwirq; + unsigned int nr_channels; bool support_edge_both; unsigned int edge_both_offset; unsigned int edge_single_offset; @@ -68,28 +77,44 @@ struct meson_gpio_irq_params { struct irq_ctl_ops ops; }; -#define INIT_MESON_COMMON(irqs, init, sel) \ +#define INIT_MESON_COMMON(irqs, init, sel, type) \ .nr_hwirq = irqs, \ .ops = { \ .gpio_irq_init = init, \ .gpio_irq_sel_pin = sel, \ + .gpio_irq_set_type = type, \ }, #define INIT_MESON8_COMMON_DATA(irqs) \ INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy, \ - meson8_gpio_irq_sel_pin) \ + meson8_gpio_irq_sel_pin, \ + meson8_gpio_irq_set_type) \ .edge_single_offset = 0, \ .pol_low_offset = 16, \ .pin_sel_mask = 0xff, \ + .nr_channels = 8, \ #define INIT_MESON_A1_COMMON_DATA(irqs) \ INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \ - meson_a1_gpio_irq_sel_pin) \ + meson_a1_gpio_irq_sel_pin, \ + meson8_gpio_irq_set_type) \ .support_edge_both = true, \ .edge_both_offset = 16, \ .edge_single_offset = 8, \ .pol_low_offset = 0, \ .pin_sel_mask = 0x7f, \ + .nr_channels = 8, \ + +#define INIT_MESON_S4_COMMON_DATA(irqs) \ + INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \ + meson_a1_gpio_irq_sel_pin, \ + meson_s4_gpio_irq_set_type) \ + .support_edge_both = true, \ + .edge_both_offset = 0, \ + .edge_single_offset = 12, \ + .pol_low_offset = 0, \ + .pin_sel_mask = 0xff, \ + .nr_channels = 12, \ static const struct meson_gpio_irq_params meson8_params = { INIT_MESON8_COMMON_DATA(134) @@ -121,6 +146,10 @@ static const struct meson_gpio_irq_params a1_params = { INIT_MESON_A1_COMMON_DATA(62) }; +static const struct meson_gpio_irq_params s4_params = { + INIT_MESON_S4_COMMON_DATA(82) +}; + static const struct of_device_id meson_irq_gpio_matches[] = { { .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params }, { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params }, @@ -130,14 +159,15 @@ static const struct of_device_id meson_irq_gpio_matches[] = { { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params }, { .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params }, { .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params }, + { .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params }, { } }; struct meson_gpio_irq_controller { const struct meson_gpio_irq_params *params; void __iomem *base; - u32 channel_irqs[NUM_CHANNEL]; - DECLARE_BITMAP(channel_map, NUM_CHANNEL); + u32 channel_irqs[MAX_NUM_CHANNEL]; + DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL); spinlock_t lock; }; @@ -207,8 +237,8 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, spin_lock_irqsave(&ctl->lock, flags); /* Find a free channel */ - idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL); - if (idx >= 
NUM_CHANNEL) { + idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels); + if (idx >= ctl->params->nr_channels) { spin_unlock_irqrestore(&ctl->lock, flags); pr_err("No channel available\n"); return -ENOSPC; @@ -256,9 +286,8 @@ meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl, clear_bit(idx, ctl->channel_map); } -static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl, - unsigned int type, - u32 *channel_hwirq) +static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl, + unsigned int type, u32 *channel_hwirq) { u32 val = 0; unsigned int idx; @@ -299,6 +328,51 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl, return 0; } +/* + * gpio irq related registers for s4 + * -PADCTRL_GPIO_IRQ_CTRL0 + * bit[31]: enable/disable all the irq lines + * bit[12-23]: single edge trigger + * bit[0-11]: polarity trigger + * + * -PADCTRL_GPIO_IRQ_CTRL[X] + * bit[0-6]: 7 bits to choose gpio source for irq line 2*[X] - 2 + * bit[16-22]: 7 bits to choose gpio source for irq line 2*[X] - 1 + * where X = 1-6 + * + * -PADCTRL_GPIO_IRQ_CTRL[7] + * bit[0-11]: both edge trigger + */ +static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl, + unsigned int type, u32 *channel_hwirq) +{ + u32 val = 0; + unsigned int idx; + + idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq); + + type &= IRQ_TYPE_SENSE_MASK; + + meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4, BIT(idx), 0); + + if (type == IRQ_TYPE_EDGE_BOTH) { + val |= BIT(ctl->params->edge_both_offset + idx); + meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4, + BIT(ctl->params->edge_both_offset + idx), val); + return 0; + } + + if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) + val |= BIT(ctl->params->pol_low_offset + idx); + + if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) + val |= BIT(ctl->params->edge_single_offset + idx); + + meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, + BIT(idx) | BIT(12 + idx), val); + return 0; +} + static unsigned int meson_gpio_irq_type_output(unsigned int type) { unsigned int sense = type & IRQ_TYPE_SENSE_MASK; @@ -323,7 +397,7 @@ static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type) u32 *channel_hwirq = irq_data_get_irq_chip_data(data); int ret; - ret = meson_gpio_irq_type_setup(ctl, type, channel_hwirq); + ret = ctl->params->ops.gpio_irq_set_type(ctl, type, channel_hwirq); if (ret) return ret; @@ -450,10 +524,10 @@ static int meson_gpio_irq_parse_dt(struct device_node *node, struct meson_gpio_i ret = of_property_read_variable_u32_array(node, "amlogic,channel-interrupts", ctl->channel_irqs, - NUM_CHANNEL, - NUM_CHANNEL); + ctl->params->nr_channels, + ctl->params->nr_channels); if (ret < 0) { - pr_err("can't get %d channel interrupts\n", NUM_CHANNEL); + pr_err("can't get %d channel interrupts\n", ctl->params->nr_channels); return ret; } @@ -507,7 +581,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node * } pr_info("%d to %d gpio interrupt mux initialized\n", - ctl->params->nr_hwirq, NUM_CHANNEL); + ctl->params->nr_hwirq, ctl->params->nr_channels); return 0; diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c index 870f9866b8da..ef3d3646ccc2 100644 --- a/drivers/irqchip/irq-mvebu-pic.c +++ b/drivers/irqchip/irq-mvebu-pic.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/of_irq.h> #include <linux/platform_device.h> +#include <linux/seq_file.h> #define PIC_CAUSE 0x0 #define PIC_MASK 0x4 @@ -29,7 +30,7 @@
struct mvebu_pic { void __iomem *base; u32 parent_irq; struct irq_domain *domain; - struct irq_chip irq_chip; + struct platform_device *pdev; }; static void mvebu_pic_reset(struct mvebu_pic *pic) @@ -66,6 +67,20 @@ static void mvebu_pic_unmask_irq(struct irq_data *d) writel(reg, pic->base + PIC_MASK); } +static void mvebu_pic_print_chip(struct irq_data *d, struct seq_file *p) +{ + struct mvebu_pic *pic = irq_data_get_irq_chip_data(d); + + seq_printf(p, "%s", dev_name(&pic->pdev->dev)); +} + +static const struct irq_chip mvebu_pic_chip = { + .irq_mask = mvebu_pic_mask_irq, + .irq_unmask = mvebu_pic_unmask_irq, + .irq_eoi = mvebu_pic_eoi_irq, + .irq_print_chip = mvebu_pic_print_chip, +}; + static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq) { @@ -73,8 +88,7 @@ static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq, irq_set_percpu_devid(virq); irq_set_chip_data(virq, pic); - irq_set_chip_and_handler(virq, &pic->irq_chip, - handle_percpu_devid_irq); + irq_set_chip_and_handler(virq, &mvebu_pic_chip, handle_percpu_devid_irq); irq_set_status_flags(virq, IRQ_LEVEL); irq_set_probe(virq); @@ -120,22 +134,16 @@ static int mvebu_pic_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct mvebu_pic *pic; - struct irq_chip *irq_chip; pic = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pic), GFP_KERNEL); if (!pic) return -ENOMEM; + pic->pdev = pdev; pic->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pic->base)) return PTR_ERR(pic->base); - irq_chip = &pic->irq_chip; - irq_chip->name = dev_name(&pdev->dev); - irq_chip->irq_mask = mvebu_pic_mask_irq; - irq_chip->irq_unmask = mvebu_pic_unmask_irq; - irq_chip->irq_eoi = mvebu_pic_eoi_irq; - pic->parent_irq = irq_of_parse_and_map(node, 0); if (pic->parent_irq <= 0) { dev_err(&pdev->dev, "Failed to parse parent interrupt\n"); diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c index ba4759b3e269..94230306e0ee 100644 --- a/drivers/irqchip/irq-nvic.c +++ b/drivers/irqchip/irq-nvic.c @@ -107,6 +107,7 @@ static int __init nvic_of_init(struct device_node *node, if (!nvic_irq_domain) { pr_warn("Failed to allocate irq domain\n"); + iounmap(nvic_base); return -ENOMEM; } @@ -116,6 +117,7 @@ static int __init nvic_of_init(struct device_node *node, if (ret) { pr_warn("Failed to allocate irq chips\n"); irq_domain_remove(nvic_irq_domain); + iounmap(nvic_base); return ret; } diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c new file mode 100644 index 000000000000..eea5a753618c --- /dev/null +++ b/drivers/irqchip/irq-qcom-mpm.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, Linaro Limited + * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved. + */ + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/mailbox_client.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/slab.h> +#include <linux/soc/qcom/irq.h> +#include <linux/spinlock.h> + +/* + * This is the driver for the Qualcomm MPM (MSM Power Manager) interrupt controller, + * which is commonly found on Qualcomm SoCs built on the RPM architecture.
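
The mvebu-pic conversion above follows the pattern used throughout this series: a per-instance struct irq_chip, whose .name had to be assembled at probe time with dev_name() or kasprintf(), becomes one shared const chip, and the instance-specific name is produced on demand through the new .irq_print_chip callback. A minimal sketch of the shape, using hypothetical foo_* names rather than any one in-tree driver:

	#include <linux/irq.h>
	#include <linux/irqdomain.h>
	#include <linux/platform_device.h>
	#include <linux/seq_file.h>

	struct foo_pic {
		void __iomem *base;
		struct platform_device *pdev;	/* kept only to derive the name */
	};

	static void foo_pic_mask(struct irq_data *d) { /* mask the hw bit */ }
	static void foo_pic_unmask(struct irq_data *d) { /* unmask the hw bit */ }

	/* Called when e.g. /proc/interrupts needs the chip name. */
	static void foo_pic_print_chip(struct irq_data *d, struct seq_file *p)
	{
		struct foo_pic *pic = irq_data_get_irq_chip_data(d);

		seq_printf(p, "%s", dev_name(&pic->pdev->dev));
	}

	/* One immutable chip serves every instance; state travels in chip_data. */
	static const struct irq_chip foo_pic_chip = {
		.irq_mask	= foo_pic_mask,
		.irq_unmask	= foo_pic_unmask,
		.irq_print_chip	= foo_pic_print_chip,
	};

	static int foo_pic_map(struct irq_domain *d, unsigned int virq,
			       irq_hw_number_t hwirq)
	{
		irq_set_chip_data(virq, d->host_data);
		irq_set_chip_and_handler(virq, &foo_pic_chip, handle_level_irq);
		return 0;
	}

Besides dropping a kasprintf() and its error handling, this lets the chip live in rodata (or be marked __ro_after_init, as in the intmux hunk earlier).
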
+ * Sitting in the always-on domain, MPM monitors the wakeup interrupts when the SoC is + * asleep, and wakes up the AP when one of those interrupts occurs. This driver + * doesn't directly access physical MPM registers though. Instead, the access + * is bridged via a piece of internal memory (SRAM) that is accessible to both + * AP and RPM. This piece of memory is called 'vMPM' in the driver. + * + * When the SoC is awake, the vMPM is owned by the AP, and all register setup done + * by this driver happens on the vMPM. When the AP is about to be power collapsed, the + * driver sends a mailbox notification to RPM, which will take over the vMPM + * ownership and dump vMPM into physical MPM registers. On wakeup, the AP is woken + * up by an MPM pin/interrupt, and RPM will copy STATUS registers into vMPM. + * Then the AP starts owning the vMPM again. + * + * vMPM register map: + * + * 31 0 + * +--------------------------------+ + * | TIMER0 | 0x00 + * +--------------------------------+ + * | TIMER1 | 0x04 + * +--------------------------------+ + * | ENABLE0 | 0x08 + * +--------------------------------+ + * | ... | ... + * +--------------------------------+ + * | ENABLEn | + * +--------------------------------+ + * | FALLING_EDGE0 | + * +--------------------------------+ + * | ... | + * +--------------------------------+ + * | STATUSn | + * +--------------------------------+ + * + * n = DIV_ROUND_UP(pin_cnt, 32) + * + */ + +#define MPM_REG_ENABLE 0 +#define MPM_REG_FALLING_EDGE 1 +#define MPM_REG_RISING_EDGE 2 +#define MPM_REG_POLARITY 3 +#define MPM_REG_STATUS 4 + +/* MPM pin map to GIC hwirq */ +struct mpm_gic_map { + int pin; + irq_hw_number_t hwirq; +}; + +struct qcom_mpm_priv { + void __iomem *base; + raw_spinlock_t lock; + struct mbox_client mbox_client; + struct mbox_chan *mbox_chan; + struct mpm_gic_map *maps; + unsigned int map_cnt; + unsigned int reg_stride; + struct irq_domain *domain; + struct generic_pm_domain genpd; +}; + +static u32 qcom_mpm_read(struct qcom_mpm_priv *priv, unsigned int reg, + unsigned int index) +{ + unsigned int offset = (reg * priv->reg_stride + index + 2) * 4; + + return readl_relaxed(priv->base + offset); +} + +static void qcom_mpm_write(struct qcom_mpm_priv *priv, unsigned int reg, + unsigned int index, u32 val) +{ + unsigned int offset = (reg * priv->reg_stride + index + 2) * 4; + + writel_relaxed(val, priv->base + offset); + + /* Ensure the write is completed */ + wmb(); +} + +static void qcom_mpm_enable_irq(struct irq_data *d, bool en) +{ + struct qcom_mpm_priv *priv = d->chip_data; + int pin = d->hwirq; + unsigned int index = pin / 32; + unsigned int shift = pin % 32; + unsigned long flags, val; + + raw_spin_lock_irqsave(&priv->lock, flags); + + val = qcom_mpm_read(priv, MPM_REG_ENABLE, index); + __assign_bit(shift, &val, en); + qcom_mpm_write(priv, MPM_REG_ENABLE, index, val); + + raw_spin_unlock_irqrestore(&priv->lock, flags); +} + +static void qcom_mpm_mask(struct irq_data *d) +{ + qcom_mpm_enable_irq(d, false); + + if (d->parent_data) + irq_chip_mask_parent(d); +} + +static void qcom_mpm_unmask(struct irq_data *d) +{ + qcom_mpm_enable_irq(d, true); + + if (d->parent_data) + irq_chip_unmask_parent(d); +} + +static void mpm_set_type(struct qcom_mpm_priv *priv, bool set, unsigned int reg, + unsigned int index, unsigned int shift) +{ + unsigned long flags, val; + + raw_spin_lock_irqsave(&priv->lock, flags); + + val = qcom_mpm_read(priv, reg, index); + __assign_bit(shift, &val, set); + qcom_mpm_write(priv, reg, index, val); + + raw_spin_unlock_irqrestore(&priv->lock, flags); +} + +static int
qcom_mpm_set_type(struct irq_data *d, unsigned int type) +{ + struct qcom_mpm_priv *priv = d->chip_data; + int pin = d->hwirq; + unsigned int index = pin / 32; + unsigned int shift = pin % 32; + + if (type & IRQ_TYPE_EDGE_RISING) + mpm_set_type(priv, true, MPM_REG_RISING_EDGE, index, shift); + else + mpm_set_type(priv, false, MPM_REG_RISING_EDGE, index, shift); + + if (type & IRQ_TYPE_EDGE_FALLING) + mpm_set_type(priv, true, MPM_REG_FALLING_EDGE, index, shift); + else + mpm_set_type(priv, false, MPM_REG_FALLING_EDGE, index, shift); + + if (type & IRQ_TYPE_LEVEL_HIGH) + mpm_set_type(priv, true, MPM_REG_POLARITY, index, shift); + else + mpm_set_type(priv, false, MPM_REG_POLARITY, index, shift); + + if (!d->parent_data) + return 0; + + if (type & IRQ_TYPE_EDGE_BOTH) + type = IRQ_TYPE_EDGE_RISING; + + if (type & IRQ_TYPE_LEVEL_MASK) + type = IRQ_TYPE_LEVEL_HIGH; + + return irq_chip_set_type_parent(d, type); +} + +static struct irq_chip qcom_mpm_chip = { + .name = "mpm", + .irq_eoi = irq_chip_eoi_parent, + .irq_mask = qcom_mpm_mask, + .irq_unmask = qcom_mpm_unmask, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_type = qcom_mpm_set_type, + .irq_set_affinity = irq_chip_set_affinity_parent, + .flags = IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_SKIP_SET_WAKE, +}; + +static struct mpm_gic_map *get_mpm_gic_map(struct qcom_mpm_priv *priv, int pin) +{ + struct mpm_gic_map *maps = priv->maps; + int i; + + for (i = 0; i < priv->map_cnt; i++) { + if (maps[i].pin == pin) + return &maps[i]; + } + + return NULL; +} + +static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *data) +{ + struct qcom_mpm_priv *priv = domain->host_data; + struct irq_fwspec *fwspec = data; + struct irq_fwspec parent_fwspec; + struct mpm_gic_map *map; + irq_hw_number_t pin; + unsigned int type; + int ret; + + ret = irq_domain_translate_twocell(domain, fwspec, &pin, &type); + if (ret) + return ret; + + ret = irq_domain_set_hwirq_and_chip(domain, virq, pin, + &qcom_mpm_chip, priv); + if (ret) + return ret; + + map = get_mpm_gic_map(priv, pin); + if (map == NULL) + return irq_domain_disconnect_hierarchy(domain->parent, virq); + + if (type & IRQ_TYPE_EDGE_BOTH) + type = IRQ_TYPE_EDGE_RISING; + + if (type & IRQ_TYPE_LEVEL_MASK) + type = IRQ_TYPE_LEVEL_HIGH; + + parent_fwspec.fwnode = domain->parent->fwnode; + parent_fwspec.param_count = 3; + parent_fwspec.param[0] = 0; + parent_fwspec.param[1] = map->hwirq; + parent_fwspec.param[2] = type; + + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, + &parent_fwspec); +} + +static const struct irq_domain_ops qcom_mpm_ops = { + .alloc = qcom_mpm_alloc, + .free = irq_domain_free_irqs_common, + .translate = irq_domain_translate_twocell, +}; + +/* Triggered by RPM when system resumes from deep sleep */ +static irqreturn_t qcom_mpm_handler(int irq, void *dev_id) +{ + struct qcom_mpm_priv *priv = dev_id; + unsigned long enable, pending; + irqreturn_t ret = IRQ_NONE; + unsigned long flags; + int i, j; + + for (i = 0; i < priv->reg_stride; i++) { + raw_spin_lock_irqsave(&priv->lock, flags); + enable = qcom_mpm_read(priv, MPM_REG_ENABLE, i); + pending = qcom_mpm_read(priv, MPM_REG_STATUS, i); + pending &= enable; + raw_spin_unlock_irqrestore(&priv->lock, flags); + + for_each_set_bit(j, &pending, 32) { + unsigned int pin = 32 * i + j; + struct irq_desc *desc = irq_resolve_mapping(priv->domain, pin); + struct irq_data *d = &desc->irq_data; + + if (!irqd_is_level_type(d)) + irq_set_irqchip_state(d->irq, + IRQCHIP_STATE_PENDING, true); + ret = 
IRQ_HANDLED; + } + } + + return ret; +} + +static int mpm_pd_power_off(struct generic_pm_domain *genpd) +{ + struct qcom_mpm_priv *priv = container_of(genpd, struct qcom_mpm_priv, + genpd); + int i, ret; + + for (i = 0; i < priv->reg_stride; i++) + qcom_mpm_write(priv, MPM_REG_STATUS, i, 0); + + /* Notify RPM to write vMPM into HW */ + ret = mbox_send_message(priv->mbox_chan, NULL); + if (ret < 0) + return ret; + + return 0; +} + +static bool gic_hwirq_is_mapped(struct mpm_gic_map *maps, int cnt, u32 hwirq) +{ + int i; + + for (i = 0; i < cnt; i++) + if (maps[i].hwirq == hwirq) + return true; + + return false; +} + +static int qcom_mpm_init(struct device_node *np, struct device_node *parent) +{ + struct platform_device *pdev = of_find_device_by_node(np); + struct device *dev = &pdev->dev; + struct irq_domain *parent_domain; + struct generic_pm_domain *genpd; + struct qcom_mpm_priv *priv; + unsigned int pin_cnt; + int i, irq; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = of_property_read_u32(np, "qcom,mpm-pin-count", &pin_cnt); + if (ret) { + dev_err(dev, "failed to read qcom,mpm-pin-count: %d\n", ret); + return ret; + } + + priv->reg_stride = DIV_ROUND_UP(pin_cnt, 32); + + ret = of_property_count_u32_elems(np, "qcom,mpm-pin-map"); + if (ret < 0) { + dev_err(dev, "failed to read qcom,mpm-pin-map: %d\n", ret); + return ret; + } + + if (ret % 2) { + dev_err(dev, "invalid qcom,mpm-pin-map\n"); + return -EINVAL; + } + + priv->map_cnt = ret / 2; + priv->maps = devm_kcalloc(dev, priv->map_cnt, sizeof(*priv->maps), + GFP_KERNEL); + if (!priv->maps) + return -ENOMEM; + + for (i = 0; i < priv->map_cnt; i++) { + u32 pin, hwirq; + + of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2, &pin); + of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2 + 1, &hwirq); + + if (gic_hwirq_is_mapped(priv->maps, i, hwirq)) { + dev_warn(dev, "failed to map pin %d as GIC hwirq %d is already mapped\n", + pin, hwirq); + continue; + } + + priv->maps[i].pin = pin; + priv->maps[i].hwirq = hwirq; + } + + raw_spin_lock_init(&priv->lock); + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + for (i = 0; i < priv->reg_stride; i++) { + qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0); + qcom_mpm_write(priv, MPM_REG_FALLING_EDGE, i, 0); + qcom_mpm_write(priv, MPM_REG_RISING_EDGE, i, 0); + qcom_mpm_write(priv, MPM_REG_POLARITY, i, 0); + qcom_mpm_write(priv, MPM_REG_STATUS, i, 0); + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + genpd = &priv->genpd; + genpd->flags = GENPD_FLAG_IRQ_SAFE; + genpd->power_off = mpm_pd_power_off; + + genpd->name = devm_kasprintf(dev, GFP_KERNEL, "%s", dev_name(dev)); + if (!genpd->name) + return -ENOMEM; + + ret = pm_genpd_init(genpd, NULL, false); + if (ret) { + dev_err(dev, "failed to init genpd: %d\n", ret); + return ret; + } + + ret = of_genpd_add_provider_simple(np, genpd); + if (ret) { + dev_err(dev, "failed to add genpd provider: %d\n", ret); + goto remove_genpd; + } + + priv->mbox_client.dev = dev; + priv->mbox_chan = mbox_request_channel(&priv->mbox_client, 0); + if (IS_ERR(priv->mbox_chan)) { + ret = PTR_ERR(priv->mbox_chan); + dev_err(dev, "failed to acquire IPC channel: %d\n", ret); + return ret; + } + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + dev_err(dev, "failed to find MPM parent domain\n"); + ret = -ENXIO; + goto free_mbox; + } + + priv->domain = irq_domain_create_hierarchy(parent_domain, +
IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt, + of_node_to_fwnode(np), &qcom_mpm_ops, priv); + if (!priv->domain) { + dev_err(dev, "failed to create MPM domain\n"); + ret = -ENOMEM; + goto free_mbox; + } + + irq_domain_update_bus_token(priv->domain, DOMAIN_BUS_WAKEUP); + + ret = devm_request_irq(dev, irq, qcom_mpm_handler, IRQF_NO_SUSPEND, + "qcom_mpm", priv); + if (ret) { + dev_err(dev, "failed to request irq: %d\n", ret); + goto remove_domain; + } + + return 0; + +remove_domain: + irq_domain_remove(priv->domain); +free_mbox: + mbox_free_channel(priv->mbox_chan); +remove_genpd: + pm_genpd_remove(genpd); + return ret; +} + +IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_mpm) +IRQCHIP_MATCH("qcom,mpm", qcom_mpm_init) +IRQCHIP_PLATFORM_DRIVER_END(qcom_mpm) +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MSM Power Manager"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 37f9a4499fdb..e83756aca14e 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -508,7 +508,6 @@ static int intc_irqpin_probe(struct platform_device *pdev) irq_chip = &p->irq_chip; irq_chip->name = "intc-irqpin"; - irq_chip->parent_device = dev; irq_chip->irq_mask = disable_fn; irq_chip->irq_unmask = enable_fn; irq_chip->irq_set_type = intc_irqpin_irq_set_type; @@ -523,6 +522,8 @@ static int intc_irqpin_probe(struct platform_device *pdev) goto err0; } + irq_domain_set_pm_device(p->irq_domain, dev); + if (p->shared_irqs) { /* request one shared interrupt */ if (devm_request_irq(dev, p->irq[0].requested_irq, diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c index 909325f88239..1ee5e9941f67 100644 --- a/drivers/irqchip/irq-renesas-irqc.c +++ b/drivers/irqchip/irq-renesas-irqc.c @@ -188,13 +188,14 @@ static int irqc_probe(struct platform_device *pdev) p->gc->reg_base = p->cpu_int_base; p->gc->chip_types[0].regs.enable = IRQC_EN_SET; p->gc->chip_types[0].regs.disable = IRQC_EN_STS; - p->gc->chip_types[0].chip.parent_device = dev; p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg; p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg; p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type; p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake; p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND; + irq_domain_set_pm_device(p->irq_domain, dev); + /* request interrupts one by one */ for (k = 0; k < p->number_of_irqs; k++) { if (devm_request_irq(dev, p->irq[k].requested_irq, diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 259065d271ef..bb87e4c3b88e 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -44,8 +44,8 @@ * Each hart context has a vector of interrupt enable bits associated with it. * There's one bit for each interrupt source. */ -#define ENABLE_BASE 0x2000 -#define ENABLE_PER_HART 0x80 +#define CONTEXT_ENABLE_BASE 0x2000 +#define CONTEXT_ENABLE_SIZE 0x80 /* * Each hart context has a set of control registers associated with it. Right @@ -53,7 +53,7 @@ * take an interrupt, and a register to claim interrupts. 
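
The two Renesas hunks above make the same substitution seen in gic_of_init_child() and imx_intmux_probe() earlier: instead of pointing each irq_chip's parent_device at the platform device, the device is attached once to the domain. A probe-time sketch of the pattern, assuming a hypothetical foo_irq_domain_ops (not any in-tree driver):

	#include <linux/irqdomain.h>
	#include <linux/platform_device.h>

	static const struct irq_domain_ops foo_irq_domain_ops = {
		.xlate = irq_domain_xlate_onecell,
	};

	static int foo_irqc_probe(struct platform_device *pdev)
	{
		struct irq_domain *domain;

		domain = irq_domain_add_linear(pdev->dev.of_node, 32,
					       &foo_irq_domain_ops, NULL);
		if (!domain)
			return -ENOMEM;

		/*
		 * The irq core will runtime-resume this device around
		 * request_irq()/free_irq(), which is what the removed
		 * chip->parent_device assignments used to arrange.
		 */
		irq_domain_set_pm_device(domain, &pdev->dev);

		return 0;
	}
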
*/ #define CONTEXT_BASE 0x200000 -#define CONTEXT_PER_HART 0x1000 +#define CONTEXT_SIZE 0x1000 #define CONTEXT_THRESHOLD 0x00 #define CONTEXT_CLAIM 0x04 @@ -81,17 +81,21 @@ static int plic_parent_irq __ro_after_init; static bool plic_cpuhp_setup_done __ro_after_init; static DEFINE_PER_CPU(struct plic_handler, plic_handlers); -static inline void plic_toggle(struct plic_handler *handler, - int hwirq, int enable) +static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable) { - u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32); + u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32); u32 hwirq_mask = 1 << (hwirq % 32); - raw_spin_lock(&handler->enable_lock); if (enable) writel(readl(reg) | hwirq_mask, reg); else writel(readl(reg) & ~hwirq_mask, reg); +} + +static void plic_toggle(struct plic_handler *handler, int hwirq, int enable) +{ + raw_spin_lock(&handler->enable_lock); + __plic_toggle(handler->enable_base, hwirq, enable); raw_spin_unlock(&handler->enable_lock); } @@ -324,8 +328,18 @@ static int __init plic_init(struct device_node *node, * Skip contexts other than external interrupts for our * privilege level. */ - if (parent.args[0] != RV_IRQ_EXT) + if (parent.args[0] != RV_IRQ_EXT) { + /* Disable S-mode enable bits if running in M-mode. */ + if (IS_ENABLED(CONFIG_RISCV_M_MODE)) { + void __iomem *enable_base = priv->regs + + CONTEXT_ENABLE_BASE + + i * CONTEXT_ENABLE_SIZE; + + for (hwirq = 1; hwirq <= nr_irqs; hwirq++) + __plic_toggle(enable_base, hwirq, 0); + } continue; + } hartid = riscv_of_parent_hartid(parent.np); if (hartid < 0) { @@ -361,11 +375,11 @@ static int __init plic_init(struct device_node *node, cpumask_set_cpu(cpu, &priv->lmask); handler->present = true; - handler->hart_base = - priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART; + handler->hart_base = priv->regs + CONTEXT_BASE + + i * CONTEXT_SIZE; raw_spin_lock_init(&handler->enable_lock); - handler->enable_base = - priv->regs + ENABLE_BASE + i * ENABLE_PER_HART; + handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE + + i * CONTEXT_ENABLE_SIZE; handler->priv = priv; done: for (hwirq = 1; hwirq <= nr_irqs; hwirq++) @@ -398,3 +412,4 @@ out_free_priv: IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init); IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */ +IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */ diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index b7cb2da71888..9d18f47040eb 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -214,6 +214,48 @@ static const struct stm32_desc_irq stm32mp1_desc_irq[] = { { .exti = 73, .irq_parent = 129, .chip = &stm32_exti_h_chip }, }; +static const struct stm32_desc_irq stm32mp13_desc_irq[] = { + { .exti = 0, .irq_parent = 6, .chip = &stm32_exti_h_chip }, + { .exti = 1, .irq_parent = 7, .chip = &stm32_exti_h_chip }, + { .exti = 2, .irq_parent = 8, .chip = &stm32_exti_h_chip }, + { .exti = 3, .irq_parent = 9, .chip = &stm32_exti_h_chip }, + { .exti = 4, .irq_parent = 10, .chip = &stm32_exti_h_chip }, + { .exti = 5, .irq_parent = 24, .chip = &stm32_exti_h_chip }, + { .exti = 6, .irq_parent = 65, .chip = &stm32_exti_h_chip }, + { .exti = 7, .irq_parent = 66, .chip = &stm32_exti_h_chip }, + { .exti = 8, .irq_parent = 67, .chip = &stm32_exti_h_chip }, + { .exti = 9, .irq_parent = 68, .chip = &stm32_exti_h_chip }, + { .exti = 10, .irq_parent = 41, .chip = &stm32_exti_h_chip }, + { .exti = 11, 
.irq_parent = 43, .chip = &stm32_exti_h_chip }, + { .exti = 12, .irq_parent = 77, .chip = &stm32_exti_h_chip }, + { .exti = 13, .irq_parent = 78, .chip = &stm32_exti_h_chip }, + { .exti = 14, .irq_parent = 106, .chip = &stm32_exti_h_chip }, + { .exti = 15, .irq_parent = 109, .chip = &stm32_exti_h_chip }, + { .exti = 16, .irq_parent = 1, .chip = &stm32_exti_h_chip }, + { .exti = 19, .irq_parent = 3, .chip = &stm32_exti_h_chip_direct }, + { .exti = 21, .irq_parent = 32, .chip = &stm32_exti_h_chip_direct }, + { .exti = 22, .irq_parent = 34, .chip = &stm32_exti_h_chip_direct }, + { .exti = 23, .irq_parent = 73, .chip = &stm32_exti_h_chip_direct }, + { .exti = 24, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct }, + { .exti = 25, .irq_parent = 114, .chip = &stm32_exti_h_chip_direct }, + { .exti = 26, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct }, + { .exti = 27, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct }, + { .exti = 28, .irq_parent = 40, .chip = &stm32_exti_h_chip_direct }, + { .exti = 29, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct }, + { .exti = 30, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct }, + { .exti = 31, .irq_parent = 54, .chip = &stm32_exti_h_chip_direct }, + { .exti = 32, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct }, + { .exti = 33, .irq_parent = 84, .chip = &stm32_exti_h_chip_direct }, + { .exti = 44, .irq_parent = 96, .chip = &stm32_exti_h_chip_direct }, + { .exti = 47, .irq_parent = 92, .chip = &stm32_exti_h_chip_direct }, + { .exti = 48, .irq_parent = 116, .chip = &stm32_exti_h_chip_direct }, + { .exti = 50, .irq_parent = 117, .chip = &stm32_exti_h_chip_direct }, + { .exti = 52, .irq_parent = 118, .chip = &stm32_exti_h_chip_direct }, + { .exti = 53, .irq_parent = 119, .chip = &stm32_exti_h_chip_direct }, + { .exti = 68, .irq_parent = 63, .chip = &stm32_exti_h_chip_direct }, + { .exti = 70, .irq_parent = 98, .chip = &stm32_exti_h_chip_direct }, +}; + static const struct stm32_exti_drv_data stm32mp1_drv_data = { .exti_banks = stm32mp1_exti_banks, .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks), @@ -221,6 +263,13 @@ static const struct stm32_exti_drv_data stm32mp1_drv_data = { .irq_nr = ARRAY_SIZE(stm32mp1_desc_irq), }; +static const struct stm32_exti_drv_data stm32mp13_drv_data = { + .exti_banks = stm32mp1_exti_banks, + .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks), + .desc_irqs = stm32mp13_desc_irq, + .irq_nr = ARRAY_SIZE(stm32mp13_desc_irq), +}; + static const struct stm32_desc_irq *stm32_exti_get_desc(const struct stm32_exti_drv_data *drv_data, irq_hw_number_t hwirq) @@ -922,6 +971,7 @@ static int stm32_exti_probe(struct platform_device *pdev) /* platform driver only for MP1 */ static const struct of_device_id stm32_exti_ids[] = { { .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data}, + { .compatible = "st,stm32mp13-exti", .data = &stm32mp13_drv_data}, {}, }; MODULE_DEVICE_TABLE(of, stm32_exti_ids); diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c index f032db23b30f..b2d61d4f6fe6 100644 --- a/drivers/irqchip/irq-ts4800.c +++ b/drivers/irqchip/irq-ts4800.c @@ -19,14 +19,15 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> +#include <linux/seq_file.h> #define IRQ_MASK 0x4 #define IRQ_STATUS 0x8 struct ts4800_irq_data { void __iomem *base; + struct platform_device *pdev; struct irq_domain *domain; - struct irq_chip irq_chip; }; static void ts4800_irq_mask(struct irq_data *d) @@ -47,12 +48,25 @@ static void ts4800_irq_unmask(struct irq_data *d) writew(reg & ~mask, 
data->base + IRQ_MASK); } +static void ts4800_irq_print_chip(struct irq_data *d, struct seq_file *p) +{ + struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d); + + seq_printf(p, "%s", dev_name(&data->pdev->dev)); +} + +static const struct irq_chip ts4800_chip = { + .irq_mask = ts4800_irq_mask, + .irq_unmask = ts4800_irq_unmask, + .irq_print_chip = ts4800_irq_print_chip, +}; + static int ts4800_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { struct ts4800_irq_data *data = d->host_data; - irq_set_chip_and_handler(irq, &data->irq_chip, handle_simple_irq); + irq_set_chip_and_handler(irq, &ts4800_chip, handle_simple_irq); irq_set_chip_data(irq, data); irq_set_noprobe(irq); @@ -92,13 +106,13 @@ static int ts4800_ic_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct ts4800_irq_data *data; - struct irq_chip *irq_chip; int parent_irq; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; + data->pdev = pdev; data->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->base)) return PTR_ERR(data->base); @@ -111,11 +125,6 @@ static int ts4800_ic_probe(struct platform_device *pdev) return -EINVAL; } - irq_chip = &data->irq_chip; - irq_chip->name = dev_name(&pdev->dev); - irq_chip->irq_mask = ts4800_irq_mask; - irq_chip->irq_unmask = ts4800_irq_unmask; - data->domain = irq_domain_add_linear(node, 8, &ts4800_ic_ops, data); if (!data->domain) { dev_err(&pdev->dev, "cannot add IRQ domain\n"); diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index f2757b6aecc8..ba543ed9c154 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c @@ -7,12 +7,12 @@ #include <linux/io.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> -#include <linux/irqchip/versatile-fpga.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/seq_file.h> #include <asm/exception.h> #include <asm/mach/irq.h> @@ -34,14 +34,12 @@ /** * struct fpga_irq_data - irq data container for the FPGA IRQ controller * @base: memory offset in virtual memory - * @chip: chip container for this instance * @domain: IRQ domain for this instance * @valid: mask for valid IRQs on this controller * @used_irqs: number of active IRQs on this controller */ struct fpga_irq_data { void __iomem *base; - struct irq_chip chip; u32 valid; struct irq_domain *domain; u8 used_irqs; @@ -67,6 +65,20 @@ static void fpga_irq_unmask(struct irq_data *d) writel(mask, f->base + IRQ_ENABLE_SET); } +static void fpga_irq_print_chip(struct irq_data *d, struct seq_file *p) +{ + struct fpga_irq_data *f = irq_data_get_irq_chip_data(d); + + seq_printf(p, "%s", irq_domain_get_of_node(f->domain)->name); +} + +static const struct irq_chip fpga_chip = { + .irq_ack = fpga_irq_mask, + .irq_mask = fpga_irq_mask, + .irq_unmask = fpga_irq_unmask, + .irq_print_chip = fpga_irq_print_chip, +}; + static void fpga_irq_handle(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); @@ -116,7 +128,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs) * Keep iterating over all registered FPGA IRQ controllers until there are * no pending interrupts.
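
One more change lands in this file a few hunks below: fpga_irq_init() stops using irq_domain_add_simple() with a caller-supplied irq_start (the only remaining OF caller passed 0, as the removed call site shows) and creates a plain linear domain, then eagerly maps each valid source. Reduced to a standalone sketch under that assumption, with hypothetical foo_* names:

	#include <linux/bitops.h>
	#include <linux/bits.h>
	#include <linux/irqdomain.h>
	#include <linux/of.h>

	static int foo_init_domain(struct device_node *node, u32 valid,
				   const struct irq_domain_ops *ops, void *host_data)
	{
		struct irq_domain *domain;
		int i;

		domain = irq_domain_add_linear(node, fls(valid), ops, host_data);
		if (!domain)
			return -ENOMEM;

		/*
		 * Linear domains allocate descriptors lazily; create the
		 * mappings up front so behaviour matches the old
		 * pre-allocated irq_domain_add_simple() path.
		 */
		for (i = 0; i < fls(valid); i++)
			if (valid & BIT(i))
				irq_create_mapping(domain, i);

		return 0;
	}
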
*/ -asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs) +static asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs) { int i, handled; @@ -135,8 +147,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq, if (!(f->valid & BIT(hwirq))) return -EPERM; irq_set_chip_data(irq, f); - irq_set_chip_and_handler(irq, &f->chip, - handle_level_irq); + irq_set_chip_and_handler(irq, &fpga_chip, handle_level_irq); irq_set_probe(irq); return 0; } @@ -146,8 +157,8 @@ static const struct irq_domain_ops fpga_irqdomain_ops = { .xlate = irq_domain_xlate_onetwocell, }; -void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, - int parent_irq, u32 valid, struct device_node *node) +static void __init fpga_irq_init(void __iomem *base, int parent_irq, + u32 valid, struct device_node *node) { struct fpga_irq_data *f; int i; @@ -158,10 +169,6 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, } f = &fpga_irq_devices[fpga_irq_id]; f->base = base; - f->chip.name = name; - f->chip.irq_ack = fpga_irq_mask; - f->chip.irq_mask = fpga_irq_mask; - f->chip.irq_unmask = fpga_irq_unmask; f->valid = valid; if (parent_irq != -1) { @@ -169,20 +176,19 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, f); } - /* This will also allocate irq descriptors */ - f->domain = irq_domain_add_simple(node, fls(valid), irq_start, + f->domain = irq_domain_add_linear(node, fls(valid), &fpga_irqdomain_ops, f); /* This will allocate all valid descriptors in the linear case */ for (i = 0; i < fls(valid); i++) if (valid & BIT(i)) { - if (!irq_start) - irq_create_mapping(f->domain, i); + /* Is this still required? */ + irq_create_mapping(f->domain, i); f->used_irqs++; } pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs", - fpga_irq_id, name, base, f->used_irqs); + fpga_irq_id, node->name, base, f->used_irqs); if (parent_irq != -1) pr_cont(", parent IRQ: %d\n", parent_irq); else @@ -192,8 +198,8 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, } #ifdef CONFIG_OF -int __init fpga_irq_of_init(struct device_node *node, - struct device_node *parent) +static int __init fpga_irq_of_init(struct device_node *node, + struct device_node *parent) { void __iomem *base; u32 clear_mask; @@ -222,7 +228,7 @@ int __init fpga_irq_of_init(struct device_node *node, parent_irq = -1; } - fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); + fpga_irq_init(base, parent_irq, valid_mask, node); /* * On Versatile AB/PB, some secondary interrupts have a direct diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c index 356a59755d63..238d3d344949 100644 --- a/drivers/irqchip/irq-xilinx-intc.c +++ b/drivers/irqchip/irq-xilinx-intc.c @@ -32,6 +32,8 @@ #define MER_ME (1<<0) #define MER_HIE (1<<1) +#define SPURIOUS_IRQ (-1U) + static DEFINE_STATIC_KEY_FALSE(xintc_is_be); struct xintc_irq_chip { @@ -110,20 +112,6 @@ static struct irq_chip intc_dev = { .irq_mask_ack = intc_mask_ack, }; -unsigned int xintc_get_irq(void) -{ - unsigned int irq = -1; - u32 hwirq; - - hwirq = xintc_read(primary_intc, IVR); - if (hwirq != -1U) - irq = irq_find_mapping(primary_intc->root_domain, hwirq); - - pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq); - - return irq; -} - static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct xintc_irq_chip *irqc = d->host_data; @@ -164,6 +152,19 @@ static void xil_intc_irq_handler(struct irq_desc *desc) 
chained_irq_exit(chip, desc); } +static void xil_intc_handle_irq(struct pt_regs *regs) +{ + u32 hwirq; + + do { + hwirq = xintc_read(primary_intc, IVR); + if (unlikely(hwirq == SPURIOUS_IRQ)) + break; + + generic_handle_domain_irq(primary_intc->root_domain, hwirq); + } while (true); +} + static int __init xilinx_intc_of_init(struct device_node *intc, struct device_node *parent) { @@ -233,6 +234,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc, } else { primary_intc = irqc; irq_set_default_host(primary_intc->root_domain); + set_handle_irq(xil_intc_handle_irq); } return 0; diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c index 173e6520e06e..d96916cf6a41 100644 --- a/drivers/irqchip/qcom-pdc.c +++ b/drivers/irqchip/qcom-pdc.c @@ -21,23 +21,19 @@ #include <linux/slab.h> #include <linux/types.h> -#define PDC_MAX_IRQS 168 #define PDC_MAX_GPIO_IRQS 256 -#define CLEAR_INTR(reg, intr) (reg & ~(1 << intr)) -#define ENABLE_INTR(reg, intr) (reg | (1 << intr)) - #define IRQ_ENABLE_BANK 0x10 #define IRQ_i_CFG 0x110 -#define PDC_NO_PARENT_IRQ ~0UL - struct pdc_pin_region { u32 pin_base; u32 parent_base; u32 cnt; }; +#define pin_to_hwirq(r, p) ((r)->parent_base + (p) - (r)->pin_base) + static DEFINE_RAW_SPINLOCK(pdc_lock); static void __iomem *pdc_base; static struct pdc_pin_region *pdc_region; @@ -56,17 +52,18 @@ static u32 pdc_reg_read(int reg, u32 i) static void pdc_enable_intr(struct irq_data *d, bool on) { int pin_out = d->hwirq; + unsigned long enable; + unsigned long flags; u32 index, mask; - u32 enable; index = pin_out / 32; mask = pin_out % 32; - raw_spin_lock(&pdc_lock); + raw_spin_lock_irqsave(&pdc_lock, flags); enable = pdc_reg_read(IRQ_ENABLE_BANK, index); - enable = on ? ENABLE_INTR(enable, mask) : CLEAR_INTR(enable, mask); + __assign_bit(mask, &enable, on); pdc_reg_write(IRQ_ENABLE_BANK, index, enable); - raw_spin_unlock(&pdc_lock); + raw_spin_unlock_irqrestore(&pdc_lock, flags); } static void qcom_pdc_gic_disable(struct irq_data *d) @@ -186,34 +183,17 @@ static struct irq_chip qcom_pdc_gic_chip = { .irq_set_affinity = irq_chip_set_affinity_parent, }; -static irq_hw_number_t get_parent_hwirq(int pin) +static struct pdc_pin_region *get_pin_region(int pin) { int i; - struct pdc_pin_region *region; for (i = 0; i < pdc_region_cnt; i++) { - region = &pdc_region[i]; - if (pin >= region->pin_base && - pin < region->pin_base + region->cnt) - return (region->parent_base + pin - region->pin_base); + if (pin >= pdc_region[i].pin_base && + pin < pdc_region[i].pin_base + pdc_region[i].cnt) + return &pdc_region[i]; } - return PDC_NO_PARENT_IRQ; -} - -static int qcom_pdc_translate(struct irq_domain *d, struct irq_fwspec *fwspec, - unsigned long *hwirq, unsigned int *type) -{ - if (is_of_node(fwspec->fwnode)) { - if (fwspec->param_count != 2) - return -EINVAL; - - *hwirq = fwspec->param[0]; - *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; - return 0; - } - - return -EINVAL; + return NULL; } static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq, @@ -221,55 +201,12 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq, { struct irq_fwspec *fwspec = data; struct irq_fwspec parent_fwspec; - irq_hw_number_t hwirq, parent_hwirq; - unsigned int type; - int ret; - - ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type); - if (ret) - return ret; - - ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, - &qcom_pdc_gic_chip, NULL); - if (ret) - return ret; - - parent_hwirq = get_parent_hwirq(hwirq); - if (parent_hwirq == 
PDC_NO_PARENT_IRQ) - return irq_domain_disconnect_hierarchy(domain->parent, virq); - - if (type & IRQ_TYPE_EDGE_BOTH) - type = IRQ_TYPE_EDGE_RISING; - - if (type & IRQ_TYPE_LEVEL_MASK) - type = IRQ_TYPE_LEVEL_HIGH; - - parent_fwspec.fwnode = domain->parent->fwnode; - parent_fwspec.param_count = 3; - parent_fwspec.param[0] = 0; - parent_fwspec.param[1] = parent_hwirq; - parent_fwspec.param[2] = type; - - return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, - &parent_fwspec); -} - -static const struct irq_domain_ops qcom_pdc_ops = { - .translate = qcom_pdc_translate, - .alloc = qcom_pdc_alloc, - .free = irq_domain_free_irqs_common, -}; - -static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq, - unsigned int nr_irqs, void *data) -{ - struct irq_fwspec *fwspec = data; - struct irq_fwspec parent_fwspec; - irq_hw_number_t hwirq, parent_hwirq; + struct pdc_pin_region *region; + irq_hw_number_t hwirq; unsigned int type; int ret; - ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type); + ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type); if (ret) return ret; @@ -281,8 +218,8 @@ static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq, if (ret) return ret; - parent_hwirq = get_parent_hwirq(hwirq); - if (parent_hwirq == PDC_NO_PARENT_IRQ) + region = get_pin_region(hwirq); + if (!region) return irq_domain_disconnect_hierarchy(domain->parent, virq); if (type & IRQ_TYPE_EDGE_BOTH) @@ -294,23 +231,16 @@ static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq, parent_fwspec.fwnode = domain->parent->fwnode; parent_fwspec.param_count = 3; parent_fwspec.param[0] = 0; - parent_fwspec.param[1] = parent_hwirq; + parent_fwspec.param[1] = pin_to_hwirq(region, hwirq); parent_fwspec.param[2] = type; return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_fwspec); } -static int qcom_pdc_gpio_domain_select(struct irq_domain *d, - struct irq_fwspec *fwspec, - enum irq_domain_bus_token bus_token) -{ - return bus_token == DOMAIN_BUS_WAKEUP; -} - -static const struct irq_domain_ops qcom_pdc_gpio_ops = { - .select = qcom_pdc_gpio_domain_select, - .alloc = qcom_pdc_gpio_alloc, +static const struct irq_domain_ops qcom_pdc_ops = { + .translate = irq_domain_translate_twocell, + .alloc = qcom_pdc_alloc, .free = irq_domain_free_irqs_common, }; @@ -361,7 +291,7 @@ static int pdc_setup_pin_mapping(struct device_node *np) static int qcom_pdc_init(struct device_node *node, struct device_node *parent) { - struct irq_domain *parent_domain, *pdc_domain, *pdc_gpio_domain; + struct irq_domain *parent_domain, *pdc_domain; int ret; pdc_base = of_iomap(node, 0); @@ -383,32 +313,21 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent) goto fail; } - pdc_domain = irq_domain_create_hierarchy(parent_domain, 0, PDC_MAX_IRQS, - of_fwnode_handle(node), - &qcom_pdc_ops, NULL); - if (!pdc_domain) { - pr_err("%pOF: GIC domain add failed\n", node); - ret = -ENOMEM; - goto fail; - } - - pdc_gpio_domain = irq_domain_create_hierarchy(parent_domain, + pdc_domain = irq_domain_create_hierarchy(parent_domain, IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP, PDC_MAX_GPIO_IRQS, of_fwnode_handle(node), - &qcom_pdc_gpio_ops, NULL); - if (!pdc_gpio_domain) { - pr_err("%pOF: PDC domain add failed for GPIO domain\n", node); + &qcom_pdc_ops, NULL); + if (!pdc_domain) { + pr_err("%pOF: PDC domain add failed\n", node); ret = -ENOMEM; - goto remove; + goto fail; } - irq_domain_update_bus_token(pdc_gpio_domain, DOMAIN_BUS_WAKEUP); + 
irq_domain_update_bus_token(pdc_domain, DOMAIN_BUS_WAKEUP); return 0; -remove: - irq_domain_remove(pdc_domain); fail: kfree(pdc_region); iounmap(pdc_base); diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index bd087cca1c1d..af17459c1a5c 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -2005,7 +2005,11 @@ setup_hw(struct hfc_pci *hc) } /* Allocate memory for FIFOS */ /* the memory needs to be on a 32k boundary within the first 4G */ - dma_set_mask(&hc->pdev->dev, 0xFFFF8000); + if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) { + printk(KERN_WARNING + "HFC-PCI: No usable DMA configuration!\n"); + return -EIO; + } buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle, GFP_KERNEL); /* We silently assume the address is okay if nonzero */ diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c index e11ca6bbc7f4..c3b2c99b5cd5 100644 --- a/drivers/isdn/mISDN/dsp_pipeline.c +++ b/drivers/isdn/mISDN/dsp_pipeline.c @@ -192,7 +192,7 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline) int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) { int found = 0; - char *dup, *tok, *name, *args; + char *dup, *next, *tok, *name, *args; struct dsp_element_entry *entry, *n; struct dsp_pipeline_entry *pipeline_entry; struct mISDN_dsp_element *elem; @@ -203,10 +203,10 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) if (!list_empty(&pipeline->list)) _dsp_pipeline_destroy(pipeline); - dup = kstrdup(cfg, GFP_ATOMIC); + dup = next = kstrdup(cfg, GFP_ATOMIC); if (!dup) return 0; - while ((tok = strsep(&dup, "|"))) { + while ((tok = strsep(&next, "|"))) { if (!strlen(tok)) continue; name = strsep(&tok, "("); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ab9cc91931f9..183ce0d6728f 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2027,7 +2027,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait) set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); - blk_set_queue_dying(md->queue); + blk_mark_disk_dead(md->disk); /* * Take suspend_lock so that presuspend and postsuspend methods diff --git a/drivers/md/md.c b/drivers/md/md.c index f88a9e948f3e..f210a55af201 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5867,10 +5867,6 @@ int md_run(struct mddev *mddev) nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev)); } - /* Set the NOWAIT flags if all underlying devices support it */ - if (nowait) - blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue); - if (!bioset_initialized(&mddev->bio_set)) { err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); if (err) @@ -6008,6 +6004,10 @@ int md_run(struct mddev *mddev) else blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue); + + /* Set the NOWAIT flags if all underlying devices support it */ + if (nowait) + blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue); } if (pers->sync_request) { if (mddev->kobj.sd && diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index 70fa18b04ad2..b14d3f98e1eb 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -193,13 +193,11 @@ static void pcap_isr_work(struct work_struct *work) ezx_pcap_write(pcap, PCAP_REG_MSR, isr | msr); ezx_pcap_write(pcap, PCAP_REG_ISR, isr); - local_irq_disable(); service = isr & ~msr; for (irq = pcap->irq_base; service; service >>= 1, irq++) { if (service & 1) - generic_handle_irq(irq); + 
generic_handle_irq_safe(irq); } - local_irq_enable(); ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr); } while (gpio_get_value(pdata->gpio)); } diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c index bb9c4512c968..9fbfe784d710 100644 --- a/drivers/misc/eeprom/ee1004.c +++ b/drivers/misc/eeprom/ee1004.c @@ -114,6 +114,9 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf, if (offset + count > EE1004_PAGE_SIZE) count = EE1004_PAGE_SIZE - offset; + if (count > I2C_SMBUS_BLOCK_MAX) + count = I2C_SMBUS_BLOCK_MAX; + return i2c_smbus_read_i2c_block_data_or_emulated(client, offset, count, buf); } diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 4ccbf43e6bfa..aa1682b94a23 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -1288,7 +1288,14 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp) } if (copy_to_user(argp, &bp, sizeof(bp))) { - dma_buf_put(buf->dmabuf); + /* + * The usercopy failed, but we can't do much about it, as + * dma_buf_fd() already called fd_install() and made the + * file descriptor accessible for the current process. It + * might already be closed and dmabuf no longer valid when + * we reach this point. Therefore "leak" the fd and rely on + * the process exit path to do any required cleanup. + */ return -EFAULT; } diff --git a/drivers/misc/hi6421v600-irq.c b/drivers/misc/hi6421v600-irq.c index 1c763796cf1f..caa3de37698b 100644 --- a/drivers/misc/hi6421v600-irq.c +++ b/drivers/misc/hi6421v600-irq.c @@ -117,8 +117,8 @@ static irqreturn_t hi6421v600_irq_handler(int irq, void *__priv) * If both powerkey down and up IRQs are received, * handle them at the right order */ - generic_handle_irq(priv->irqs[POWERKEY_DOWN]); - generic_handle_irq(priv->irqs[POWERKEY_UP]); + generic_handle_irq_safe(priv->irqs[POWERKEY_DOWN]); + generic_handle_irq_safe(priv->irqs[POWERKEY_UP]); pending &= ~HISI_IRQ_POWERKEY_UP_DOWN; } @@ -126,7 +126,7 @@ static irqreturn_t hi6421v600_irq_handler(int irq, void *__priv) continue; for_each_set_bit(offset, &pending, BITS_PER_BYTE) { - generic_handle_irq(priv->irqs[offset + i * BITS_PER_BYTE]); + generic_handle_irq_safe(priv->irqs[offset + i * BITS_PER_BYTE]); } } diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 4e61b28a002f..4e67c1403cc9 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1682,31 +1682,31 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) struct mmc_card *card = mq->card; struct mmc_host *host = card->host; blk_status_t error = BLK_STS_OK; - int retries = 0; do { u32 status; int err; + int retries = 0; - mmc_blk_rw_rq_prep(mqrq, card, 1, mq); + while (retries++ <= MMC_READ_SINGLE_RETRIES) { + mmc_blk_rw_rq_prep(mqrq, card, 1, mq); - mmc_wait_for_req(host, mrq); + mmc_wait_for_req(host, mrq); - err = mmc_send_status(card, &status); - if (err) - goto error_exit; - - if (!mmc_host_is_spi(host) && - !mmc_ready_for_data(status)) { - err = mmc_blk_fix_state(card, req); + err = mmc_send_status(card, &status); if (err) goto error_exit; - } - if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES) - continue; + if (!mmc_host_is_spi(host) && + !mmc_ready_for_data(status)) { + err = mmc_blk_fix_state(card, req); + if (err) + goto error_exit; + } - retries = 0; + if (!mrq->cmd->error) + break; + } if (mrq->cmd->error || mrq->data->error || @@ -1908,7 +1908,7 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) cb_data.card = card; cb_data.status = 0; - err = 
__mmc_poll_for_busy(card->host, MMC_BLK_TIMEOUT_MS, + err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS, &mmc_blk_busy_cb, &cb_data); /* diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index bbbbcaf70a59..8421519c2a98 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1962,7 +1962,7 @@ static int mmc_sleep(struct mmc_host *host) goto out_release; } - err = __mmc_poll_for_busy(host, timeout_ms, &mmc_sleep_busy_cb, host); + err = __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_sleep_busy_cb, host); out_release: mmc_retune_release(host); diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index d63d1c735335..180d7e9d3400 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -21,6 +21,8 @@ #define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */ #define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */ +#define MMC_OP_COND_PERIOD_US (1 * 1000) /* 1ms */ +#define MMC_OP_COND_TIMEOUT_MS 1000 /* 1s */ static const u8 tuning_blk_pattern_4bit[] = { 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, @@ -232,7 +234,9 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) cmd.arg = mmc_host_is_spi(host) ? 0 : ocr; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR; - err = __mmc_poll_for_busy(host, 1000, &__mmc_send_op_cond_cb, &cb_data); + err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US, + MMC_OP_COND_TIMEOUT_MS, + &__mmc_send_op_cond_cb, &cb_data); if (err) return err; @@ -495,13 +499,14 @@ static int mmc_busy_cb(void *cb_data, bool *busy) return 0; } -int __mmc_poll_for_busy(struct mmc_host *host, unsigned int timeout_ms, +int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us, + unsigned int timeout_ms, int (*busy_cb)(void *cb_data, bool *busy), void *cb_data) { int err; unsigned long timeout; - unsigned int udelay = 32, udelay_max = 32768; + unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768; bool expired = false; bool busy = false; @@ -546,7 +551,7 @@ int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, cb_data.retry_crc_err = retry_crc_err; cb_data.busy_cmd = busy_cmd; - return __mmc_poll_for_busy(host, timeout_ms, &mmc_busy_cb, &cb_data); + return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data); } EXPORT_SYMBOL_GPL(mmc_poll_for_busy); diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h index 9c813b851d0b..09ffbc00908b 100644 --- a/drivers/mmc/core/mmc_ops.h +++ b/drivers/mmc/core/mmc_ops.h @@ -41,7 +41,8 @@ int mmc_can_ext_csd(struct mmc_card *card); int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal); bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd, unsigned int timeout_ms); -int __mmc_poll_for_busy(struct mmc_host *host, unsigned int timeout_ms, +int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us, + unsigned int timeout_ms, int (*busy_cb)(void *cb_data, bool *busy), void *cb_data); int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 45f578793980..bfbfed30dc4d 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -67,7 +67,7 @@ static const unsigned int sd_au_size[] = { __res & __mask; \ }) -#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 2000 +#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 1000 #define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000 struct sd_busy_data { @@ -1664,9 +1664,15 @@ static int sd_poweroff_notify(struct mmc_card *card) goto out; } + /* Find out when the command is completed. 
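
The period_us plumbing above changes only the first rung of the backoff ladder: polling still doubles the sleep up to 32768us, but a caller that knows the typical completion time (the CMD1/SEND_OP_COND path now passes 1ms) no longer burns the early 32/64/128us wakeups. The loop shape, reduced to its essentials as a sketch (the mmc specifics and cb_data plumbing are elided; this is not the exact kernel code):

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/types.h>

	static int poll_busy(unsigned int period_us, unsigned int timeout_ms,
			     int (*busy_cb)(void *data, bool *busy), void *data)
	{
		unsigned int udelay = period_us ? period_us : 32;
		unsigned int udelay_max = 32768;
		unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
		bool busy = false;

		do {
			/* Sample expiry before the callback: we may get preempted. */
			bool expired = time_after(jiffies, timeout);
			int err = busy_cb(data, &busy);

			if (err)
				return err;
			if (busy && expired)
				return -ETIMEDOUT;
			if (busy) {
				/* Exponential backoff, capped at udelay_max. */
				usleep_range(udelay, udelay * 2);
				if (udelay < udelay_max)
					udelay *= 2;
			}
		} while (busy);

		return 0;
	}
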
*/ + err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false, + MMC_BUSY_EXTR_SINGLE); + if (err) + goto out; + cb_data.card = card; cb_data.reg_buf = reg_buf; - err = __mmc_poll_for_busy(card->host, SD_POWEROFF_NOTIFY_TIMEOUT_MS, + err = __mmc_poll_for_busy(card->host, 0, SD_POWEROFF_NOTIFY_TIMEOUT_MS, &sd_busy_poweroff_notify_cb, &cb_data); out: diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 8f36536cb1b6..58ab9d90bc8b 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -173,6 +173,8 @@ struct meson_host { int irq; bool vqmmc_enabled; + bool needs_pre_post_req; + }; #define CMD_CFG_LENGTH_MASK GENMASK(8, 0) @@ -663,6 +665,8 @@ static void meson_mmc_request_done(struct mmc_host *mmc, struct meson_host *host = mmc_priv(mmc); host->cmd = NULL; + if (host->needs_pre_post_req) + meson_mmc_post_req(mmc, mrq, 0); mmc_request_done(host->mmc, mrq); } @@ -880,7 +884,7 @@ static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct meson_host *host = mmc_priv(mmc); - bool needs_pre_post_req = mrq->data && + host->needs_pre_post_req = mrq->data && !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE); /* @@ -896,22 +900,19 @@ static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) } } - if (needs_pre_post_req) { + if (host->needs_pre_post_req) { meson_mmc_get_transfer_mode(mmc, mrq); if (!meson_mmc_desc_chain_mode(mrq->data)) - needs_pre_post_req = false; + host->needs_pre_post_req = false; } - if (needs_pre_post_req) + if (host->needs_pre_post_req) meson_mmc_pre_req(mmc, mrq); /* Stop execution */ writel(0, host->regs + SD_EMMC_START); meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd); - - if (needs_pre_post_req) - meson_mmc_post_req(mmc, mrq, 0); } static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd) diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index 16d1c7a43d33..b6eb75f4bbfc 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -705,12 +705,12 @@ static int moxart_remove(struct platform_device *pdev) if (!IS_ERR_OR_NULL(host->dma_chan_rx)) dma_release_channel(host->dma_chan_rx); mmc_remove_host(mmc); - mmc_free_host(mmc); writel(0, host->base + REG_INTERRUPT_MASK); writel(0, host->base + REG_POWER_CONTROL); writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF, host->base + REG_CLOCK_CONTROL); + mmc_free_host(mmc); return 0; } diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index a593b1fbd69e..0f3658b36513 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -524,12 +524,16 @@ static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask) static int esdhc_of_enable_dma(struct sdhci_host *host) { + int ret; u32 value; struct device *dev = mmc_dev(host->mmc); if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") || - of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) { + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + if (ret) + return ret; + } value = sdhci_readl(host, ESDHC_DMA_SYSCTL); diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index bcc595c70a9f..104dcd702870 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -405,6 +405,9 @@ static int 
sh_mmcif_dma_slave_config(struct sh_mmcif_host *host, struct dma_slave_config cfg = { 0, }; res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + cfg.direction = direction; if (direction == DMA_DEV_TO_MEM) { diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 6ed6c51fac69..d503821a3e60 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c @@ -264,16 +264,20 @@ static int phram_setup(const char *val) } } - if (erasesize) - div_u64_rem(len, (uint32_t)erasesize, &rem); - if (len == 0 || erasesize == 0 || erasesize > len - || erasesize > UINT_MAX || rem) { + || erasesize > UINT_MAX) { parse_err("illegal erasesize or len\n"); ret = -EINVAL; goto error; } + div_u64_rem(len, (uint32_t)erasesize, &rem); + if (rem) { + parse_err("len is not a multiple of erasesize\n"); + ret = -EINVAL; + goto error; + } + ret = register_device(name, start, len, (uint32_t)erasesize); if (ret) goto error; diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 70f492dce158..eef87b28d6c8 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -546,6 +546,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd) config.stride = 1; config.read_only = true; config.root_only = true; + config.ignore_wp = true; config.no_of_node = !of_device_is_compatible(node, "nvmem-cells"); config.priv = mtd; @@ -833,6 +834,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd, config.owner = THIS_MODULE; config.type = NVMEM_TYPE_OTP; config.root_only = true; + config.ignore_wp = true; config.reg_read = reg_read; config.size = size; config.of_node = np; diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 20408b7db540..820e5dc3bc9b 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -42,7 +42,7 @@ config MTD_NAND_OMAP2 tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller" depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST depends on HAS_IOMEM - select OMAP_GPMC if ARCH_K3 + depends on OMAP_GPMC help Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4 and Keystone platforms. 
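The phram_setup() hunk above reorders validation so the range checks run before the divisibility check, which both guarantees a nonzero divisor for div_u64_rem() and gives each failure mode its own error message. Below is a minimal stand-alone C sketch of that ordering; check_phram_geometry() and its diagnostics are illustrative stand-ins, not the driver's API.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the validation order the phram change enforces. */
static int check_phram_geometry(uint64_t len, uint64_t erasesize)
{
        /* Range checks first: reject zero or oversized values outright. */
        if (len == 0 || erasesize == 0 || erasesize > len ||
            erasesize > UINT32_MAX) {
                fprintf(stderr, "illegal erasesize or len\n");
                return -1;
        }

        /* Only now is the divisor known to be nonzero, so the
         * divisibility test is safe and gets a precise message. */
        if (len % erasesize != 0) {
                fprintf(stderr, "len is not a multiple of erasesize\n");
                return -1;
        }

        return 0;
}

int main(void)
{
        /* 1 MiB region with 4 KiB erase blocks: passes both checks. */
        return check_phram_geometry(1ULL << 20, 4096) ? 1 : 0;
}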
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index f75929783b94..aee78f5f4f15 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -2106,7 +2106,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, mtd->oobsize / trans, host->hwcfg.sector_size_1k); - if (!ret) { + if (ret != -EBADMSG) { *err_addr = brcmnand_get_uncorrecc_addr(ctrl); if (*err_addr) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 1b64c5a5140d..ded4df473928 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -2285,7 +2285,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, this->hw.must_apply_timings = false; ret = gpmi_nfc_apply_timings(this); if (ret) - return ret; + goto out_pm; } dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs); @@ -2414,6 +2414,7 @@ unmap: this->bch = false; +out_pm: pm_runtime_mark_last_busy(this->dev); pm_runtime_put_autosuspend(this->dev); diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c index efe0ffe4f1ab..9054559e52dd 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c @@ -68,9 +68,14 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np) struct ingenic_ecc *ecc; pdev = of_find_device_by_node(np); - if (!pdev || !platform_get_drvdata(pdev)) + if (!pdev) return ERR_PTR(-EPROBE_DEFER); + if (!platform_get_drvdata(pdev)) { + put_device(&pdev->dev); + return ERR_PTR(-EPROBE_DEFER); + } + ecc = platform_get_drvdata(pdev); clk_prepare_enable(ecc->clk); diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 7c6efa3b6255..1a77542c6d67 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -2,7 +2,6 @@ /* * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
*/ - #include <linux/clk.h> #include <linux/slab.h> #include <linux/bitops.h> @@ -3073,10 +3072,6 @@ static int qcom_nandc_probe(struct platform_device *pdev) if (dma_mapping_error(dev, nandc->base_dma)) return -ENXIO; - ret = qcom_nandc_alloc(nandc); - if (ret) - goto err_nandc_alloc; - ret = clk_prepare_enable(nandc->core_clk); if (ret) goto err_core_clk; @@ -3085,6 +3080,10 @@ static int qcom_nandc_probe(struct platform_device *pdev) if (ret) goto err_aon_clk; + ret = qcom_nandc_alloc(nandc); + if (ret) + goto err_nandc_alloc; + ret = qcom_nandc_setup(nandc); if (ret) goto err_setup; @@ -3096,15 +3095,14 @@ static int qcom_nandc_probe(struct platform_device *pdev) return 0; err_setup: + qcom_nandc_unalloc(nandc); +err_nandc_alloc: clk_disable_unprepare(nandc->aon_clk); err_aon_clk: clk_disable_unprepare(nandc->core_clk); err_core_clk: - qcom_nandc_unalloc(nandc); -err_nandc_alloc: dma_unmap_resource(dev, res->start, resource_size(res), DMA_BIDIRECTIONAL, 0); - return ret; } diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c index 06a818cd2433..4311b89d8df0 100644 --- a/drivers/mtd/parsers/qcomsmempart.c +++ b/drivers/mtd/parsers/qcomsmempart.c @@ -58,11 +58,11 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, const struct mtd_partition **pparts, struct mtd_part_parser_data *data) { + size_t len = SMEM_FLASH_PTABLE_HDR_LEN; + int ret, i, j, tmpparts, numparts = 0; struct smem_flash_pentry *pentry; struct smem_flash_ptable *ptable; - size_t len = SMEM_FLASH_PTABLE_HDR_LEN; struct mtd_partition *parts; - int ret, i, numparts; char *name, *c; if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) @@ -75,7 +75,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, pr_debug("Parsing partition table info from SMEM\n"); ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len); if (IS_ERR(ptable)) { - pr_err("Error reading partition table header\n"); + if (PTR_ERR(ptable) != -EPROBE_DEFER) + pr_err("Error reading partition table header\n"); return PTR_ERR(ptable); } @@ -87,8 +88,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, } /* Ensure that # of partitions is less than the max we have allocated */ - numparts = le32_to_cpu(ptable->numparts); - if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) { + tmpparts = le32_to_cpu(ptable->numparts); + if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) { pr_err("Partition numbers exceed the max limit\n"); return -EINVAL; } @@ -116,11 +117,17 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, return PTR_ERR(ptable); } + for (i = 0; i < tmpparts; i++) { + pentry = &ptable->pentry[i]; + if (pentry->name[0] != '\0') + numparts++; + } + parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL); if (!parts) return -ENOMEM; - for (i = 0; i < numparts; i++) { + for (i = 0, j = 0; i < tmpparts; i++) { pentry = &ptable->pentry[i]; if (pentry->name[0] == '\0') continue; @@ -135,24 +142,25 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, for (c = name; *c != '\0'; c++) *c = tolower(*c); - parts[i].name = name; - parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize; - parts[i].mask_flags = pentry->attr; - parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize; + parts[j].name = name; + parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize; + parts[j].mask_flags = pentry->attr; + parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize; pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n", i, pentry->name, le32_to_cpu(pentry->offset), le32_to_cpu(pentry->length), pentry->attr); + 
j++; } pr_debug("SMEM partition table found: ver: %d len: %d\n", - le32_to_cpu(ptable->version), numparts); + le32_to_cpu(ptable->version), tmpparts); *pparts = parts; return numparts; out_free_parts: - while (--i >= 0) - kfree(parts[i].name); + while (--j >= 0) + kfree(parts[j].name); kfree(parts); *pparts = NULL; @@ -166,6 +174,8 @@ static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts, for (i = 0; i < nr_parts; i++) kfree(pparts[i].name); + + kfree(pparts); } static const struct of_device_id qcomsmem_of_match_table[] = { diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 6382e1937cca..c580acb8b1d3 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -138,6 +138,9 @@ static int com20020pci_probe(struct pci_dev *pdev, return -ENOMEM; ci = (struct com20020_pci_card_info *)id->driver_data; + if (!ci) + return -EINVAL; + priv->ci = ci; mm = &ci->misc_map; diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 6006c2e8fa2b..a86b1f71762e 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -225,7 +225,7 @@ static inline int __check_agg_selection_timer(struct port *port) if (bond == NULL) return 0; - return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0; + return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0; } /** @@ -1021,8 +1021,8 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) if (port->aggregator && port->aggregator->is_active && !__port_is_enabled(port)) { - __enable_port(port); + *update_slave_arr = true; } } break; @@ -1779,6 +1779,7 @@ static void ad_agg_selection_logic(struct aggregator *agg, port = port->next_port_in_aggregator) { __enable_port(port); } + *update_slave_arr = true; } } @@ -1994,7 +1995,7 @@ static void ad_marker_response_received(struct bond_marker *marker, */ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout) { - BOND_AD_INFO(bond).agg_select_timer = timeout; + atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout); } /** @@ -2278,6 +2279,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond) } /** + * bond_agg_timer_advance - advance agg_select_timer + * @bond: bonding structure + * + * Return true when agg_select_timer reaches 0. + */ +static bool bond_agg_timer_advance(struct bonding *bond) +{ + int val, nval; + + while (1) { + val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer); + if (!val) + return false; + nval = val - 1; + if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer, + val, nval) == val) + break; + } + return nval == 0; +} + +/** * bond_3ad_state_machine_handler - handle state machines timeout * @work: work context to fetch bonding struct to work on from * @@ -2312,9 +2335,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) if (!bond_has_slaves(bond)) goto re_arm; - /* check if agg_select_timer timer after initialize is timed out */ - if (BOND_AD_INFO(bond).agg_select_timer && - !(--BOND_AD_INFO(bond).agg_select_timer)) { + if (bond_agg_timer_advance(bond)) { slave = bond_first_slave_rcu(bond); port = slave ? 
&(SLAVE_AD_INFO(slave)->port) : NULL; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 238b56d77c36..aebeb46e6fa6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2379,10 +2379,9 @@ static int __bond_release_one(struct net_device *bond_dev, bond_select_active_slave(bond); } - if (!bond_has_slaves(bond)) { - bond_set_carrier(bond); + bond_set_carrier(bond); + if (!bond_has_slaves(bond)) eth_hw_addr_random(bond_dev); - } unblock_netpoll_tx(); synchronize_rcu(); diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index b7dc1c32875f..acd74725831f 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1715,15 +1715,15 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll, RCANFD_NAPI_WEIGHT); + spin_lock_init(&priv->tx_lock); + devm_can_led_init(ndev); + gpriv->ch[priv->channel] = priv; err = register_candev(ndev); if (err) { dev_err(&pdev->dev, "register_candev() failed, error %d\n", err); goto fail_candev; } - spin_lock_init(&priv->tx_lock); - devm_can_led_init(ndev); - gpriv->ch[priv->channel] = priv; dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel); return 0; diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c index 2ed2370a3166..2d73ebbf3836 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -1787,7 +1787,7 @@ static int es58x_open(struct net_device *netdev) struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; int ret; - if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1) { + if (!es58x_dev->opened_channel_cnt) { ret = es58x_alloc_rx_urbs(es58x_dev); if (ret) return ret; @@ -1805,12 +1805,13 @@ static int es58x_open(struct net_device *netdev) if (ret) goto free_urbs; + es58x_dev->opened_channel_cnt++; netif_start_queue(netdev); return ret; free_urbs: - if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt)) + if (!es58x_dev->opened_channel_cnt) es58x_free_urbs(es58x_dev); netdev_err(netdev, "%s: Could not open the network device: %pe\n", __func__, ERR_PTR(ret)); @@ -1845,7 +1846,8 @@ static int es58x_stop(struct net_device *netdev) es58x_flush_pending_tx_msg(netdev); - if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt)) + es58x_dev->opened_channel_cnt--; + if (!es58x_dev->opened_channel_cnt) es58x_free_urbs(es58x_dev); return 0; @@ -2215,7 +2217,6 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf, init_usb_anchor(&es58x_dev->tx_urbs_idle); init_usb_anchor(&es58x_dev->tx_urbs_busy); atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0); - atomic_set(&es58x_dev->opened_channel_cnt, 0); usb_set_intfdata(intf, es58x_dev); es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev, diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h index 826a15871573..e5033cb5e695 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.h +++ b/drivers/net/can/usb/etas_es58x/es58x_core.h @@ -373,8 +373,6 @@ struct es58x_operators { * queue wake/stop logic should prevent this URB from getting * empty. Please refer to es58x_get_tx_urb() for more details. * @tx_urbs_idle_cnt: number of urbs in @tx_urbs_idle. - * @opened_channel_cnt: number of channels opened (c.f. es58x_open() - * and es58x_stop()). * @ktime_req_ns: kernel timestamp when es58x_set_realtime_diff_ns() * was called. 
* @realtime_diff_ns: difference in nanoseconds between the clocks of @@ -384,6 +382,10 @@ struct es58x_operators { * in RX branches. * @rx_max_packet_size: Maximum length of bulk-in URB. * @num_can_ch: Number of CAN channels (i.e. number of elements of @netdev). + * @opened_channel_cnt: number of channels opened. Free of race + * conditions because its two users (net_device_ops:ndo_open() + * and net_device_ops:ndo_stop()) guarantee that the network + * stack big kernel lock (a.k.a. rtnl_mutex) is held. * @rx_cmd_buf_len: Length of @rx_cmd_buf. * @rx_cmd_buf: The device might split the URB commands in an * arbitrary number of pieces. This buffer is used to concatenate @@ -406,7 +408,6 @@ struct es58x_device { struct usb_anchor tx_urbs_busy; struct usb_anchor tx_urbs_idle; atomic_t tx_urbs_idle_cnt; - atomic_t opened_channel_cnt; u64 ktime_req_ns; s64 realtime_diff_ns; @@ -415,6 +416,7 @@ struct es58x_device { u16 rx_max_packet_size; u8 num_can_ch; + u8 opened_channel_cnt; u16 rx_cmd_buf_len; union es58x_urb_cmd rx_cmd_buf; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index b487e3fe770a..d35749fad1ef 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -191,8 +191,8 @@ struct gs_can { struct gs_usb { struct gs_can *canch[GS_MAX_INTF]; struct usb_anchor rx_submitted; - atomic_t active_channels; struct usb_device *udev; + u8 active_channels; }; /* 'allocate' a tx context. @@ -589,7 +589,7 @@ static int gs_can_open(struct net_device *netdev) if (rc) return rc; - if (atomic_add_return(1, &parent->active_channels) == 1) { + if (!parent->active_channels) { for (i = 0; i < GS_MAX_RX_URBS; i++) { struct urb *urb; u8 *buf; @@ -690,6 +690,7 @@ static int gs_can_open(struct net_device *netdev) dev->can.state = CAN_STATE_ERROR_ACTIVE; + parent->active_channels++; if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) netif_start_queue(netdev); @@ -705,7 +706,8 @@ static int gs_can_close(struct net_device *netdev) netif_stop_queue(netdev); /* Stop polling */ - if (atomic_dec_and_test(&parent->active_channels)) + parent->active_channels--; + if (!parent->active_channels) usb_kill_anchored_urbs(&parent->rx_submitted); /* Stop sending URBs */ @@ -984,8 +986,6 @@ static int gs_usb_probe(struct usb_interface *intf, init_usb_anchor(&dev->rx_submitted); - atomic_set(&dev->active_channels, 0); - usb_set_intfdata(intf, dev); dev->udev = interface_to_usbdev(intf); diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 7b1457a6e327..0029d279616f 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -36,6 +36,7 @@ config NET_DSA_LANTIQ_GSWIP config NET_DSA_MT7530 tristate "MediaTek MT753x and MT7621 Ethernet switch support" select NET_DSA_TAG_MTK + select MEDIATEK_GE_PHY help This enables support for the MediaTek MT7530, MT7531, and MT7621 Ethernet switch chips. 
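The es58x and gs_usb hunks above rest on the same observation: a channel counter touched only from ndo_open()/ndo_stop(), which the networking core always calls under the rtnl mutex, needs no atomic operations. A small user-space sketch of that reasoning follows, with a pthread mutex standing in for rtnl_lock; every name here is invented for illustration.

#include <pthread.h>
#include <stdio.h>

/* One outer lock serializes every open/close path, so a plain
 * integer is enough; atomics would only obscure that guarantee. */
static pthread_mutex_t rtnl_standin = PTHREAD_MUTEX_INITIALIZER;
static unsigned char opened_channel_cnt; /* plain, not atomic */

static void channel_open(void)
{
        pthread_mutex_lock(&rtnl_standin); /* in the kernel, the caller holds rtnl */
        if (!opened_channel_cnt)
                puts("first channel: allocate shared RX resources");
        opened_channel_cnt++;
        pthread_mutex_unlock(&rtnl_standin);
}

static void channel_close(void)
{
        pthread_mutex_lock(&rtnl_standin);
        opened_channel_cnt--;
        if (!opened_channel_cnt)
                puts("last channel: free shared RX resources");
        pthread_mutex_unlock(&rtnl_standin);
}

int main(void)
{
        channel_open();
        channel_open();  /* shared resources survive */
        channel_close();
        channel_close(); /* last close tears them down */
        return 0;
}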
@@ -81,6 +82,7 @@ config NET_DSA_REALTEK_SMI config NET_DSA_SMSC_LAN9303 tristate + depends on VLAN_8021Q || VLAN_8021Q=n select NET_DSA_TAG_LAN9303 select REGMAP help diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 33499fcd8848..6afb5db8244c 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -621,7 +621,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) get_device(&priv->master_mii_bus->dev); priv->master_mii_dn = dn; - priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev); + priv->slave_mii_bus = mdiobus_alloc(); if (!priv->slave_mii_bus) { of_node_put(dn); return -ENOMEM; @@ -681,8 +681,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) } err = mdiobus_register(priv->slave_mii_bus); - if (err && dn) + if (err && dn) { + mdiobus_free(priv->slave_mii_bus); of_node_put(dn); + } return err; } @@ -690,6 +692,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv) { mdiobus_unregister(priv->slave_mii_bus); + mdiobus_free(priv->slave_mii_bus); of_node_put(priv->master_mii_dn); } diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index d55784d19fa4..3969d89fa4db 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -10,6 +10,7 @@ #include <linux/mii.h> #include <linux/phy.h> #include <linux/if_bridge.h> +#include <linux/if_vlan.h> #include <linux/etherdevice.h> #include "lan9303.h" @@ -1083,21 +1084,27 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port, static int lan9303_port_enable(struct dsa_switch *ds, int port, struct phy_device *phy) { + struct dsa_port *dp = dsa_to_port(ds, port); struct lan9303 *chip = ds->priv; - if (!dsa_is_user_port(ds, port)) + if (!dsa_port_is_user(dp)) return 0; + vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port); + return lan9303_enable_processing_port(chip, port); } static void lan9303_port_disable(struct dsa_switch *ds, int port) { + struct dsa_port *dp = dsa_to_port(ds, port); struct lan9303 *chip = ds->priv; - if (!dsa_is_user_port(ds, port)) + if (!dsa_port_is_user(dp)) return; + vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port); + lan9303_disable_processing_port(chip, port); lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN); } @@ -1310,7 +1317,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip, struct device_node *np) { chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset", - GPIOD_OUT_LOW); + GPIOD_OUT_HIGH); if (IS_ERR(chip->reset_gpio)) return PTR_ERR(chip->reset_gpio); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 46ed953e787e..8a7a8093a156 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -498,8 +498,9 @@ static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg) static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np) { struct dsa_switch *ds = priv->ds; + int err; - ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev); + ds->slave_mii_bus = mdiobus_alloc(); if (!ds->slave_mii_bus) return -ENOMEM; @@ -512,7 +513,11 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np) ds->slave_mii_bus->parent = priv->dev; ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask; - return of_mdiobus_register(ds->slave_mii_bus, mdio_np); + err = of_mdiobus_register(ds->slave_mii_bus, mdio_np); + if (err) + mdiobus_free(ds->slave_mii_bus); + + return err; } static int gswip_pce_table_entry_read(struct 
gswip_priv *priv, @@ -2145,8 +2150,10 @@ disable_switch: gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB); dsa_unregister_switch(priv->ds); mdio_bus: - if (mdio_np) + if (mdio_np) { mdiobus_unregister(priv->ds->slave_mii_bus); + mdiobus_free(priv->ds->slave_mii_bus); + } put_mdio_node: of_node_put(mdio_np); for (i = 0; i < priv->num_gphy_fw; i++) @@ -2170,6 +2177,7 @@ static int gswip_remove(struct platform_device *pdev) if (priv->ds->slave_mii_bus) { mdiobus_unregister(priv->ds->slave_mii_bus); of_node_put(priv->ds->slave_mii_bus->dev.of_node); + mdiobus_free(priv->ds->slave_mii_bus); } for (i = 0; i < priv->num_gphy_fw; i++) diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c index 866767b70d65..b0a7dee27ffc 100644 --- a/drivers/net/dsa/microchip/ksz8795_spi.c +++ b/drivers/net/dsa/microchip/ksz8795_spi.c @@ -124,12 +124,23 @@ static const struct of_device_id ksz8795_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, ksz8795_dt_ids); +static const struct spi_device_id ksz8795_spi_ids[] = { + { "ksz8765" }, + { "ksz8794" }, + { "ksz8795" }, + { "ksz8863" }, + { "ksz8873" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, ksz8795_spi_ids); + static struct spi_driver ksz8795_spi_driver = { .driver = { .name = "ksz8795-switch", .owner = THIS_MODULE, .of_match_table = of_match_ptr(ksz8795_dt_ids), }, + .id_table = ksz8795_spi_ids, .probe = ksz8795_spi_probe, .remove = ksz8795_spi_remove, .shutdown = ksz8795_spi_shutdown, diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index e3cb0e6c9f6f..43addeabfc25 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -98,12 +98,24 @@ static const struct of_device_id ksz9477_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); +static const struct spi_device_id ksz9477_spi_ids[] = { + { "ksz9477" }, + { "ksz9897" }, + { "ksz9893" }, + { "ksz9563" }, + { "ksz8563" }, + { "ksz9567" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, ksz9477_spi_ids); + static struct spi_driver ksz9477_spi_driver = { .driver = { .name = "ksz9477-switch", .owner = THIS_MODULE, .of_match_table = of_match_ptr(ksz9477_dt_ids), }, + .id_table = ksz9477_spi_ids, .probe = ksz9477_spi_probe, .remove = ksz9477_spi_remove, .shutdown = ksz9477_spi_shutdown, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 55dbda04ea62..243f8ad6d06e 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -26,7 +26,7 @@ void ksz_update_port_member(struct ksz_device *dev, int port) struct dsa_switch *ds = dev->ds; u8 port_member = 0, cpu_port; const struct dsa_port *dp; - int i; + int i, j; if (!dsa_is_user_port(ds, port)) return; @@ -45,13 +45,33 @@ void ksz_update_port_member(struct ksz_device *dev, int port) continue; if (!dsa_port_bridge_same(dp, other_dp)) continue; + if (other_p->stp_state != BR_STATE_FORWARDING) + continue; - if (other_p->stp_state == BR_STATE_FORWARDING && - p->stp_state == BR_STATE_FORWARDING) { + if (p->stp_state == BR_STATE_FORWARDING) { val |= BIT(port); port_member |= BIT(i); } + /* Retain port [i]'s relationship to other ports than [port] */ + for (j = 0; j < ds->num_ports; j++) { + const struct dsa_port *third_dp; + struct ksz_port *third_p; + + if (j == i) + continue; + if (j == port) + continue; + if (!dsa_is_user_port(ds, j)) + continue; + third_p = &dev->ports[j]; + if (third_p->stp_state != BR_STATE_FORWARDING) + continue; + third_dp = dsa_to_port(ds, j); 
+ if (dsa_port_bridge_same(other_dp, third_dp)) + val |= BIT(j); + } + dev->dev_ops->cfg_port_member(dev, i, val | cpu_port); } diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index b82512e5b33b..a251bc55727f 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2074,7 +2074,7 @@ mt7530_setup_mdio(struct mt7530_priv *priv) if (priv->irq) mt7530_setup_mdio_irq(priv); - ret = mdiobus_register(bus); + ret = devm_mdiobus_register(dev, bus); if (ret) { dev_err(dev, "failed to register MDIO bus: %d\n", ret); if (priv->irq) @@ -2936,7 +2936,7 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port, phylink_set_port_modes(mask); - if (state->interface != PHY_INTERFACE_MODE_TRGMII || + if (state->interface != PHY_INTERFACE_MODE_TRGMII && !phy_interface_mode_is_8023z(state->interface)) { phylink_set(mask, 10baseT_Half); phylink_set(mask, 10baseT_Full); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 58ca684d73f7..ab1676553714 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2284,6 +2284,13 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, if (!mv88e6xxx_max_vid(chip)) return -EOPNOTSUPP; + /* The ATU removal procedure needs the FID to be mapped in the VTU, + * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA + * switchdev workqueue to ensure that all FDB entries are deleted + * before we remove the VLAN. + */ + dsa_flush_workqueue(); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_get_pvid(chip, port, &pvid); @@ -3399,7 +3406,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, return err; } - bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus)); + bus = mdiobus_alloc_size(sizeof(*mdio_bus)); if (!bus) return -ENOMEM; @@ -3424,14 +3431,14 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, if (!external) { err = mv88e6xxx_g2_irq_mdio_setup(chip, bus); if (err) - return err; + goto out; } err = of_mdiobus_register(bus, np); if (err) { dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err); mv88e6xxx_g2_irq_mdio_free(chip, bus); - return err; + goto out; } if (external) @@ -3440,21 +3447,26 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, list_add(&mdio_bus->list, &chip->mdios); return 0; + +out: + mdiobus_free(bus); + return err; } static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip) { - struct mv88e6xxx_mdio_bus *mdio_bus; + struct mv88e6xxx_mdio_bus *mdio_bus, *p; struct mii_bus *bus; - list_for_each_entry(mdio_bus, &chip->mdios, list) { + list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) { bus = mdio_bus->bus; if (!mdio_bus->external) mv88e6xxx_g2_irq_mdio_free(chip, bus); mdiobus_unregister(bus); + mdiobus_free(bus); } } diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index bf8d38239e7e..33f0ceae381d 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1061,7 +1061,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) return PTR_ERR(hw); } - bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv)); + bus = mdiobus_alloc_size(sizeof(*mdio_priv)); if (!bus) return -ENOMEM; @@ -1081,6 +1081,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) rc = mdiobus_register(bus); if (rc < 0) { dev_err(dev, "failed to register MDIO bus\n"); + mdiobus_free(bus); return rc; } @@ -1132,6 +1133,7 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot) 
lynx_pcs_destroy(phylink_pcs); } mdiobus_unregister(felix->imdio); + mdiobus_free(felix->imdio); } static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port, diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index 8c1c9da61602..f2f1608a476c 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -1029,7 +1029,7 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot) } /* Needed in order to initialize the bus mutex lock */ - rc = of_mdiobus_register(bus, NULL); + rc = devm_of_mdiobus_register(dev, bus, NULL); if (rc < 0) { dev_err(dev, "failed to register MDIO bus\n"); return rc; @@ -1083,7 +1083,8 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot) mdio_device_free(mdio_device); lynx_pcs_destroy(phylink_pcs); } - mdiobus_unregister(felix->imdio); + + /* mdiobus_unregister and mdiobus_free handled by devres */ } static const struct felix_info seville_info_vsc9953 = { diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c index da0d7e68643a..c39de2a4c1fe 100644 --- a/drivers/net/dsa/qca/ar9331.c +++ b/drivers/net/dsa/qca/ar9331.c @@ -378,7 +378,7 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv) if (!mnp) return -ENODEV; - ret = of_mdiobus_register(mbus, mnp); + ret = devm_of_mdiobus_register(dev, mbus, mnp); of_node_put(mnp); if (ret) return ret; @@ -1091,7 +1091,6 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev) } irq_domain_remove(priv->irqdomain); - mdiobus_unregister(priv->mbus); dsa_unregister_switch(&priv->ds); reset_control_assert(priv->sw_reset); diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c index e320cccba61a..90cd7bdf06f5 100644 --- a/drivers/net/ethernet/8390/mcf8390.c +++ b/drivers/net/ethernet/8390/mcf8390.c @@ -405,12 +405,12 @@ static int mcf8390_init(struct net_device *dev) static int mcf8390_probe(struct platform_device *pdev) { struct net_device *dev; - struct resource *mem, *irq; + struct resource *mem; resource_size_t msize; - int ret; + int ret, irq; - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (irq == NULL) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) { dev_err(&pdev->dev, "no IRQ specified?\n"); return -ENXIO; } @@ -433,7 +433,7 @@ static int mcf8390_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); platform_set_drvdata(pdev, dev); - dev->irq = irq->start; + dev->irq = irq; dev->base_addr = mem->start; ret = mcf8390_init(dev); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 492ac383f16d..a3593290886f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata) if (!channel->tx_ring) break; + /* Deactivate the Tx timer */ del_timer_sync(&channel->tx_timer); + channel->tx_timer_active = 0; } } @@ -2550,6 +2552,14 @@ read_again: buf2_len = xgbe_rx_buf2_len(rdata, packet, len); len += buf2_len; + if (buf2_len > rdata->rx.buf.dma_len) { + /* Hardware inconsistency within the descriptors + * that has resulted in a length underflow. 
+ */ + error = 1; + goto skip_data; + } + if (!skb) { skb = xgbe_create_skb(pdata, napi, rdata, buf1_len); @@ -2579,8 +2589,10 @@ skip_data: if (!last || context_next) goto read_again; - if (!skb) + if (!skb || error) { + dev_kfree_skb(skb); goto next_packet; + } /* Be sure we don't exceed the configured MTU */ max_len = netdev->mtu + ETH_HLEN; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index efdcf484a510..2af3da4b2d05 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -425,6 +425,9 @@ static void xgbe_pci_remove(struct pci_dev *pdev) pci_free_irq_vectors(pdata->pcidev); + /* Disable all interrupts in the hardware */ + XP_IOWRITE(pdata, XP_INT_EN, 0x0); + xgbe_free_pdata(pdata); } diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 9acf589b1178..87f40c2ba904 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -132,6 +132,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) { struct arc_emac_mdio_bus_data *data = &priv->bus_data; struct device_node *np = priv->dev->of_node; + const char *name = "Synopsys MII Bus"; struct mii_bus *bus; int error; @@ -142,7 +143,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) priv->bus = bus; bus->priv = priv; bus->parent = priv->dev; - bus->name = "Synopsys MII Bus"; + bus->name = name; bus->read = &arc_mdio_read; bus->write = &arc_mdio_write; bus->reset = &arc_mdio_reset; @@ -167,7 +168,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) if (error) { mdiobus_free(bus); return dev_err_probe(priv->dev, error, - "cannot register MDIO bus %s\n", bus->name); + "cannot register MDIO bus %s\n", name); } return 0; diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 4ad3fc72e74e..a89b93cb4e26 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1181,8 +1181,11 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) alx->hw.mtu = mtu; alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); netdev_update_features(netdev); - if (netif_running(netdev)) + if (netif_running(netdev)) { + mutex_lock(&alx->mtx); alx_reinit(alx); + mutex_unlock(&alx->mtx); + } return 0; } diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index da595242bc13..f50604f3e541 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -900,7 +900,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, atl1c_clean_buffer(pdev, buffer_info); } - netdev_reset_queue(adapter->netdev); + netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue)); /* Zero out Tx-buffers */ memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) * diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index c6412c523637..b4381cd41979 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct bgmac *bgmac; + struct resource *regs; int ret; bgmac = bgmac_alloc(&pdev->dev); @@ -208,15 +209,23 @@ static int bgmac_probe(struct platform_device *pdev) if (IS_ERR(bgmac->plat.base)) return PTR_ERR(bgmac->plat.base); - bgmac->plat.idm_base = 
devm_platform_ioremap_resource_byname(pdev, "idm_base"); - if (IS_ERR(bgmac->plat.idm_base)) - return PTR_ERR(bgmac->plat.idm_base); - else + /* The idm_base resource is optional for some platforms */ + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base"); + if (regs) { + bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(bgmac->plat.idm_base)) + return PTR_ERR(bgmac->plat.idm_base); bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK; + } - bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base"); - if (IS_ERR(bgmac->plat.nicpm_base)) - return PTR_ERR(bgmac->plat.nicpm_base); + /* The nicpm_base resource is optional for some platforms */ + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base"); + if (regs) { + bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev, + regs); + if (IS_ERR(bgmac->plat.nicpm_base)) + return PTR_ERR(bgmac->plat.nicpm_base); + } bgmac->read = platform_bgmac_read; bgmac->write = platform_bgmac_write; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index e20aafeb4ca9..b97ed9b5f685 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8216,7 +8216,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask); if (rc) { dev_err(&pdev->dev, - "pci_set_consistent_dma_mask failed, aborting\n"); + "dma_set_coherent_mask failed, aborting\n"); goto err_out_unmap; } } else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index a19dd6797070..2209d99b3404 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -2533,6 +2533,4 @@ void bnx2x_register_phc(struct bnx2x *bp); * Meant for implicit re-load flows. */ int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp); -int bnx2x_init_firmware(struct bnx2x *bp); -void bnx2x_release_firmware(struct bnx2x *bp); #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 8d36ebbf08e1..5729a5ab059d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2364,24 +2364,30 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) /* is another pf loaded on this engine? 
*/ if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { - /* build my FW version dword */ - u32 my_fw = (bp->fw_major) + (bp->fw_minor << 8) + - (bp->fw_rev << 16) + (bp->fw_eng << 24); + u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng; + u32 loaded_fw; /* read loaded FW from chip */ - u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); + loaded_fw = REG_RD(bp, XSEM_REG_PRAM); - DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n", - loaded_fw, my_fw); + loaded_fw_major = loaded_fw & 0xff; + loaded_fw_minor = (loaded_fw >> 8) & 0xff; + loaded_fw_rev = (loaded_fw >> 16) & 0xff; + loaded_fw_eng = (loaded_fw >> 24) & 0xff; + + DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n", + loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng); /* abort nic load if version mismatch */ - if (my_fw != loaded_fw) { + if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION || + loaded_fw_minor != BCM_5710_FW_MINOR_VERSION || + loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION || + loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) { if (print_err) - BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n", - loaded_fw, my_fw); + BNX2X_ERR("loaded FW incompatible. Aborting\n"); else - BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n", - loaded_fw, my_fw); + BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n"); + return -EBUSY; } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 774c1f1a57c3..c19b072f3a23 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -100,6 +100,9 @@ MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FW_FILE_NAME_E1); MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); +MODULE_FIRMWARE(FW_FILE_NAME_E1_V15); +MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15); +MODULE_FIRMWARE(FW_FILE_NAME_E2_V15); int bnx2x_num_queues; module_param_named(num_queues, bnx2x_num_queues, int, 0444); @@ -12316,15 +12319,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) bnx2x_read_fwinfo(bp); - if (IS_PF(bp)) { - rc = bnx2x_init_firmware(bp); - - if (rc) { - bnx2x_free_mem_bp(bp); - return rc; - } - } - func = BP_FUNC(bp); /* need to reset chip if undi was active */ @@ -12337,7 +12331,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) rc = bnx2x_prev_unload(bp); if (rc) { - bnx2x_release_firmware(bp); bnx2x_free_mem_bp(bp); return rc; } @@ -13406,7 +13399,7 @@ do { \ (u8 *)bp->arr, len); \ } while (0) -int bnx2x_init_firmware(struct bnx2x *bp) +static int bnx2x_init_firmware(struct bnx2x *bp) { const char *fw_file_name, *fw_file_name_v15; struct bnx2x_fw_file_hdr *fw_hdr; @@ -13506,7 +13499,7 @@ request_firmware_exit: return rc; } -void bnx2x_release_firmware(struct bnx2x *bp) +static void bnx2x_release_firmware(struct bnx2x *bp) { kfree(bp->init_ops_offsets); kfree(bp->init_ops); @@ -14023,7 +14016,6 @@ static int bnx2x_init_one(struct pci_dev *pdev, return 0; init_one_freemem: - bnx2x_release_firmware(bp); bnx2x_free_mem_bp(bp); init_one_exit: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4f94136a011a..b1c98d1408b8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4747,8 +4747,10 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) return rc; req->vnic_id = 
cpu_to_le32(vnic->fw_vnic_id); - req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); - req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); + if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { + req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); + req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); + } req->mask = cpu_to_le32(vnic->rx_mask); return hwrm_req_send_silent(bp, req); } @@ -7787,6 +7789,19 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp) return 0; } +static void bnxt_remap_fw_health_regs(struct bnxt *bp) +{ + if (!bp->fw_health) + return; + + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { + bp->fw_health->status_reliable = true; + bp->fw_health->resets_reliable = true; + } else { + bnxt_try_map_fw_health_reg(bp); + } +} + static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; @@ -8639,6 +8654,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) vnic->uc_filter_count = 1; vnic->rx_mask = 0; + if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) + goto skip_rx_mask; + if (bp->dev->flags & IFF_BROADCAST) vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; @@ -8648,7 +8666,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) if (bp->dev->flags & IFF_ALLMULTI) { vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; vnic->mc_list_count = 0; - } else { + } else if (bp->dev->flags & IFF_MULTICAST) { u32 mask = 0; bnxt_mc_list_updated(bp, &mask); @@ -8659,6 +8677,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) if (rc) goto err_out; +skip_rx_mask: rc = bnxt_hwrm_set_coal(bp); if (rc) netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", @@ -9850,8 +9869,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) resc_reinit = true; if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) fw_reset = true; - else if (bp->fw_health && !bp->fw_health->status_reliable) - bnxt_try_map_fw_health_reg(bp); + else + bnxt_remap_fw_health_regs(bp); if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); @@ -10330,13 +10349,15 @@ int bnxt_half_open_nic(struct bnxt *bp) goto half_open_err; } - rc = bnxt_alloc_mem(bp, false); + rc = bnxt_alloc_mem(bp, true); if (rc) { netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); goto half_open_err; } - rc = bnxt_init_nic(bp, false); + set_bit(BNXT_STATE_HALF_OPEN, &bp->state); + rc = bnxt_init_nic(bp, true); if (rc) { + clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); goto half_open_err; } @@ -10344,7 +10365,7 @@ int bnxt_half_open_nic(struct bnxt *bp) half_open_err: bnxt_free_skbs(bp); - bnxt_free_mem(bp, false); + bnxt_free_mem(bp, true); dev_close(bp->dev); return rc; } @@ -10354,9 +10375,10 @@ half_open_err: */ void bnxt_half_close_nic(struct bnxt *bp) { - bnxt_hwrm_resource_free(bp, false, false); + bnxt_hwrm_resource_free(bp, false, true); bnxt_free_skbs(bp); - bnxt_free_mem(bp, false); + bnxt_free_mem(bp, true); + clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); } void bnxt_reenable_sriov(struct bnxt *bp) @@ -10772,7 +10794,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) if (dev->flags & IFF_ALLMULTI) { mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; vnic->mc_list_count = 0; - } else { + } else if (dev->flags & IFF_MULTICAST) { mc_update = bnxt_mc_list_updated(bp, &mask); } @@ -10849,9 +10871,10 @@ skip_uc: !bnxt_promisc_ok(bp)) vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; rc = 
bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); - if (rc && vnic->mc_list_count) { + if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", rc); + vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; vnic->mc_list_count = 0; rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 440dfeb4948b..666fc1e7a7d2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1921,6 +1921,7 @@ struct bnxt { #define BNXT_STATE_RECOVER 12 #define BNXT_STATE_FW_NON_FATAL_COND 13 #define BNXT_STATE_FW_ACTIVATE_RESET 14 +#define BNXT_STATE_HALF_OPEN 15 /* For offline ethtool tests */ #define BNXT_NO_FW_ACCESS(bp) \ (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 4da31b1b84f9..f6e21fac0e69 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -367,6 +367,16 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack, } } +/* Live patch status in NVM */ +#define BNXT_LIVEPATCH_NOT_INSTALLED 0 +#define BNXT_LIVEPATCH_INSTALLED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL +#define BNXT_LIVEPATCH_REMOVED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE +#define BNXT_LIVEPATCH_MASK (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \ + FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) +#define BNXT_LIVEPATCH_ACTIVATED BNXT_LIVEPATCH_MASK + +#define BNXT_LIVEPATCH_STATE(flags) ((flags) & BNXT_LIVEPATCH_MASK) + static int bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) { @@ -374,8 +384,9 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) struct hwrm_fw_livepatch_query_input *query_req; struct hwrm_fw_livepatch_output *patch_resp; struct hwrm_fw_livepatch_input *patch_req; + u16 flags, live_patch_state; + bool activated = false; u32 installed = 0; - u16 flags; u8 target; int rc; @@ -394,7 +405,6 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) hwrm_req_drop(bp, query_req); return rc; } - patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE; patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL; patch_resp = hwrm_req_hold(bp, patch_req); @@ -407,12 +417,20 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) } flags = le16_to_cpu(query_resp->status_flags); - if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL) + live_patch_state = BNXT_LIVEPATCH_STATE(flags); + + if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED) continue; - if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) && - !strncmp(query_resp->active_ver, query_resp->install_ver, - sizeof(query_resp->active_ver))) + + if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) { + activated = true; continue; + } + + if (live_patch_state == BNXT_LIVEPATCH_INSTALLED) + patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE; + else if (live_patch_state == BNXT_LIVEPATCH_REMOVED) + patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE; patch_req->fw_target = target; rc = hwrm_req_send(bp, patch_req); @@ -424,8 +442,13 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) } if (!rc && !installed) { - NL_SET_ERR_MSG_MOD(extack, "No live patches 
found"); - rc = -ENOENT; + if (activated) { + NL_SET_ERR_MSG_MOD(extack, "Live patch already activated"); + rc = -EEXIST; + } else { + NL_SET_ERR_MSG_MOD(extack, "No live patches found"); + rc = -ENOENT; + } } hwrm_req_drop(bp, query_req); hwrm_req_drop(bp, patch_req); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 003330e8cd58..8aaa2335f848 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -25,6 +25,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" #include "bnxt_hwrm.h" +#include "bnxt_ulp.h" #include "bnxt_xdp.h" #include "bnxt_ptp.h" #include "bnxt_ethtool.h" @@ -1969,6 +1970,9 @@ static int bnxt_get_fecparam(struct net_device *dev, case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: fec->active_fec |= ETHTOOL_FEC_LLRS; break; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: + fec->active_fec |= ETHTOOL_FEC_OFF; + break; } return 0; } @@ -3454,7 +3458,7 @@ static int bnxt_run_loopback(struct bnxt *bp) if (!skb) return -ENOMEM; data = skb_put(skb, pkt_size); - eth_broadcast_addr(data); + ether_addr_copy(&data[i], bp->dev->dev_addr); i += ETH_ALEN; ether_addr_copy(&data[i], bp->dev->dev_addr); i += ETH_ALEN; @@ -3548,9 +3552,12 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, if (!offline) { bnxt_run_fw_tests(bp, test_mask, &test_results); } else { - rc = bnxt_close_nic(bp, false, false); - if (rc) + bnxt_ulp_stop(bp); + rc = bnxt_close_nic(bp, true, false); + if (rc) { + bnxt_ulp_start(bp, rc); return; + } bnxt_run_fw_tests(bp, test_mask, &test_results); buf[BNXT_MACLPBK_TEST_IDX] = 1; @@ -3560,6 +3567,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, if (rc) { bnxt_hwrm_mac_loopback(bp, false); etest->flags |= ETH_TEST_FL_FAILED; + bnxt_ulp_start(bp, rc); return; } if (bnxt_run_loopback(bp)) @@ -3585,7 +3593,8 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, } bnxt_hwrm_phy_loopback(bp, false, false); bnxt_half_close_nic(bp); - rc = bnxt_open_nic(bp, false, true); + rc = bnxt_open_nic(bp, true, true); + bnxt_ulp_start(bp, rc); } if (rc || bnxt_test_irq(bp)) { buf[BNXT_IRQ_TEST_IDX] = 1; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c index 566c9487ef55..b01d42928a53 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c @@ -644,17 +644,23 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) /* Last byte of resp contains valid bit */ valid = ((u8 *)ctx->resp) + len - 1; - for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { + for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) { /* make sure we read from updated DMA memory */ dma_rmb(); if (*valid) break; - usleep_range(1, 5); + if (j < 10) { + udelay(1); + j++; + } else { + usleep_range(20, 30); + j += 20; + } } if (j >= HWRM_VALID_BIT_DELAY_USEC) { hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n", - hwrm_total_timeout(i), req_type, + hwrm_total_timeout(i) + j, req_type, le16_to_cpu(ctx->req->seq_id), len, *valid); goto exit; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h index d52bd2d63aec..c98032e38188 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h @@ -90,7 +90,7 @@ static inline unsigned int 
hwrm_total_timeout(unsigned int n) } -#define HWRM_VALID_BIT_DELAY_USEC 150 +#define HWRM_VALID_BIT_DELAY_USEC 50000 static inline bool bnxt_cfa_hwrm_message(u16 req_type) { diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 87f1056e29ff..2da804f84b48 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2287,8 +2287,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, dma_length_status = status->length_status; if (dev->features & NETIF_F_RXCSUM) { rx_csum = (__force __be16)(status->rx_csum & 0xffff); - skb->csum = (__force __wsum)ntohs(rx_csum); - skb->ip_summed = CHECKSUM_COMPLETE; + if (rx_csum) { + skb->csum = (__force __wsum)ntohs(rx_csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } } /* DMA flags and length are still valid no matter how diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index e31a5a397f11..f55d9d9c01a8 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -40,6 +40,13 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) { + wol->supported = 0; + wol->wolopts = 0; + return; + } wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; wol->wolopts = priv->wolopts; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index a363da928e8b..d13f06cf0308 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1573,7 +1573,14 @@ static int macb_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete_done(napi, work_done); - /* Packets received while interrupts were disabled */ + /* RSR bits only seem to propagate to raise interrupts when + * interrupts are enabled at the time, so if bits are already + * set due to packets received while interrupts were disabled, + * they will not cause another interrupt to be generated when + * interrupts are re-enabled. + * Check for this case here. This has been seen to happen + * around 30% of the time under heavy network load. + */ status = macb_readl(bp, RSR); if (status) { if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) @@ -1581,6 +1588,22 @@ static int macb_poll(struct napi_struct *napi, int budget) napi_reschedule(napi); } else { queue_writel(queue, IER, bp->rx_intr_mask); + + /* In rare cases, packets could have been received in + * the window between the check above and re-enabling + * interrupts. Therefore, a double-check is required + * to avoid losing a wakeup. This can potentially race + * with the interrupt handler doing the same actions + * if an interrupt is raised just after enabling them, + * but this should be harmless. 
+ */ + status = macb_readl(bp, RSR); + if (unlikely(status)) { + queue_writel(queue, IDR, bp->rx_intr_mask); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(RCOMP)); + napi_schedule(napi); + } } } @@ -4712,7 +4735,7 @@ static int macb_probe(struct platform_device *pdev) #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { - dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); bp->hw_dma_cap |= HW_DMA_CAP_64B; } #endif diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index da41eee2f25c..a06003bfa04b 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -3613,6 +3613,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10); adapter->params.pci.vpd_cap_addr = pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD); + if (!adapter->params.pci.vpd_cap_addr) + return -ENODEV; ret = get_vpd_params(adapter, &adapter->params.vpd); if (ret < 0) return ret; diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 691605c15265..d5356db7539a 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -989,117 +989,6 @@ static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv) return 0; } -static void ftgmac100_adjust_link(struct net_device *netdev) -{ - struct ftgmac100 *priv = netdev_priv(netdev); - struct phy_device *phydev = netdev->phydev; - bool tx_pause, rx_pause; - int new_speed; - - /* We store "no link" as speed 0 */ - if (!phydev->link) - new_speed = 0; - else - new_speed = phydev->speed; - - /* Grab pause settings from PHY if configured to do so */ - if (priv->aneg_pause) { - rx_pause = tx_pause = phydev->pause; - if (phydev->asym_pause) - tx_pause = !rx_pause; - } else { - rx_pause = priv->rx_pause; - tx_pause = priv->tx_pause; - } - - /* Link hasn't changed, do nothing */ - if (phydev->speed == priv->cur_speed && - phydev->duplex == priv->cur_duplex && - rx_pause == priv->rx_pause && - tx_pause == priv->tx_pause) - return; - - /* Print status if we have a link or we had one and just lost it, - * don't print otherwise. - */ - if (new_speed || priv->cur_speed) - phy_print_status(phydev); - - priv->cur_speed = new_speed; - priv->cur_duplex = phydev->duplex; - priv->rx_pause = rx_pause; - priv->tx_pause = tx_pause; - - /* Link is down, do nothing else */ - if (!new_speed) - return; - - /* Disable all interrupts */ - iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); - - /* Reset the adapter asynchronously */ - schedule_work(&priv->reset_task); -} - -static int ftgmac100_mii_probe(struct net_device *netdev) -{ - struct ftgmac100 *priv = netdev_priv(netdev); - struct platform_device *pdev = to_platform_device(priv->dev); - struct device_node *np = pdev->dev.of_node; - struct phy_device *phydev; - phy_interface_t phy_intf; - int err; - - /* Default to RGMII. It's a gigabit part after all */ - err = of_get_phy_mode(np, &phy_intf); - if (err) - phy_intf = PHY_INTERFACE_MODE_RGMII; - - /* Aspeed only supports these. I don't know about other IP - * block vendors so I'm going to just let them through for - * now. Note that this is only a warning if for some obscure - * reason the DT really means to lie about it or it's a newer - * part we don't know about. 
- * - * On the Aspeed SoC there are additionally straps and SCU - * control bits that could tell us what the interface is - * (or allow us to configure it while the IP block is held - * in reset). For now I chose to keep this driver away from - * those SoC specific bits and assume the device-tree is - * right and the SCU has been configured properly by pinmux - * or the firmware. - */ - if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) { - netdev_warn(netdev, - "Unsupported PHY mode %s !\n", - phy_modes(phy_intf)); - } - - phydev = phy_find_first(priv->mii_bus); - if (!phydev) { - netdev_info(netdev, "%s: no PHY found\n", netdev->name); - return -ENODEV; - } - - phydev = phy_connect(netdev, phydev_name(phydev), - &ftgmac100_adjust_link, phy_intf); - - if (IS_ERR(phydev)) { - netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name); - return PTR_ERR(phydev); - } - - /* Indicate that we support PAUSE frames (see comment in - * Documentation/networking/phy.rst) - */ - phy_support_asym_pause(phydev); - - /* Display what we found */ - phy_attached_info(phydev); - - return 0; -} - static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) { struct net_device *netdev = bus->priv; @@ -1410,10 +1299,8 @@ static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err) return err; } -static void ftgmac100_reset_task(struct work_struct *work) +static void ftgmac100_reset(struct ftgmac100 *priv) { - struct ftgmac100 *priv = container_of(work, struct ftgmac100, - reset_task); struct net_device *netdev = priv->netdev; int err; @@ -1459,6 +1346,134 @@ static void ftgmac100_reset_task(struct work_struct *work) rtnl_unlock(); } +static void ftgmac100_reset_task(struct work_struct *work) +{ + struct ftgmac100 *priv = container_of(work, struct ftgmac100, + reset_task); + + ftgmac100_reset(priv); +} + +static void ftgmac100_adjust_link(struct net_device *netdev) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + bool tx_pause, rx_pause; + int new_speed; + + /* We store "no link" as speed 0 */ + if (!phydev->link) + new_speed = 0; + else + new_speed = phydev->speed; + + /* Grab pause settings from PHY if configured to do so */ + if (priv->aneg_pause) { + rx_pause = tx_pause = phydev->pause; + if (phydev->asym_pause) + tx_pause = !rx_pause; + } else { + rx_pause = priv->rx_pause; + tx_pause = priv->tx_pause; + } + + /* Link hasn't changed, do nothing */ + if (phydev->speed == priv->cur_speed && + phydev->duplex == priv->cur_duplex && + rx_pause == priv->rx_pause && + tx_pause == priv->tx_pause) + return; + + /* Print status if we have a link or we had one and just lost it, + * don't print otherwise. + */ + if (new_speed || priv->cur_speed) + phy_print_status(phydev); + + priv->cur_speed = new_speed; + priv->cur_duplex = phydev->duplex; + priv->rx_pause = rx_pause; + priv->tx_pause = tx_pause; + + /* Link is down, do nothing else */ + if (!new_speed) + return; + + /* Disable all interrupts */ + iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); + + /* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock + * order consistent to prevent deadlock. 
+ */ + if (netdev->phydev) + mutex_unlock(&netdev->phydev->lock); + + ftgmac100_reset(priv); + + if (netdev->phydev) + mutex_lock(&netdev->phydev->lock); + +} + +static int ftgmac100_mii_probe(struct net_device *netdev) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + struct platform_device *pdev = to_platform_device(priv->dev); + struct device_node *np = pdev->dev.of_node; + struct phy_device *phydev; + phy_interface_t phy_intf; + int err; + + /* Default to RGMII. It's a gigabit part after all */ + err = of_get_phy_mode(np, &phy_intf); + if (err) + phy_intf = PHY_INTERFACE_MODE_RGMII; + + /* Aspeed only supports these. I don't know about other IP + * block vendors so I'm going to just let them through for + * now. Note that this is only a warning if for some obscure + * reason the DT really means to lie about it or it's a newer + * part we don't know about. + * + * On the Aspeed SoC there are additionally straps and SCU + * control bits that could tell us what the interface is + * (or allow us to configure it while the IP block is held + * in reset). For now I chose to keep this driver away from + * those SoC specific bits and assume the device-tree is + * right and the SCU has been configured properly by pinmux + * or the firmware. + */ + if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) { + netdev_warn(netdev, + "Unsupported PHY mode %s !\n", + phy_modes(phy_intf)); + } + + phydev = phy_find_first(priv->mii_bus); + if (!phydev) { + netdev_info(netdev, "%s: no PHY found\n", netdev->name); + return -ENODEV; + } + + phydev = phy_connect(netdev, phydev_name(phydev), + &ftgmac100_adjust_link, phy_intf); + + if (IS_ERR(phydev)) { + netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name); + return PTR_ERR(phydev); + } + + /* Indicate that we support PAUSE frames (see comment in + * Documentation/networking/phy.rst) + */ + phy_support_asym_pause(phydev); + + /* Display what we found */ + phy_attached_info(phydev); + + return 0; +} + static int ftgmac100_open(struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index e985ae008a97..0f90d2d5bb60 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -4338,7 +4338,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) } INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp); - + mutex_init(&priv->onestep_tstamp_lock); skb_queue_head_init(&priv->tx_skbs); priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK; @@ -4523,12 +4523,12 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) #ifdef CONFIG_DEBUG_FS dpaa2_dbg_remove(priv); #endif + + unregister_netdev(net_dev); rtnl_lock(); dpaa2_eth_disconnect_mac(priv); rtnl_unlock(); - unregister_netdev(net_dev); - dpaa2_eth_dl_port_del(priv); dpaa2_eth_dl_traps_unregister(priv); dpaa2_eth_dl_free(priv); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c index d6eefbbf163f..cacd454ac696 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c @@ -532,6 +532,7 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls, struct flow_rule *rule = flow_cls_offload_flow_rule(cls); struct flow_dissector *dissector = rule->match.dissector; struct netlink_ext_ack *extack = 
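The ftgmac100 rework above exists to keep lock ordering consistent: the link-change callback runs with the PHY mutex held, while the reset path takes rtnl first and the PHY mutex second, so the callback must drop the PHY mutex around the reset call and take it back afterwards. A hedged pthread sketch of that release-and-reacquire rule (lock names are invented for illustration, not the driver's):

#include <pthread.h>
#include <stdio.h>

/* Global order: rtnl_like before phy_like, mirroring rtnl_lock vs. the
 * PHY device mutex in the driver. */
static pthread_mutex_t rtnl_like = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t phy_like = PTHREAD_MUTEX_INITIALIZER;

/* Reset path: honors the global order, rtnl_like then phy_like. */
static void reset_path(void)
{
        pthread_mutex_lock(&rtnl_like);
        pthread_mutex_lock(&phy_like);
        puts("reset done under both locks");
        pthread_mutex_unlock(&phy_like);
        pthread_mutex_unlock(&rtnl_like);
}

/* Link-change callback: entered with phy_like held, so it must drop it
 * before calling reset_path() and reacquire it afterwards, exactly as
 * the driver releases phydev->lock around ftgmac100_reset(). Holding it
 * across the call would take the locks in B-then-A order. */
static void adjust_link(void)
{
        pthread_mutex_lock(&phy_like); /* models the caller's context */

        pthread_mutex_unlock(&phy_like);
        reset_path();
        pthread_mutex_lock(&phy_like);

        pthread_mutex_unlock(&phy_like);
}

int main(void)
{
        adjust_link();
        return 0;
}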
cls->common.extack; + int ret = -EOPNOTSUPP; if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | @@ -561,9 +562,10 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls, } *vlan = (u16)match.key->vlan_id; + ret = 0; } - return 0; + return ret; } static int diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index ff756265d58f..9a2c16d69e2c 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1464,6 +1464,7 @@ static int gfar_get_ts_info(struct net_device *dev, ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp"); if (ptp_node) { ptp_dev = of_find_device_by_node(ptp_node); + of_node_put(ptp_node); if (ptp_dev) ptp = platform_get_drvdata(ptp_dev); } diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 2ad7f57f7e5b..f7621ab672b9 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -301,7 +301,7 @@ static int gve_adminq_parse_err(struct gve_priv *priv, u32 status) */ static int gve_adminq_kick_and_wait(struct gve_priv *priv) { - u32 tail, head; + int tail, head; int i; tail = ioread32be(&priv->reg_bar0->adminq_event_counter); diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index 2068199445bd..e4e98aa7745f 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -609,6 +609,7 @@ static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat, *packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0); *work_done = work_cnt; + skb_record_rx_queue(skb, rx->q_num); if (skb_is_nonlinear(skb)) napi_gro_frags(napi); else diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index bda7a2a9d211..b423e94956f1 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -110,6 +110,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, struct ibmvnic_sub_crq_queue *tx_scrq); static void free_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb); +static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter); struct ibmvnic_stat { char name[ETH_GSTRING_LEN]; @@ -1424,7 +1425,7 @@ static int __ibmvnic_open(struct net_device *netdev) rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); if (rc) { ibmvnic_napi_disable(adapter); - release_resources(adapter); + ibmvnic_disable_irqs(adapter); return rc; } @@ -1474,9 +1475,6 @@ static int ibmvnic_open(struct net_device *netdev) rc = init_resources(adapter); if (rc) { netdev_err(netdev, "failed to initialize resources\n"); - release_resources(adapter); - release_rx_pools(adapter); - release_tx_pools(adapter); goto out; } } @@ -1493,6 +1491,13 @@ out: adapter->state = VNIC_OPEN; rc = 0; } + + if (rc) { + release_resources(adapter); + release_rx_pools(adapter); + release_tx_pools(adapter); + } + return rc; } @@ -2208,6 +2213,19 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason) } /* + * Initialize the init_done completion and return code values. We + * can get a transport event just after registering the CRQ and the + * tasklet will use this to communicate the transport event. To ensure + * we don't miss the notification/error, initialize these _before_ + * registering the CRQ. 
+ */ +static inline void reinit_init_done(struct ibmvnic_adapter *adapter) +{ + reinit_completion(&adapter->init_done); + adapter->init_done_rc = 0; +} + +/* * do_reset returns zero if we are able to keep processing reset events, or * non-zero if we hit a fatal error and must halt. */ @@ -2313,6 +2331,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, */ adapter->state = VNIC_PROBED; + reinit_init_done(adapter); + if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { rc = init_crq_queue(adapter); } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { @@ -2456,7 +2476,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, */ adapter->state = VNIC_PROBED; - reinit_completion(&adapter->init_done); + reinit_init_done(adapter); + rc = init_crq_queue(adapter); if (rc) { netdev_err(adapter->netdev, @@ -2597,23 +2618,82 @@ out: static void __ibmvnic_reset(struct work_struct *work) { struct ibmvnic_adapter *adapter; - bool saved_state = false; + unsigned int timeout = 5000; struct ibmvnic_rwi *tmprwi; + bool saved_state = false; struct ibmvnic_rwi *rwi; unsigned long flags; - u32 reset_state; + struct device *dev; + bool need_reset; int num_fails = 0; + u32 reset_state; int rc = 0; adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); + dev = &adapter->vdev->dev; - if (test_and_set_bit_lock(0, &adapter->resetting)) { + /* Wait for ibmvnic_probe() to complete. If probe is taking too long + * or if another reset is in progress, defer work for now. If probe + * eventually fails it will flush and terminate our work. + * + * Three possibilities here: + * 1. Adapter being removed - just return + * 2. Timed out on probe or another reset in progress - delay the work + * 3. Completed probe - perform any resets in queue + */ + if (adapter->state == VNIC_PROBING && + !wait_for_completion_timeout(&adapter->probe_done, timeout)) { + dev_err(dev, "Reset thread timed out on probe"); queue_delayed_work(system_long_wq, &adapter->ibmvnic_delayed_reset, IBMVNIC_RESET_DELAY); return; } + /* adapter is done with probe (i.e. state is never VNIC_PROBING now) */ + if (adapter->state == VNIC_REMOVING) + return; + + /* ->rwi_list is stable now (no one else is removing entries) */ + + /* ibmvnic_probe() may have purged the reset queue after we were + * scheduled to process a reset so there may be no resets to process. + * Before setting the ->resetting bit though, we have to make sure + * that there is in fact a reset to process. Otherwise we may race + * with ibmvnic_open() and end up leaving the vnic down: + * + * __ibmvnic_reset() ibmvnic_open() + * ----------------- -------------- + * + * set ->resetting bit + * find ->resetting bit is set + * set ->state to IBMVNIC_OPEN (i.e. + * assume reset will open device) + * return + * find reset queue empty + * return + * + * Neither performed vnic login/open and vnic stays down + * + * If we hold the lock and conditionally set the bit, either we + * or ibmvnic_open() will complete the open. 
+ */ + need_reset = false; + spin_lock(&adapter->rwi_lock); + if (!list_empty(&adapter->rwi_list)) { + if (test_and_set_bit_lock(0, &adapter->resetting)) { + queue_delayed_work(system_long_wq, + &adapter->ibmvnic_delayed_reset, + IBMVNIC_RESET_DELAY); + } else { + need_reset = true; + } + } + spin_unlock(&adapter->rwi_lock); + + if (!need_reset) + return; + rwi = get_next_rwi(adapter); while (rwi) { spin_lock_irqsave(&adapter->state_lock, flags); @@ -2730,12 +2810,23 @@ static void __ibmvnic_delayed_reset(struct work_struct *work) __ibmvnic_reset(&adapter->ibmvnic_reset); } +static void flush_reset_queue(struct ibmvnic_adapter *adapter) +{ + struct list_head *entry, *tmp_entry; + + if (!list_empty(&adapter->rwi_list)) { + list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) { + list_del(entry); + kfree(list_entry(entry, struct ibmvnic_rwi, list)); + } + } +} + static int ibmvnic_reset(struct ibmvnic_adapter *adapter, enum ibmvnic_reset_reason reason) { - struct list_head *entry, *tmp_entry; - struct ibmvnic_rwi *rwi, *tmp; struct net_device *netdev = adapter->netdev; + struct ibmvnic_rwi *rwi, *tmp; unsigned long flags; int ret; @@ -2754,13 +2845,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, goto err; } - if (adapter->state == VNIC_PROBING) { - netdev_warn(netdev, "Adapter reset during probe\n"); - adapter->init_done_rc = -EAGAIN; - ret = EAGAIN; - goto err; - } - list_for_each_entry(tmp, &adapter->rwi_list, list) { if (tmp->reset_reason == reason) { netdev_dbg(netdev, "Skipping matching reset, reason=%s\n", @@ -2778,10 +2862,9 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, /* if we just received a transport event, * flush reset queue and process this reset */ - if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { - list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) - list_del(entry); - } + if (adapter->force_reset_recovery) + flush_reset_queue(adapter); + rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", @@ -5316,9 +5399,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, } if (!completion_done(&adapter->init_done)) { - complete(&adapter->init_done); if (!adapter->init_done_rc) adapter->init_done_rc = -EAGAIN; + complete(&adapter->init_done); } break; @@ -5341,6 +5424,13 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, adapter->fw_done_rc = -EIO; complete(&adapter->fw_done); } + + /* if we got here during crq-init, retry crq-init */ + if (!completion_done(&adapter->init_done)) { + adapter->init_done_rc = -EAGAIN; + complete(&adapter->init_done); + } + if (!completion_done(&adapter->stats_done)) complete(&adapter->stats_done); if (test_bit(0, &adapter->resetting)) @@ -5657,10 +5747,6 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) adapter->from_passive_init = false; - if (reset) - reinit_completion(&adapter->init_done); - - adapter->init_done_rc = 0; rc = ibmvnic_send_crq_init(adapter); if (rc) { dev_err(dev, "Send crq init failed with error %d\n", rc); @@ -5674,12 +5760,14 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) if (adapter->init_done_rc) { release_crq_queue(adapter); + dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc); return adapter->init_done_rc; } if (adapter->from_passive_init) { adapter->state = VNIC_OPEN; adapter->from_passive_init = false; + dev_err(dev, "CRQ-init failed, passive-init\n"); return -EINVAL; } @@ -5719,6 +5807,7 @@ static int 
ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) struct ibmvnic_adapter *adapter; struct net_device *netdev; unsigned char *mac_addr_p; + unsigned long flags; bool init_success; int rc; @@ -5763,6 +5852,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) spin_lock_init(&adapter->rwi_lock); spin_lock_init(&adapter->state_lock); mutex_init(&adapter->fw_lock); + init_completion(&adapter->probe_done); init_completion(&adapter->init_done); init_completion(&adapter->fw_done); init_completion(&adapter->reset_done); @@ -5773,6 +5863,33 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) init_success = false; do { + reinit_init_done(adapter); + + /* clear any failovers we got in the previous pass + * since we are reinitializing the CRQ + */ + adapter->failover_pending = false; + + /* If we had already initialized CRQ, we may have one or + * more resets queued already. Discard those and release + * the CRQ before initializing the CRQ again. + */ + release_crq_queue(adapter); + + /* Since we are still in PROBING state, __ibmvnic_reset() + * will not access the ->rwi_list and since we released CRQ, + * we won't get _new_ transport events. But there may be an + * ongoing ibmvnic_reset() call. So serialize access to + * rwi_list. If we win the race, ibmvnic_reset() could add + * a reset after we purged but that's ok - we just may end + * up with an extra reset (i.e. similar to having two or more + * resets in the queue at once). + */ + spin_lock_irqsave(&adapter->rwi_lock, flags); + flush_reset_queue(adapter); + spin_unlock_irqrestore(&adapter->rwi_lock, flags); + rc = init_crq_queue(adapter); if (rc) { dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", @@ -5804,12 +5921,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) goto ibmvnic_dev_file_err; netif_carrier_off(netdev); - rc = register_netdev(netdev); - if (rc) { - dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); - goto ibmvnic_register_fail; - } - dev_info(&dev->dev, "ibmvnic registered\n"); if (init_success) { adapter->state = VNIC_PROBED; @@ -5822,6 +5933,16 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->wait_for_reset = false; adapter->last_reset_time = jiffies; + + rc = register_netdev(netdev); + if (rc) { + dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); + goto ibmvnic_register_fail; + } + dev_info(&dev->dev, "ibmvnic registered\n"); + + complete(&adapter->probe_done); + return 0; ibmvnic_register_fail: @@ -5836,6 +5957,17 @@ ibmvnic_stats_fail: ibmvnic_init_fail: release_sub_crqs(adapter, 1); release_crq_queue(adapter); + + /* cleanup worker thread after releasing CRQ so we don't get + * transport events (i.e. new work items for the worker thread). 
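The probe/reset interplay above boils down to one rule: purge the queue and claim the "resetting" bit under the same lock, and claim the bit only when the queue is demonstrably non-empty, so ibmvnic_open() is never left assuming that a reset which no longer exists will bring the link up. A small sketch of that conditional claim, with invented stand-ins for rwi_lock, rwi_list, and the resetting bit:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t rwi_lock = PTHREAD_MUTEX_INITIALIZER;
static int queued_resets;          /* proxy for !list_empty(&rwi_list) */
static atomic_bool resetting;

/* Worker: claim the resetting bit only if work really exists, making
 * the emptiness check and the claim atomic with respect to a purge. */
static bool try_begin_reset(void)
{
        bool begin = false;

        pthread_mutex_lock(&rwi_lock);
        if (queued_resets > 0 &&
            !atomic_exchange(&resetting, true))
                begin = true;
        pthread_mutex_unlock(&rwi_lock);
        return begin;
}

/* Probe/remove side: purge pending work under the same lock. */
static void flush_queue(void)
{
        pthread_mutex_lock(&rwi_lock);
        queued_resets = 0;
        pthread_mutex_unlock(&rwi_lock);
}

int main(void)
{
        queued_resets = 1;
        printf("begin reset: %d\n", try_begin_reset());            /* 1 */
        atomic_store(&resetting, false);
        flush_queue();
        printf("begin reset after purge: %d\n", try_begin_reset()); /* 0 */
        return 0;
}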
+ */ + adapter->state = VNIC_REMOVING; + complete(&adapter->probe_done); + flush_work(&adapter->ibmvnic_reset); + flush_delayed_work(&adapter->ibmvnic_delayed_reset); + + flush_reset_queue(adapter); + mutex_destroy(&adapter->fw_lock); free_netdev(netdev); @@ -5912,10 +6044,14 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr, be64_to_cpu(session_token)); rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, H_SESSION_ERR_DETECTED, session_token, 0, 0); - if (rc) + if (rc) { netdev_err(netdev, "H_VIOCTL initiated failover failed, rc %ld\n", rc); + goto last_resort; + } + + return count; last_resort: netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n"); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 4a7a56ff74ce..fa2d607a7b1b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -930,6 +930,7 @@ struct ibmvnic_adapter { struct ibmvnic_tx_pool *tx_pool; struct ibmvnic_tx_pool *tso_pool; + struct completion probe_done; struct completion init_done; int init_done_rc; diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index c3def0ee7788..8d06c9d8ff8b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -115,7 +115,8 @@ enum e1000_boards { board_pch_lpt, board_pch_spt, board_pch_cnp, - board_pch_tgp + board_pch_tgp, + board_pch_adp }; struct e1000_ps_page { @@ -502,6 +503,7 @@ extern const struct e1000_info e1000_pch_lpt_info; extern const struct e1000_info e1000_pch_spt_info; extern const struct e1000_info e1000_pch_cnp_info; extern const struct e1000_info e1000_pch_tgp_info; +extern const struct e1000_info e1000_pch_adp_info; extern const struct e1000_info e1000_es2_info; void e1000e_ptp_init(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index bcf680e83811..13382df2f2ef 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -630,6 +630,7 @@ struct e1000_phy_info { bool disable_polarity_correction; bool is_mdix; bool polarity_correction; + bool reset_disable; bool speed_downgraded; bool autoneg_wait_to_complete; }; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 5e4fc9b4e2ad..d60e2016d03c 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -2050,6 +2050,10 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) bool blocked = false; int i = 0; + /* Check the PHY (LCD) reset flag */ + if (hw->phy.reset_disable) + return true; + while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && (i++ < 30)) usleep_range(10000, 11000); @@ -4136,9 +4140,9 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) return ret_val; if (!(data & valid_csum_mask)) { - e_dbg("NVM Checksum Invalid\n"); + e_dbg("NVM Checksum valid bit not set\n"); - if (hw->mac.type < e1000_pch_cnp) { + if (hw->mac.type < e1000_pch_tgp) { data |= valid_csum_mask; ret_val = e1000_write_nvm(hw, word, 1, &data); if (ret_val) @@ -6021,3 +6025,23 @@ const struct e1000_info e1000_pch_tgp_info = { .phy_ops = &ich8_phy_ops, .nvm_ops = &spt_nvm_ops, }; + +const struct e1000_info e1000_pch_adp_info = { + .mac = e1000_pch_adp, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | 
FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9022, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &spt_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 2504b11c3169..638a3ddd7ada 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -271,6 +271,7 @@ #define I217_CGFREG_ENABLE_MTA_RESET 0x0002 #define I217_MEMPWR PHY_REG(772, 26) #define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 +#define I217_MEMPWR_MOEM 0x1000 /* Receive Address Initial CRC Calculation */ #define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4)) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 635a95927e93..c5bdef3ffe26 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -52,6 +52,7 @@ static const struct e1000_info *e1000_info_tbl[] = { [board_pch_spt] = &e1000_pch_spt_info, [board_pch_cnp] = &e1000_pch_cnp_info, [board_pch_tgp] = &e1000_pch_tgp_info, + [board_pch_adp] = &e1000_pch_adp_info, }; struct e1000_reg_info { @@ -6341,7 +6342,8 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter) u32 mac_data; u16 phy_data; - if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && + hw->mac.type >= e1000_pch_adp) { /* Request ME configure the device for S0ix */ mac_data = er32(H2ME); mac_data |= E1000_H2ME_START_DPG; @@ -6490,7 +6492,8 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter) u16 phy_data; u32 i = 0; - if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && + hw->mac.type >= e1000_pch_adp) { /* Request ME unconfigure the device from S0ix */ mac_data = er32(H2ME); mac_data &= ~E1000_H2ME_START_DPG; @@ -6984,8 +6987,21 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev) struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = to_pci_dev(dev); + struct e1000_hw *hw = &adapter->hw; + u16 phy_data; int rc; + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && + hw->mac.type >= e1000_pch_adp) { + /* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */ + e1e_rphy(hw, I217_MEMPWR, &phy_data); + phy_data |= I217_MEMPWR_MOEM; + e1e_wphy(hw, I217_MEMPWR, phy_data); + + /* Disable LCD reset */ + hw->phy.reset_disable = true; + } + e1000e_flush_lpic(pdev); e1000e_pm_freeze(dev); @@ -7007,6 +7023,8 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev) struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = to_pci_dev(dev); + struct e1000_hw *hw = &adapter->hw; + u16 phy_data; int rc; /* Introduce S0ix implementation */ @@ -7017,6 +7035,17 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev) if (rc) return rc; + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && + hw->mac.type >= e1000_pch_adp) { + /* Unmask OEM Bits / Gig Disable / Restart AN 772_26[12] = 0 */ + e1e_rphy(hw, I217_MEMPWR, &phy_data); + phy_data &= ~I217_MEMPWR_MOEM; + e1e_wphy(hw, I217_MEMPWR, phy_data); + + /* Enable LCD reset */ + hw->phy.reset_disable = false; + } + return e1000e_pm_thaw(dev); } @@ -7898,22 +7927,22 @@ static const struct pci_device_id e1000_pci_tbl[] = { { 
PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_adp }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2e02cc68cd3f..80c5cecaf2b5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -144,6 +144,7 @@ enum i40e_state_t { __I40E_VIRTCHNL_OP_PENDING, __I40E_RECOVERY_MODE, __I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */ + __I40E_IN_REMOVE, __I40E_VFS_RELEASING, /* This must be last as it determines the size of the BITMAP */ __I40E_STATE_SIZE__, diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 1e57cc8c47d7..9db5001297c7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -742,10 +742,8 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id) vsi = pf->vsi[vf->lan_vsi_idx]; dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n", vf_id, vf->lan_vsi_id, vsi->seid, 
vf->num_queue_pairs); - dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n", - vf->num_mdd_events, - vf->num_invalid_msgs, - vf->num_valid_msgs); + dev_info(&pf->pdev->dev, " num MDD=%lld\n", + vf->num_mdd_events); } else { dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f70c478dafdb..31b03fe78d3b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -10853,6 +10853,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { int ret; + + if (test_bit(__I40E_IN_REMOVE, pf->state)) + return; /* Now we wait for GRST to settle out. * We don't have to delete the VEBs or VSIs from the hw switch * because the reset will make them disappear. @@ -12212,6 +12215,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) vsi->req_queue_pairs = queue_count; i40e_prep_for_reset(pf); + if (test_bit(__I40E_IN_REMOVE, pf->state)) + return pf->alloc_rss_size; pf->alloc_rss_size = new_rss_size; @@ -13038,6 +13043,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, if (need_reset) i40e_prep_for_reset(pf); + /* VSI shall be deleted in a moment, just return EINVAL */ + if (test_bit(__I40E_IN_REMOVE, pf->state)) + return -EINVAL; + old_prog = xchg(&vsi->xdp_prog, prog); if (need_reset) { @@ -15928,8 +15937,13 @@ static void i40e_remove(struct pci_dev *pdev) i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); - while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) + /* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE + * flags, once they are set, i40e_rebuild should not be called as + * i40e_prep_for_reset always returns early. 
+ */ + while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) usleep_range(1000, 2000); + set_bit(__I40E_IN_REMOVE, pf->state); if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { set_bit(__I40E_VF_RESETS_DISABLED, pf->state); @@ -16128,6 +16142,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); + if (test_bit(__I40E_IN_REMOVE, pf->state)) + return; + i40e_reset_and_rebuild(pf, false, false); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index dfdb6e786461..2606e8f0f19b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1917,19 +1917,17 @@ sriov_configure_out: /***********************virtual channel routines******************/ /** - * i40e_vc_send_msg_to_vf_ex + * i40e_vc_send_msg_to_vf * @vf: pointer to the VF info * @v_opcode: virtual channel opcode * @v_retval: virtual channel return value * @msg: pointer to the msg buffer * @msglen: msg length - * @is_quiet: true for not printing unsuccessful return values, false otherwise * * send msg to VF **/ -static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen, - bool is_quiet) +static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) { struct i40e_pf *pf; struct i40e_hw *hw; @@ -1944,25 +1942,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, hw = &pf->hw; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; - /* single place to detect unsuccessful return values */ - if (v_retval && !is_quiet) { - vf->num_invalid_msgs++; - dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", - vf->vf_id, v_opcode, v_retval); - if (vf->num_invalid_msgs > - I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { - dev_err(&pf->pdev->dev, - "Number of invalid messages exceeded for VF %d\n", - vf->vf_id); - dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); - set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); - } - } else { - vf->num_valid_msgs++; - /* reset the invalid counter, if a valid message is received. */ - vf->num_invalid_msgs = 0; - } - aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret) { @@ -1976,23 +1955,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, } /** - * i40e_vc_send_msg_to_vf - * @vf: pointer to the VF info - * @v_opcode: virtual channel opcode - * @v_retval: virtual channel return value - * @msg: pointer to the msg buffer - * @msglen: msg length - * - * send msg to VF - **/ -static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen) -{ - return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval, - msg, msglen, false); -} - -/** * i40e_vc_send_resp_to_vf * @vf: pointer to the VF info * @opcode: operation code @@ -2822,7 +2784,6 @@ error_param: * i40e_check_vf_permission * @vf: pointer to the VF info * @al: MAC address list from virtchnl - * @is_quiet: set true for printing msg without opcode info, false otherwise * * Check that the given list of MAC addresses is allowed. Will return -EPERM * if any address in the list is not valid. Checks the following conditions: @@ -2837,8 +2798,7 @@ error_param: * addresses might not be accurate. 
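The __I40E_IN_REMOVE handling above is a one-way latch: removal sets the bit exactly once, and every reset/rebuild entry point tests it first and bails out, which turns late callbacks into no-ops instead of rebuilds of a half-torn-down device. A minimal sketch of the latch, assuming nothing beyond C11 atomics (names are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdio.h>

/* One-way teardown latch in the spirit of __I40E_IN_REMOVE; once set it
 * is never cleared, and every rebuild-like path checks it first. */
static atomic_bool in_remove;

static void rebuild(void)
{
        if (atomic_load(&in_remove)) {
                puts("rebuild skipped: device is being removed");
                return;
        }
        puts("rebuild runs normally");
}

static void remove_device(void)
{
        atomic_store(&in_remove, true); /* latch before tearing down */
        puts("teardown proceeds; concurrent rebuilds now bail out");
}

int main(void)
{
        rebuild();        /* normal path */
        remove_device();
        rebuild();        /* late callback becomes a no-op */
        return 0;
}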
**/ static inline int i40e_check_vf_permission(struct i40e_vf *vf, - struct virtchnl_ether_addr_list *al, - bool *is_quiet) + struct virtchnl_ether_addr_list *al) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; @@ -2846,7 +2806,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, int mac2add_cnt = 0; int i; - *is_quiet = false; for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; u8 *addr = al->list[i].addr; @@ -2870,7 +2829,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); - *is_quiet = true; return -EPERM; } @@ -2921,7 +2879,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - bool is_quiet = false; i40e_status ret = 0; int i; @@ -2938,7 +2895,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) */ spin_lock_bh(&vsi->mac_filter_hash_lock); - ret = i40e_check_vf_permission(vf, al, &is_quiet); + ret = i40e_check_vf_permission(vf, al); if (ret) { spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; @@ -2976,8 +2933,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) error_param: /* send the response to the VF */ - return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR, - ret, NULL, 0, is_quiet); + return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, + ret, NULL, 0); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 03c42fd0fea1..a554d0a0b09b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -10,8 +10,6 @@ #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2 -#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10 - #define I40E_VLAN_PRIORITY_SHIFT 13 #define I40E_VLAN_MASK 0xFFF #define I40E_PRIORITY_MASK 0xE000 @@ -92,9 +90,6 @@ struct i40e_vf { u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u8 num_req_queues; /* num of requested qps */ u64 num_mdd_events; /* num of mdd events detected */ - /* num of continuous malformed or invalid msgs detected */ - u64 num_invalid_msgs; - u64 num_valid_msgs; /* num of valid msgs detected */ unsigned long vf_caps; /* vf's adv. 
capabilities */ unsigned long vf_states; /* vf's runtime states */ diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 59806d1f7e79..4babe4705a55 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -201,6 +201,10 @@ enum iavf_state_t { __IAVF_RUNNING, /* opened, working */ }; +enum iavf_critical_section_t { + __IAVF_IN_REMOVE_TASK, /* device being removed */ +}; + #define IAVF_CLOUD_FIELD_OMAC 0x01 #define IAVF_CLOUD_FIELD_IMAC 0x02 #define IAVF_CLOUD_FIELD_IVLAN 0x04 @@ -246,7 +250,6 @@ struct iavf_adapter { struct list_head mac_filter_list; struct mutex crit_lock; struct mutex client_lock; - struct mutex remove_lock; /* Lock to protect accesses to MAC and VLAN lists */ spinlock_t mac_vlan_list_lock; char misc_vector_name[IFNAMSIZ + 9]; @@ -284,6 +287,8 @@ struct iavf_adapter { #define IAVF_FLAG_LEGACY_RX BIT(15) #define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16) #define IAVF_FLAG_QUEUES_DISABLED BIT(17) +#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18) +#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20) /* duplicates for common code */ #define IAVF_FLAG_DCB_ENABLED 0 /* flags for admin queue service task */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 8125b9120615..0e178a0a59c5 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -302,8 +302,9 @@ static irqreturn_t iavf_msix_aq(int irq, void *data) rd32(hw, IAVF_VFINT_ICR01); rd32(hw, IAVF_VFINT_ICR0_ENA1); - /* schedule work on the private workqueue */ - queue_work(iavf_wq, &adapter->adminq_task); + if (adapter->state != __IAVF_REMOVE) + /* schedule work on the private workqueue */ + queue_work(iavf_wq, &adapter->adminq_task); return IRQ_HANDLED; } @@ -1136,8 +1137,7 @@ void iavf_down(struct iavf_adapter *adapter) rss->state = IAVF_ADV_RSS_DEL_REQUEST; spin_unlock_bh(&adapter->adv_rss_lock); - if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && - adapter->state != __IAVF_RESETTING) { + if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) { /* cancel any current operation */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; /* Schedule operations to close down the HW. 
Don't wait @@ -2120,7 +2120,7 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter) "Requested %d queues, but PF only gave us %d.\n", num_req_queues, adapter->vsi_res->num_queue_pairs); - adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED; adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; iavf_schedule_reset(adapter); @@ -2374,17 +2374,22 @@ static void iavf_watchdog_task(struct work_struct *work) struct iavf_hw *hw = &adapter->hw; u32 reg_val; - if (!mutex_trylock(&adapter->crit_lock)) + if (!mutex_trylock(&adapter->crit_lock)) { + if (adapter->state == __IAVF_REMOVE) + return; + goto restart_watchdog; + } if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) iavf_change_state(adapter, __IAVF_COMM_FAILED); - if (adapter->flags & IAVF_FLAG_RESET_NEEDED && - adapter->state != __IAVF_RESETTING) { - iavf_change_state(adapter, __IAVF_RESETTING); + if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; + mutex_unlock(&adapter->crit_lock); + queue_work(iavf_wq, &adapter->reset_task); + return; } switch (adapter->state) { @@ -2419,6 +2424,15 @@ static void iavf_watchdog_task(struct work_struct *work) msecs_to_jiffies(1)); return; case __IAVF_INIT_FAILED: + if (test_bit(__IAVF_IN_REMOVE_TASK, + &adapter->crit_section)) { + /* Do not update the state and do not reschedule + * watchdog task, iavf_remove should handle this state + * as it can loop forever + */ + mutex_unlock(&adapter->crit_lock); + return; + } if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { dev_err(&adapter->pdev->dev, "Failed to communicate with PF; waiting before retry\n"); @@ -2435,6 +2449,17 @@ static void iavf_watchdog_task(struct work_struct *work) queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); return; case __IAVF_COMM_FAILED: + if (test_bit(__IAVF_IN_REMOVE_TASK, + &adapter->crit_section)) { + /* Set state to __IAVF_INIT_FAILED and perform remove + * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task + * doesn't bring the state back to __IAVF_COMM_FAILED. + */ + iavf_change_state(adapter, __IAVF_INIT_FAILED); + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; + mutex_unlock(&adapter->crit_lock); + return; + } reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if (reg_val == VIRTCHNL_VFR_VFACTIVE || @@ -2507,7 +2532,8 @@ static void iavf_watchdog_task(struct work_struct *work) schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); mutex_unlock(&adapter->crit_lock); restart_watchdog: - queue_work(iavf_wq, &adapter->adminq_task); + if (adapter->state >= __IAVF_DOWN) + queue_work(iavf_wq, &adapter->adminq_task); if (adapter->aq_required) queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(20)); @@ -2515,6 +2541,13 @@ restart_watchdog: queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); } +/** + * iavf_disable_vf - disable VF + * @adapter: board private structure + * + * Set communication failed flag and free all resources. + * NOTE: This function is expected to be called with crit_lock being held. 
+ **/ static void iavf_disable_vf(struct iavf_adapter *adapter) { struct iavf_mac_filter *f, *ftmp; @@ -2569,7 +2602,6 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); iavf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; - mutex_unlock(&adapter->crit_lock); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); @@ -2601,13 +2633,13 @@ static void iavf_reset_task(struct work_struct *work) /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ - if (mutex_is_locked(&adapter->remove_lock)) - return; + if (!mutex_trylock(&adapter->crit_lock)) { + if (adapter->state != __IAVF_REMOVE) + queue_work(iavf_wq, &adapter->reset_task); - if (iavf_lock_timeout(&adapter->crit_lock, 200)) { - schedule_work(&adapter->reset_task); return; } + while (!mutex_trylock(&adapter->client_lock)) usleep_range(500, 1000); if (CLIENT_ENABLED(adapter)) { @@ -2662,6 +2694,7 @@ static void iavf_reset_task(struct work_struct *work) reg_val); iavf_disable_vf(adapter); mutex_unlock(&adapter->client_lock); + mutex_unlock(&adapter->crit_lock); return; /* Do not attempt to reinit. It's dead, Jim. */ } @@ -2670,8 +2703,7 @@ continue_reset: * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. */ - running = ((adapter->state == __IAVF_RUNNING) || - (adapter->state == __IAVF_RESETTING)); + running = adapter->state == __IAVF_RUNNING; if (running) { netdev->flags &= ~IFF_UP; @@ -2701,7 +2733,8 @@ continue_reset: err); adapter->aq_required = 0; - if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { + if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) || + (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) { err = iavf_reinit_interrupt_scheme(adapter); if (err) goto reset_err; @@ -2773,12 +2806,13 @@ continue_reset: if (err) goto reset_err; - if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { + if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) || + (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) { err = iavf_request_traffic_irqs(adapter, netdev->name); if (err) goto reset_err; - adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED; } iavf_configure(adapter); @@ -2793,6 +2827,9 @@ continue_reset: iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } + + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); @@ -2826,13 +2863,19 @@ static void iavf_adminq_task(struct work_struct *work) if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) goto out; + if (!mutex_trylock(&adapter->crit_lock)) { + if (adapter->state == __IAVF_REMOVE) + return; + + queue_work(iavf_wq, &adapter->adminq_task); + goto out; + } + event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) goto out; - if (iavf_lock_timeout(&adapter->crit_lock, 200)) - goto freedom; do { ret = iavf_clean_arq_element(hw, &event, &pending); v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); @@ -2848,6 +2891,24 @@ static void iavf_adminq_task(struct work_struct *work) } while (pending); mutex_unlock(&adapter->crit_lock); + if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) { + if (adapter->netdev_registered || + !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { + struct net_device *netdev = 
adapter->netdev; + + rtnl_lock(); + netdev_update_features(netdev); + rtnl_unlock(); + /* Request VLAN offload settings */ + if (VLAN_V2_ALLOWED(adapter)) + iavf_set_vlan_offload_features + (adapter, 0, netdev->features); + + iavf_set_queue_vlan_tag_loc(adapter); + } + + adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; + } if ((adapter->flags & (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || adapter->state == __IAVF_RESETTING) @@ -3800,11 +3861,12 @@ static int iavf_close(struct net_device *netdev) struct iavf_adapter *adapter = netdev_priv(netdev); int status; - if (adapter->state <= __IAVF_DOWN_PENDING) - return 0; + mutex_lock(&adapter->crit_lock); - while (!mutex_trylock(&adapter->crit_lock)) - usleep_range(500, 1000); + if (adapter->state <= __IAVF_DOWN_PENDING) { + mutex_unlock(&adapter->crit_lock); + return 0; + } set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) @@ -3853,8 +3915,11 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) iavf_notify_client_l2_params(&adapter->vsi); adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } - adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); + + if (netif_running(netdev)) { + adapter->flags |= IAVF_FLAG_RESET_NEEDED; + queue_work(iavf_wq, &adapter->reset_task); + } return 0; } @@ -4431,7 +4496,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ mutex_init(&adapter->crit_lock); mutex_init(&adapter->client_lock); - mutex_init(&adapter->remove_lock); mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex); @@ -4547,7 +4611,6 @@ static int __maybe_unused iavf_resume(struct device *dev_d) static void iavf_remove(struct pci_dev *pdev) { struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); - enum iavf_state_t prev_state = adapter->last_state; struct net_device *netdev = adapter->netdev; struct iavf_fdir_fltr *fdir, *fdirtmp; struct iavf_vlan_filter *vlf, *vlftmp; @@ -4556,14 +4619,37 @@ static void iavf_remove(struct pci_dev *pdev) struct iavf_cloud_filter *cf, *cftmp; struct iavf_hw *hw = &adapter->hw; int err; - /* Indicate we are in remove and not to run reset_task */ - mutex_lock(&adapter->remove_lock); - cancel_work_sync(&adapter->reset_task); + + /* When reboot/shutdown is in progress there is no need to do + * anything, as the adapter is already in the REMOVE state that was + * set during the iavf_shutdown() callback. + */ + if (adapter->state == __IAVF_REMOVE) + return; + + set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section); + /* Wait until port initialization is complete. + * There are flows where register/unregister netdev may race. 
+ */ + while (1) { + mutex_lock(&adapter->crit_lock); + if (adapter->state == __IAVF_RUNNING || + adapter->state == __IAVF_DOWN || + adapter->state == __IAVF_INIT_FAILED) { + mutex_unlock(&adapter->crit_lock); + break; + } + + mutex_unlock(&adapter->crit_lock); + usleep_range(500, 1000); + } cancel_delayed_work_sync(&adapter->watchdog_task); - cancel_delayed_work_sync(&adapter->client_task); + if (adapter->netdev_registered) { - unregister_netdev(netdev); + rtnl_lock(); + unregister_netdevice(netdev); adapter->netdev_registered = false; + rtnl_unlock(); } if (CLIENT_ALLOWED(adapter)) { err = iavf_lan_del_device(adapter); @@ -4572,6 +4658,10 @@ static void iavf_remove(struct pci_dev *pdev) err); } + mutex_lock(&adapter->crit_lock); + dev_info(&adapter->pdev->dev, "Remove device\n"); + iavf_change_state(adapter, __IAVF_REMOVE); + iavf_request_reset(adapter); msleep(50); /* If the FW isn't responding, kick it once, but only once. */ @@ -4579,37 +4669,24 @@ static void iavf_remove(struct pci_dev *pdev) iavf_request_reset(adapter); msleep(50); } - if (iavf_lock_timeout(&adapter->crit_lock, 5000)) - dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); - dev_info(&adapter->pdev->dev, "Removing device\n"); + iavf_misc_irq_disable(adapter); /* Shut down all the garbage mashers on the detention level */ - iavf_change_state(adapter, __IAVF_REMOVE); + cancel_work_sync(&adapter->reset_task); + cancel_delayed_work_sync(&adapter->watchdog_task); + cancel_work_sync(&adapter->adminq_task); + cancel_delayed_work_sync(&adapter->client_task); + adapter->aq_required = 0; adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); - iavf_misc_irq_disable(adapter); iavf_free_misc_irq(adapter); - /* In case we enter iavf_remove from erroneous state, free traffic irqs - * here, so as to not cause a kernel crash, when calling - * iavf_reset_interrupt_capability. - */ - if ((adapter->last_state == __IAVF_RESETTING && - prev_state != __IAVF_DOWN) || - (adapter->last_state == __IAVF_RUNNING && - !(netdev->flags & IFF_UP))) - iavf_free_traffic_irqs(adapter); - iavf_reset_interrupt_capability(adapter); iavf_free_q_vectors(adapter); - cancel_delayed_work_sync(&adapter->watchdog_task); - - cancel_work_sync(&adapter->adminq_task); - iavf_free_rss(adapter); if (hw->aq.asq.count) @@ -4621,8 +4698,6 @@ static void iavf_remove(struct pci_dev *pdev) mutex_destroy(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); mutex_destroy(&adapter->crit_lock); - mutex_unlock(&adapter->remove_lock); - mutex_destroy(&adapter->remove_lock); iounmap(hw->hw_addr); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 5ee1d118fd30..5263cefe46f5 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1835,6 +1835,22 @@ void iavf_request_reset(struct iavf_adapter *adapter) } /** + * iavf_netdev_features_vlan_strip_set - update vlan strip status + * @netdev: ptr to netdev being adjusted + * @enable: enable or disable vlan strip + * + * Helper function to change vlan strip status in netdev->features. 
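iavf_remove() above no longer races with initialization: it polls under crit_lock until the state machine settles in one of the states it knows how to tear down, and only then proceeds. A hedged pthread sketch of that wait-for-settled-state loop, with invented state names standing in for the __IAVF_* states:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum state { ST_INIT, ST_RUNNING, ST_DOWN, ST_INIT_FAILED };

static pthread_mutex_t crit_lock = PTHREAD_MUTEX_INITIALIZER;
static enum state adapter_state = ST_INIT;

/* Spin until the state machine settles in a state remove can handle,
 * mirroring the while (1) / usleep_range() loop in iavf_remove(). */
static void wait_for_settled_state(void)
{
        for (;;) {
                pthread_mutex_lock(&crit_lock);
                if (adapter_state == ST_RUNNING ||
                    adapter_state == ST_DOWN ||
                    adapter_state == ST_INIT_FAILED) {
                        pthread_mutex_unlock(&crit_lock);
                        return;
                }
                pthread_mutex_unlock(&crit_lock);
                usleep(500); /* give initialization a chance to finish */
        }
}

static void *init_thread(void *arg)
{
        (void)arg;
        usleep(2000); /* simulate port initialization in flight */
        pthread_mutex_lock(&crit_lock);
        adapter_state = ST_RUNNING;
        pthread_mutex_unlock(&crit_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, init_thread, NULL);
        wait_for_settled_state();
        puts("safe to unregister and free");
        pthread_join(&t, NULL);
        return 0;
}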
+ */ +static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev, + const bool enable) +{ + if (enable) + netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; + else + netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; +} + +/** * iavf_virtchnl_completion * @adapter: adapter structure * @v_opcode: opcode sent by PF @@ -2057,8 +2073,18 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); + /* Vlan stripping could not be enabled by ethtool. + * Disable it in netdev->features. + */ + iavf_netdev_features_vlan_strip_set(netdev, false); + break; case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); + /* Vlan stripping could not be disabled by ethtool. + * Enable it in netdev->features. + */ + iavf_netdev_features_vlan_strip_set(netdev, true); break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", @@ -2146,29 +2172,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, sizeof(adapter->vlan_v2_caps))); iavf_process_config(adapter); - - /* unlock crit_lock before acquiring rtnl_lock as other - * processes holding rtnl_lock could be waiting for the same - * crit_lock - */ - mutex_unlock(&adapter->crit_lock); - /* VLAN capabilities can change during VFR, so make sure to - * update the netdev features with the new capabilities - */ - rtnl_lock(); - netdev_update_features(netdev); - rtnl_unlock(); - if (iavf_lock_timeout(&adapter->crit_lock, 10000)) - dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", - __FUNCTION__); - - /* Request VLAN offload settings */ - if (VLAN_V2_ALLOWED(adapter)) - iavf_set_vlan_offload_features(adapter, 0, - netdev->features); - - iavf_set_queue_vlan_tag_loc(adapter); - + adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; } break; case VIRTCHNL_OP_ENABLE_QUEUES: @@ -2334,6 +2338,20 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, spin_unlock_bh(&adapter->adv_rss_lock); } break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + /* PF enabled vlan strip on this VF. + * Update netdev->features if needed to be in sync with ethtool. + */ + if (!v_retval) + iavf_netdev_features_vlan_strip_set(netdev, true); + break; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + /* PF disabled vlan strip on this VF. + * Update netdev->features if needed to be in sync with ethtool. 
+ */ + if (!v_retval) + iavf_netdev_features_vlan_strip_set(netdev, false); + break; default: if (adapter->current_op && (v_opcode != adapter->current_op)) dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 4e16d185077d..bea1d1e39fa2 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -280,7 +280,6 @@ enum ice_pf_state { ICE_VFLR_EVENT_PENDING, ICE_FLTR_OVERFLOW_PROMISC, ICE_VF_DIS, - ICE_VF_DEINIT_IN_PROGRESS, ICE_CFG_BUSY, ICE_SERVICE_SCHED, ICE_SERVICE_DIS, @@ -483,6 +482,8 @@ enum ice_pf_flags { ICE_FLAG_VF_TRUE_PROMISC_ENA, ICE_FLAG_MDD_AUTO_RESET_VF, ICE_FLAG_LINK_LENIENT_MODE_ENA, + ICE_FLAG_PLUG_AUX_DEV, + ICE_FLAG_MTU_CHANGED, ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -887,7 +888,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf) if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) { set_bit(ICE_FLAG_RDMA_ENA, pf->flags); set_bit(ICE_FLAG_AUX_ENA, pf->flags); - ice_plug_aux_dev(pf); + set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags); } } @@ -897,7 +898,16 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf) */ static inline void ice_clear_rdma_cap(struct ice_pf *pf) { - ice_unplug_aux_dev(pf); + /* We can directly unplug aux device here only if the flag bit + * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev() + * could race with ice_plug_aux_dev() called from + * ice_service_task(). In this case we only clear that bit now and + * aux device will be unplugged later once ice_plug_aux_device() + * called from ice_service_task() finishes (see ice_service_task()). + */ + if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) + ice_unplug_aux_dev(pf); + clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); clear_bit(ICE_FLAG_AUX_ENA, pf->flags); } diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 408d15a5b0e3..e2af99a763ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -3340,9 +3340,10 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) && !ice_fw_supports_report_dflt_cfg(hw)) { - struct ice_link_default_override_tlv tlv; + struct ice_link_default_override_tlv tlv = { 0 }; - if (ice_get_link_default_override(&tlv, pi)) + status = ice_get_link_default_override(&tlv, pi); + if (status) goto out; if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index 864692b157b6..73edc24d81d5 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -44,6 +44,7 @@ ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac) ctrl_vsi->rxq_map[vf->vf_id]; rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE; rule_info.flags_info.act_valid = true; + rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN; err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, vf->repr->mac_rule); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index e2e3ef7fba7f..a5dc9824e255 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -2298,7 +2298,7 @@ ice_set_link_ksettings(struct net_device *netdev, if (err) goto done; - curr_link_speed = 
pi->phy.link_info.link_speed; + curr_link_speed = pi->phy.curr_user_speed_req; adv_link_speed = ice_ksettings_find_adv_link_speed(ks); /* If speed didn't get set, set it to what it currently is. diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index e375ac849aec..4f954db01b92 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -204,11 +204,7 @@ ice_lag_unlink(struct ice_lag *lag, lag->upper_netdev = NULL; } - if (lag->peer_netdev) { - dev_put(lag->peer_netdev); - lag->peer_netdev = NULL; - } - + lag->peer_netdev = NULL; ice_set_sriov_cap(pf); ice_set_rdma_cap(pf); lag->bonded = false; @@ -216,6 +212,32 @@ ice_lag_unlink(struct ice_lag *lag, } /** + * ice_lag_unregister - handle netdev unregister events + * @lag: LAG info struct + * @netdev: netdev reporting the event + */ +static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev) +{ + struct ice_pf *pf = lag->pf; + + /* check to see if this event is for this netdev + * check that we are in an aggregate + */ + if (netdev != lag->netdev || !lag->bonded) + return; + + if (lag->upper_netdev) { + dev_put(lag->upper_netdev); + lag->upper_netdev = NULL; + ice_set_sriov_cap(pf); + ice_set_rdma_cap(pf); + } + /* perform some cleanup in case we come back */ + lag->bonded = false; + lag->role = ICE_LAG_NONE; +} + +/** * ice_lag_changeupper_event - handle LAG changeupper event * @lag: LAG info struct * @ptr: opaque pointer data @@ -307,7 +329,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event, ice_lag_info_event(lag, ptr); break; case NETDEV_UNREGISTER: - ice_lag_unlink(lag, ptr); + ice_lag_unregister(lag, netdev); break; default: break; diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index d981dc6f2323..85a612838a89 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -568,6 +568,7 @@ struct ice_tx_ctx_desc { (0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S) #define ICE_TXD_CTX_QW1_MSS_S 50 +#define ICE_TXD_CTX_MIN_MSS 64 #define ICE_TXD_CTX_QW1_VSI_S 50 #define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 0c187cf04fcf..53256aca27c7 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1684,6 +1684,12 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) if (status) dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n", vsi_num, status); + + status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI, + ICE_FLOW_SEG_HDR_ESP); + if (status) + dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n", + vsi_num, status); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 30814435f779..b7e8744b0c0a 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1799,7 +1799,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf) * reset, so print the event prior to reset. 
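 * The reset below also runs under the VF's cfg_lock, which serializes
 * it against concurrent virtchnl configuration of the same VF.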
*/ ice_print_vf_rx_mdd_event(vf); + mutex_lock(&pf->vf[i].cfg_lock); ice_reset_vf(&pf->vf[i], false); + mutex_unlock(&pf->vf[i].cfg_lock); } } } @@ -2253,6 +2255,30 @@ static void ice_service_task(struct work_struct *work) return; } + if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) { + /* Plug aux device per request */ + ice_plug_aux_dev(pf); + + /* Mark plugging as done but check whether unplug was + * requested during ice_plug_aux_dev() call + * (e.g. from ice_clear_rdma_cap()) and if so then + * unplug aux device. + */ + if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) + ice_unplug_aux_dev(pf); + } + + if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { + struct iidc_event *event; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (event) { + set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); + ice_send_event_to_aux(pf, event); + kfree(event); + } + } + ice_clean_adminq_subtask(pf); ice_check_media_subtask(pf); ice_check_for_hang_subtask(pf); @@ -3018,7 +3044,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) struct iidc_event *event; ena_mask &= ~ICE_AUX_CRIT_ERR; - event = kzalloc(sizeof(*event), GFP_KERNEL); + event = kzalloc(sizeof(*event), GFP_ATOMIC); if (event) { set_bit(IIDC_EVENT_CRIT_ERR, event->type); /* report the entire OICR value to AUX driver */ @@ -4854,7 +4880,6 @@ static void ice_remove(struct pci_dev *pdev) ice_devlink_unregister_params(pf); set_bit(ICE_DOWN, pf->state); - mutex_destroy(&(&pf->hw)->fdir_fltr_lock); ice_deinit_lag(pf); if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) ice_ptp_release(pf); @@ -4862,6 +4887,7 @@ ice_remove_arfs(pf); ice_setup_mc_magic_wake(pf); ice_vsi_release_all(pf); + mutex_destroy(&(&pf->hw)->fdir_fltr_lock); ice_set_wake(pf); ice_free_irq_msix_misc(pf); ice_for_each_vsi(pf, i) { @@ -5936,8 +5962,9 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, u64 pkts = 0, bytes = 0; ring = READ_ONCE(rings[i]); - if (ring) - ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes); + if (!ring) + continue; + ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes); vsi_stats->tx_packets += pkts; vsi_stats->tx_bytes += bytes; vsi->tx_restart += ring->tx_stats.restart_q; @@ -6817,7 +6844,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; - struct iidc_event *event; u8 count = 0; int err = 0; @@ -6852,14 +6878,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) return -EBUSY; } - event = kzalloc(sizeof(*event), GFP_KERNEL); - if (!event) - return -ENOMEM; - - set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); - ice_send_event_to_aux(pf, event); - clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); - netdev->mtu = (unsigned int)new_mtu; /* if VSI is up, bring it down and then back up */ @@ -6867,21 +6885,18 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) err = ice_down(vsi); if (err) { netdev_err(netdev, "change MTU if_down err %d\n", err); - goto event_after; + return err; } err = ice_up(vsi); if (err) { netdev_err(netdev, "change MTU if_up err %d\n", err); - goto event_after; + return err; } } netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); -event_after: - set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); - ice_send_event_to_aux(pf, event); - kfree(event); + set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); return err; } @@ -8525,6 +8540,7 @@
ice_features_check(struct sk_buff *skb, struct net_device __always_unused *netdev, netdev_features_t features) { + bool gso = skb_is_gso(skb); size_t len; /* No point in doing any of this if neither checksum nor GSO are @@ -8537,24 +8553,32 @@ ice_features_check(struct sk_buff *skb, /* We cannot support GSO if the MSS is going to be less than * 64 bytes. If it is then we need to drop support for GSO. */ - if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) + if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS)) features &= ~NETIF_F_GSO_MASK; - len = skb_network_header(skb) - skb->data; + len = skb_network_offset(skb); if (len > ICE_TXD_MACLEN_MAX || len & 0x1) goto out_rm_features; - len = skb_transport_header(skb) - skb_network_header(skb); + len = skb_network_header_len(skb); if (len > ICE_TXD_IPLEN_MAX || len & 0x1) goto out_rm_features; if (skb->encapsulation) { - len = skb_inner_network_header(skb) - skb_transport_header(skb); - if (len > ICE_TXD_L4LEN_MAX || len & 0x1) - goto out_rm_features; + /* this must work for VXLAN frames AND IPIP/SIT frames, and in + * the case of IPIP frames, the transport header pointer is + * after the inner header! So check to make sure that this + * is a GRE or UDP_TUNNEL frame before doing that math. + */ + if (gso && (skb_shinfo(skb)->gso_type & + (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) { + len = skb_inner_network_header(skb) - + skb_transport_header(skb); + if (len > ICE_TXD_L4LEN_MAX || len & 0x1) + goto out_rm_features; + } - len = skb_inner_transport_header(skb) - - skb_inner_network_header(skb); + len = skb_inner_network_header_len(skb); if (len > ICE_TXD_IPLEN_MAX || len & 0x1) goto out_rm_features; } diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index dc1b0e9e6df5..695b6dd61dc2 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -47,6 +47,7 @@ enum ice_protocol_type { enum ice_sw_tunnel_type { ICE_NON_TUN = 0, + ICE_SW_TUN_AND_NON_TUN, ICE_SW_TUN_VXLAN, ICE_SW_TUN_GENEVE, ICE_SW_TUN_NVGRE, diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index ae291d442539..000c39d163a2 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -1533,9 +1533,12 @@ exit: static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) { struct timespec64 now, then; + int ret; then = ns_to_timespec64(delta); - ice_ptp_gettimex64(info, &now, NULL); + ret = ice_ptp_gettimex64(info, &now, NULL); + if (ret) + return ret; now = timespec64_add(now, then); return ice_ptp_settime64(info, (const struct timespec64 *)&now); diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 11ae0bee3590..475ec2afa210 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -4537,6 +4537,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, case ICE_SW_TUN_NVGRE: prof_type = ICE_PROF_TUN_GRE; break; + case ICE_SW_TUN_AND_NON_TUN: default: prof_type = ICE_PROF_ALL; break; @@ -5305,7 +5306,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (status) goto err_ice_add_adv_rule; - if (rinfo->tun_type != ICE_NON_TUN) { + if (rinfo->tun_type != ICE_NON_TUN && + rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) { status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, 
s_rule->pdata.lkup_tx_rx.hdr, pkt_offsets); diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index e8aab664270a..65cf32eb4046 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -709,7 +709,7 @@ ice_tc_set_port(struct flow_match_ports match, fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT; else fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT; - fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT; + headers->l4_key.dst_port = match.key->dst; headers->l4_mask.dst_port = match.mask->dst; } @@ -718,7 +718,7 @@ ice_tc_set_port(struct flow_match_ports match, fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT; else fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT; - fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT; + headers->l4_key.src_port = match.key->src; headers->l4_mask.src_port = match.mask->src; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 39b80124d282..1be3cd4b2bef 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -500,8 +500,6 @@ void ice_free_vfs(struct ice_pf *pf) struct ice_hw *hw = &pf->hw; unsigned int tmp, i; - set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state); - if (!pf->vf) return; @@ -519,22 +517,26 @@ void ice_free_vfs(struct ice_pf *pf) else dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); - /* Avoid wait time by stopping all VFs at the same time */ - ice_for_each_vf(pf, i) - ice_dis_vf_qs(&pf->vf[i]); - tmp = pf->num_alloc_vfs; pf->num_qps_per_vf = 0; pf->num_alloc_vfs = 0; for (i = 0; i < tmp; i++) { - if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { + struct ice_vf *vf = &pf->vf[i]; + + mutex_lock(&vf->cfg_lock); + + ice_dis_vf_qs(vf); + + if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { /* disable VF qp mappings and set VF disable state */ - ice_dis_vf_mappings(&pf->vf[i]); - set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states); - ice_free_vf_res(&pf->vf[i]); + ice_dis_vf_mappings(vf); + set_bit(ICE_VF_STATE_DIS, vf->vf_states); + ice_free_vf_res(vf); } - mutex_destroy(&pf->vf[i].cfg_lock); + mutex_unlock(&vf->cfg_lock); + + mutex_destroy(&vf->cfg_lock); } if (ice_sriov_free_msix_res(pf)) @@ -570,7 +572,6 @@ void ice_free_vfs(struct ice_pf *pf) i); clear_bit(ICE_VF_DIS, pf->state); - clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state); clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); } @@ -1498,6 +1499,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_for_each_vf(pf, v) { vf = &pf->vf[v]; + mutex_lock(&vf->cfg_lock); + vf->driver_caps = 0; ice_vc_set_default_allowlist(vf); @@ -1512,6 +1515,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_vf_pre_vsi_rebuild(vf); ice_vf_rebuild_vsi(vf); ice_vf_post_vsi_rebuild(vf); + + mutex_unlock(&vf->cfg_lock); } if (ice_is_eswitch_mode_switchdev(pf)) @@ -1562,6 +1567,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) u32 reg; int i; + lockdep_assert_held(&vf->cfg_lock); + dev = ice_pf_to_dev(pf); if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { @@ -2061,9 +2068,12 @@ void ice_process_vflr_event(struct ice_pf *pf) bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; /* read GLGEN_VFLRSTAT register to find out the flr VFs */ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); - if (reg & BIT(bit_idx)) + if (reg & BIT(bit_idx)) { /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */ + mutex_lock(&vf->cfg_lock); ice_reset_vf(vf, true); + mutex_unlock(&vf->cfg_lock); + } } } @@ 
-2140,7 +2150,9 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) if (!vf) return; + mutex_lock(&vf->cfg_lock); ice_vc_reset_vf(vf); + mutex_unlock(&vf->cfg_lock); } /** @@ -2170,24 +2182,6 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, dev = ice_pf_to_dev(pf); - /* single place to detect unsuccessful return values */ - if (v_retval) { - vf->num_inval_msgs++; - dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id, - v_opcode, v_retval); - if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) { - dev_err(dev, "Number of invalid messages exceeded for VF %d\n", - vf->vf_id); - dev_err(dev, "Use PF Control I/F to enable the VF\n"); - set_bit(ICE_VF_STATE_DIS, vf->vf_states); - return -EIO; - } - } else { - vf->num_valid_msgs++; - /* reset the invalid counter, if a valid message is received. */ - vf->num_inval_msgs = 0; - } - aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { @@ -4625,10 +4619,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) struct device *dev; int err = 0; - /* if de-init is underway, don't process messages from VF */ - if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state)) - return; - dev = ice_pf_to_dev(pf); if (ice_validate_vf_id(pf, vf_id)) { err = -EINVAL; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 752487a1bdd6..8f27255cc0cc 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -14,7 +14,6 @@ #define ICE_MAX_MACADDR_PER_VF 18 /* Malicious Driver Detection */ -#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10 #define ICE_MDD_EVENTS_THRESHOLD 30 /* Static VF transaction/status register def */ @@ -134,8 +133,6 @@ struct ice_vf { unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ - u64 num_inval_msgs; /* number of continuous invalid msgs */ - u64 num_valid_msgs; /* number of valid msgs detected */ unsigned long vf_caps; /* VF's adv. 
capabilities */ u8 num_req_qs; /* num of queue pairs requested by VF */ u16 num_mac; diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 5cad31c3c7b0..40dbf4b43234 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -746,8 +746,6 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) if (ret_val) return ret_val; ret_val = igc_write_phy_reg_mdic(hw, offset, data); - if (ret_val) - return ret_val; hw->phy.ops.release(hw); } else { ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, @@ -779,8 +777,6 @@ s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) if (ret_val) return ret_val; ret_val = igc_read_phy_reg_mdic(hw, offset, data); - if (ret_val) - return ret_val; hw->phy.ops.release(hw); } else { ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index b3fd8e5cd85b..6a5e9cf6b5da 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -390,12 +390,14 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) u32 cmd_type; while (budget-- > 0) { - if (unlikely(!ixgbe_desc_unused(xdp_ring)) || - !netif_carrier_ok(xdp_ring->netdev)) { + if (unlikely(!ixgbe_desc_unused(xdp_ring))) { work_done = false; break; } + if (!netif_carrier_ok(xdp_ring->netdev)) + break; + if (!xsk_tx_peek_desc(pool, &desc)) break; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 0015fcf1df2b..0f293acd17e8 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1984,14 +1984,15 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter, if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX) return; - set_ring_build_skb_enabled(rx_ring); + if (PAGE_SIZE < 8192) + if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB) + set_ring_uses_large_buffer(rx_ring); - if (PAGE_SIZE < 8192) { - if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB) - return; + /* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */ + if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring)) + return; - set_ring_uses_large_buffer(rx_ring); - } + set_ring_build_skb_enabled(rx_ring); } /** diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index 41d11137cde0..5712c3e94be8 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -260,9 +260,9 @@ static int xrx200_hw_receive(struct xrx200_chan *ch) if (ctl & LTQ_DMA_EOP) { ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev); - netif_receive_skb(ch->skb_head); net_dev->stats.rx_packets++; net_dev->stats.rx_bytes += ch->skb_head->len; + netif_receive_skb(ch->skb_head); ch->skb_head = NULL; ch->skb_tail = NULL; ret = XRX200_DMA_PACKET_COMPLETE; diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig index f99adbf26ab4..04345b929d8e 100644 --- a/drivers/net/ethernet/litex/Kconfig +++ b/drivers/net/ethernet/litex/Kconfig @@ -17,7 +17,7 @@ if NET_VENDOR_LITEX config LITEX_LITEETH tristate "LiteX Ethernet support" - depends on OF + depends on OF && HAS_IOMEM help If you wish to compile a kernel for hardware with a LiteX LiteEth device then you should answer Y to this. 
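The lantiq_xrx200 hunk above is a subtle use-after-free fix: netif_receive_skb() hands the buffer to the network stack, which may free it immediately, so rx_bytes must read skb->len before that call, never after. A minimal sketch of the safe ordering (demo_rx_complete() is a hypothetical helper for illustration, not part of the driver):

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	/* Account for a received frame while the driver still owns the skb.
	 * After netif_receive_skb() the stack owns (and may already have
	 * freed) the buffer, so none of its fields may be touched again.
	 */
	static void demo_rx_complete(struct net_device *dev, struct sk_buff *skb)
	{
		skb->protocol = eth_type_trans(skb, dev);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;	/* read len before the handoff */

		netif_receive_skb(skb);			/* ownership passes to the stack */
	}

In the patched driver the fix is exactly this reordering: the two stats updates move above the netif_receive_skb() call.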
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 105247582684..143ca8be5eb5 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2704,6 +2704,16 @@ MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); static struct platform_device *port_platdev[3]; +static void mv643xx_eth_shared_of_remove(void) +{ + int n; + + for (n = 0; n < 3; n++) { + platform_device_del(port_platdev[n]); + port_platdev[n] = NULL; + } +} + static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, struct device_node *pnp) { @@ -2740,7 +2750,9 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, return -EINVAL; } - of_get_mac_address(pnp, ppd.mac_addr); + ret = of_get_mac_address(pnp, ppd.mac_addr); + if (ret) + return ret; mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); @@ -2804,21 +2816,13 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) ret = mv643xx_eth_shared_of_add_port(pdev, pnp); if (ret) { of_node_put(pnp); + mv643xx_eth_shared_of_remove(); return ret; } } return 0; } -static void mv643xx_eth_shared_of_remove(void) -{ - int n; - - for (n = 0; n < 3; n++) { - platform_device_del(port_platdev[n]); - port_platdev[n] = NULL; - } -} #else static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev) { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 7cdbf8b8bbf6..1a835b48791b 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -6870,6 +6870,9 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; dev->dev.of_node = port_node; + port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; + port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; + if (!mvpp2_use_acpi_compat_mode(port_fwnode)) { port->phylink_config.dev = &dev->dev; port->phylink_config.type = PHYLINK_NETDEV; @@ -6940,9 +6943,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->phylink_config.supported_interfaces); } - port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; - port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; - phylink = phylink_create(&port->phylink_config, port_fwnode, phy_mode, &mvpp2_phylink_ops); if (IS_ERR(phylink)) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index a73a8017e0ee..a79201a9a6f0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -605,6 +605,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req) } else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) { /* Registers that can be accessed from PF */ switch (offset) { + case CPT_AF_DIAG: case CPT_AF_CTL: case CPT_AF_PF_FUNC: case CPT_AF_BLK_RST: diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index cad93f747d0c..73cd0a4b7291 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -554,6 +554,7 @@ static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw) dev_info(prestera_dev(sw), "using random base mac address\n"); } of_node_put(base_mac_np); + of_node_put(np); return prestera_hw_switch_mac_set(sw, sw->base_mac); } diff 
--git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 17fe05809653..3eacd8739929 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -131,11 +131,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd) static void cmd_free_index(struct mlx5_cmd *cmd, int idx) { - unsigned long flags; - - spin_lock_irqsave(&cmd->alloc_lock, flags); + lockdep_assert_held(&cmd->alloc_lock); set_bit(idx, &cmd->bitmask); - spin_unlock_irqrestore(&cmd->alloc_lock, flags); } static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) @@ -145,17 +142,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) { + struct mlx5_cmd *cmd = ent->cmd; + unsigned long flags; + + spin_lock_irqsave(&cmd->alloc_lock, flags); if (!refcount_dec_and_test(&ent->refcnt)) - return; + goto out; if (ent->idx >= 0) { - struct mlx5_cmd *cmd = ent->cmd; - cmd_free_index(cmd, ent->idx); up(ent->page_queue ? &cmd->pages_sem : &cmd->sem); } cmd_free_ent(ent); +out: + spin_unlock_irqrestore(&cmd->alloc_lock, flags); } static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 812e6810cb3b..c14e06ca64d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -224,7 +224,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_eth_seg eth; - struct mlx5_wqe_data_seg data[0]; + struct mlx5_wqe_data_seg data[]; }; struct mlx5e_rx_wqe_ll { @@ -241,8 +241,8 @@ struct mlx5e_umr_wqe { struct mlx5_wqe_umr_ctrl_seg uctrl; struct mlx5_mkey_seg mkc; union { - struct mlx5_mtt inline_mtts[0]; - struct mlx5_klm inline_klms[0]; + DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts); + DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms); }; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index 00449df98a5e..c1e07496c89c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -570,7 +570,8 @@ static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate, static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw) { - *max_average_bw = div_u64(ceil, BYTES_IN_MBIT); + /* Hardware treats 0 as "unlimited", set at least 1. 
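+ * Without the clamp, a ceil below 1 Mbit/s would divide down to 0
+ * and lift the rate limit entirely instead of enforcing it.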
*/ + *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1); qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n", ceil, *max_average_bw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c index 9c076aa20306..b6f5c1bcdbcd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c @@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw, static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev) { - struct mlx5e_rep_priv *rpriv; - struct mlx5e_priv *priv; - - /* A given netdev is not a representor or not a slave of LAG configuration */ - if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev)) - return false; - - priv = netdev_priv(netdev); - rpriv = priv->ppriv; - - /* Egress acl forward to vport is supported only non-uplink representor */ - return rpriv->rep->vport != MLX5_VPORT_UPLINK; + return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev); } static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr) @@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt u16 fwd_vport_num; int err; - if (!mlx5e_rep_is_lag_netdev(netdev)) - return; - info = ptr; lag_info = info->lower_state_info; /* This is not an event of a representor becoming active slave */ @@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr) struct net_device *lag_dev; struct mlx5e_priv *priv; - if (!mlx5e_rep_is_lag_netdev(netdev)) - return; - priv = netdev_priv(netdev); rpriv = priv->ppriv; lag_dev = info->upper_dev; @@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct mlx5e_rep_priv *rpriv; + struct mlx5e_rep_bond *bond; + struct mlx5e_priv *priv; + + if (!mlx5e_rep_is_lag_netdev(netdev)) + return NOTIFY_DONE; + + bond = container_of(nb, struct mlx5e_rep_bond, nb); + priv = netdev_priv(netdev); + rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH); + /* Verify VF representor is on the same device of the bond handling the netevent. 
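+ * Only one notifier block is registered per uplink eswitch, so events
+ * that belong to another device's bond must be ignored.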
*/ + if (rpriv->uplink_priv.bond != bond) + return NOTIFY_DONE; switch (event) { case NETDEV_CHANGELOWERSTATE: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index c6d2f8c78db7..48dc121b2cb4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -491,7 +491,7 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv) } br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event; - err = register_netdevice_notifier(&br_offloads->netdev_nb); + err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb); if (err) { esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n", err); @@ -509,7 +509,9 @@ err_register_swdev_blk: err_register_swdev: destroy_workqueue(br_offloads->wq); err_alloc_wq: + rtnl_lock(); mlx5_esw_bridge_cleanup(esw); + rtnl_unlock(); } void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv) @@ -524,7 +526,7 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv) return; cancel_delayed_work_sync(&br_offloads->update_work); - unregister_netdevice_notifier(&br_offloads->netdev_nb); + unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb); unregister_switchdev_blocking_notifier(&br_offloads->nb_blk); unregister_switchdev_notifier(&br_offloads->nb); destroy_workqueue(br_offloads->wq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h index 26efa33de56f..9cc844bd00f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h @@ -16,11 +16,13 @@ struct mlx5e_tc_act_parse_state { unsigned int num_actions; struct mlx5e_tc_flow *flow; struct netlink_ext_ack *extack; + bool ct_clear; bool encap; bool decap; bool mpls_push; bool ptype_host; const struct ip_tunnel_info *tun_info; + struct mlx5e_mpls_info mpls_info; struct pedit_headers_action hdrs[__PEDIT_CMD_MAX]; int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; int if_count; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c index 06ec30cdb269..58cc33f1363d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c @@ -27,8 +27,13 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state, struct mlx5e_priv *priv, struct mlx5_flow_attr *attr) { + bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR; int err; + /* It's redundant to do ct clear more than once. 
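+ * Offloaded ct clear only resets the CT state registers, so parsing
+ * another clear would just emit the same mod-hdr actions again.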
*/ + if (clear_action && parse_state->ct_clear) + return 0; + err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr, &attr->parse_attr->mod_hdr_acts, act, parse_state->extack); @@ -40,6 +45,8 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state, if (mlx5e_is_eswitch_flow(parse_state->flow)) attr->esw_attr->split_count = attr->esw_attr->out_count; + parse_state->ct_clear = clear_action; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c index c614fc7fdc9c..2e615e0ba972 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c @@ -177,6 +177,12 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state, return -ENOMEM; parse_state->encap = false; + + if (parse_state->mpls_push) { + memcpy(&parse_attr->mpls_info[esw_attr->out_count], + &parse_state->mpls_info, sizeof(parse_state->mpls_info)); + parse_state->mpls_push = false; + } esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP; esw_attr->out_count++; /* attr->dests[].rep is resolved when we handle encap */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c index 784fc4f68b1e..89ca88c78840 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c @@ -22,6 +22,16 @@ tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state, return true; } +static void +copy_mpls_info(struct mlx5e_mpls_info *mpls_info, + const struct flow_action_entry *act) +{ + mpls_info->label = act->mpls_push.label; + mpls_info->tc = act->mpls_push.tc; + mpls_info->bos = act->mpls_push.bos; + mpls_info->ttl = act->mpls_push.ttl; +} + static int tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state, const struct flow_action_entry *act, @@ -29,6 +39,7 @@ tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state, struct mlx5_flow_attr *attr) { parse_state->mpls_push = true; + copy_mpls_info(&parse_state->mpls_info, act); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h index f832c26ff2c3..70b40ae384e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -35,6 +35,7 @@ enum { struct mlx5e_tc_flow_parse_attr { const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS]; + struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS]; struct net_device *filter_dev; struct mlx5_flow_spec spec; struct mlx5e_tc_mod_hdr_acts mod_hdr_acts; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 9918ed8c059b..d39d0dae22fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -750,6 +750,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5_flow_attr *attr = flow->attr; const struct ip_tunnel_info *tun_info; + const struct mlx5e_mpls_info *mpls_info; unsigned long tbl_time_before = 0; struct mlx5e_encap_entry *e; struct mlx5e_encap_key key; @@ -760,6 +761,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, parse_attr = attr->parse_attr; tun_info = parse_attr->tun_info[out_index]; + mpls_info = 
&parse_attr->mpls_info[out_index]; family = ip_tunnel_info_af(tun_info); key.ip_tun_key = &tun_info->key; key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev); @@ -810,6 +812,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, goto out_err_init; } e->tun_info = tun_info; + memcpy(&e->mpls_info, mpls_info, sizeof(*mpls_info)); err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack); if (err) goto out_err_init; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c index 60952b33b568..c5b1617d556f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c @@ -30,16 +30,15 @@ static int generate_ip_tun_hdr(char buf[], struct mlx5e_encap_entry *r) { const struct ip_tunnel_key *tun_key = &r->tun_info->key; + const struct mlx5e_mpls_info *mpls_info = &r->mpls_info; struct udphdr *udp = (struct udphdr *)(buf); struct mpls_shim_hdr *mpls; - u32 tun_id; - tun_id = be32_to_cpu(tunnel_id_to_key32(tun_key->tun_id)); mpls = (struct mpls_shim_hdr *)(udp + 1); *ip_proto = IPPROTO_UDP; udp->dest = tun_key->tp_dst; - *mpls = mpls_entry_encode(tun_id, tun_key->ttl, tun_key->tos, true); + *mpls = mpls_entry_encode(mpls_info->label, mpls_info->ttl, mpls_info->tc, mpls_info->bos); return 0; } @@ -60,37 +59,31 @@ static int parse_tunnel(struct mlx5e_priv *priv, void *headers_v) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); - struct flow_match_enc_keyid enc_keyid; struct flow_match_mpls match; void *misc2_c; void *misc2_v; - misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, - misc_parameters_2); - misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, - misc_parameters_2); - - if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) - return 0; - - if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) - return 0; - - flow_rule_match_enc_keyid(rule, &enc_keyid); - - if (!enc_keyid.mask->keyid) - return 0; - if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) && !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP)) return -EOPNOTSUPP; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) + return -EOPNOTSUPP; + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) + return 0; + flow_rule_match_mpls(rule, &match); /* Only support matching the first LSE */ if (match.mask->used_lses != 1) return -EOPNOTSUPP; + misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters_2); + misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters_2); + MLX5_SET(fte_match_set_misc2, misc2_c, outer_first_mpls_over_udp.mpls_label, match.mask->ls[0].mpls_label); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c index da169b816665..d4239e3b3c88 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c @@ -88,9 +88,6 @@ void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder, (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8); MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout); break; - case MLX5E_PACKET_MERGE_SHAMPO: - MLX5_SET(tirc, tirc, packet_merge_mask, MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO); - break; default: break; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 4cdf8e5b24c2..b789af07829c 100644 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -167,6 +167,11 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size) return pi; } +static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +{ + return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1); +} + struct mlx5e_shampo_umr { u16 len; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 338d65e2c9ce..56e10c84a706 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -341,8 +341,10 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, /* copy the inline part if required */ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { - memcpy(eseg->inline_hdr.start, xdptxd->data, MLX5E_XDP_MIN_INLINE); + memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start)); eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); + memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start), + MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start)); dma_len -= MLX5E_XDP_MIN_INLINE; dma_addr += MLX5E_XDP_MIN_INLINE; dseg++; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 2db9573a3fe6..b56fea142c24 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -157,11 +157,20 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb, /* Tunnel mode */ if (mode == XFRM_MODE_TUNNEL) { eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2; - eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2; if (xo->proto == IPPROTO_IPV6) eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6; - if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP) + + switch (xo->inner_ipproto) { + case IPPROTO_UDP: eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP; + fallthrough; + case IPPROTO_TCP: + /* IP | ESP | IP | [TCP | UDP] */ + eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2; + break; + default: + break; + } return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index b98db50c3418..428881e0adcb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -131,14 +131,17 @@ static inline bool mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg) { - struct xfrm_offload *xo = xfrm_offload(skb); + u8 inner_ipproto; if (!mlx5e_ipsec_eseg_meta(eseg)) return false; eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; - if (xo->inner_ipproto) { - eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM; + inner_ipproto = xfrm_offload(skb)->inner_ipproto; + if (inner_ipproto) { + eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM; + if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP) + eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM; } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; sq->stats->csum_partial_inner++; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 57d755db1cf5..6e80585d731f 100644 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1792,7 +1792,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, if (size_read < 0) { netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n", __func__, size_read); - return 0; + return size_read; } i += size_read; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index bf80fb612449..3667f5ef5990 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3616,8 +3616,7 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable) goto out; } - err = mlx5e_safe_switch_params(priv, &new_params, - mlx5e_modify_tirs_packet_merge_ctx, NULL, reset); + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset); out: mutex_unlock(&priv->state_lock); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index b01dacb6f527..b3f7520dfd08 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -183,6 +183,13 @@ struct mlx5e_decap_entry { struct rcu_head rcu; }; +struct mlx5e_mpls_info { + u32 label; + u8 tc; + u8 bos; + u8 ttl; +}; + struct mlx5e_encap_entry { /* attached neigh hash entry */ struct mlx5e_neigh_hash_entry *nhe; @@ -196,6 +203,7 @@ struct mlx5e_encap_entry { struct list_head route_list; struct mlx5_pkt_reformat *pkt_reformat; const struct ip_tunnel_info *tun_info; + struct mlx5e_mpls_info mpls_info; unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ struct net_device *out_dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index e86ccc22fb82..6530d7bd5045 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1117,7 +1117,7 @@ static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct tcphdr *skb_tcp_hd) { - u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index); + u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe); struct tcphdr *last_tcp_hd; void *last_hd_addr; @@ -1349,7 +1349,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, } /* True when explicitly set via priv flag, or XDP prog is loaded */ - if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) + if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) || + get_cqe_tls_offload(cqe)) goto csum_unnecessary; /* CQE csum doesn't cover padding octets in short ethernet @@ -1871,7 +1872,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, return skb; } -static void +static struct sk_buff * mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 header_index) { @@ -1895,7 +1896,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size); if (unlikely(!skb)) - return; + return NULL; /* queue up for recycling/reuse */ page_ref_inc(head->page); @@ -1907,7 +1908,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, ALIGN(head_size, sizeof(long))); if (unlikely(!skb)) { rq->stats->buff_alloc_err++; - return; + return NULL; } 
prefetchw(skb->data); @@ -1918,9 +1919,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, skb->tail += head_size; skb->len += head_size; } - rq->hw_gro_data->skb = skb; - NAPI_GRO_CB(skb)->count = 1; - skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size; + return skb; } static void @@ -1973,13 +1972,14 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size; - u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index); + u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe); u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset); u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); u32 data_offset = wqe_offset & (PAGE_SIZE - 1); u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); u16 wqe_id = be16_to_cpu(cqe->wqe_id); u32 page_idx = wqe_offset >> PAGE_SHIFT; + u16 head_size = cqe->shampo.header_size; struct sk_buff **skb = &rq->hw_gro_data->skb; bool flush = cqe->shampo.flush; bool match = cqe->shampo.match; @@ -2011,9 +2011,16 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq } if (!*skb) { - mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index); + if (likely(head_size)) + *skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index); + else + *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset, + page_idx); if (unlikely(!*skb)) goto free_hd_entry; + + NAPI_GRO_CB(*skb)->count = 1; + skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size; } else { NAPI_GRO_CB(*skb)->count++; if (NAPI_GRO_CB(*skb)->count == 2 && @@ -2027,8 +2034,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq } } - di = &wi->umr.dma_info[page_idx]; - mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset); + if (likely(head_size)) { + di = &wi->umr.dma_info[page_idx]; + mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset); + } mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb); if (flush) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 8c9163d2c646..08a75654f5f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -334,6 +334,7 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, netdev_info(ndev, "\t[%d] %s start..\n", i, st.name); buf[count] = st.st_func(priv); netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]); + count++; } mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 26e326fe503c..00f1d16db456 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -1254,9 +1254,6 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv, u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); - if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) - return; - MLX5_SET(ppcnt_reg, in, local_port, 1); MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical, @@ -1272,6 +1269,9 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv, void mlx5e_stats_fec_get(struct mlx5e_priv *priv, struct ethtool_fec_stats *fec_stats) { + if 
(!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group)) + return; + fec_set_corrected_bits_total(priv, fec_stats); fec_set_block_stats(priv, fec_stats); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3d908a7e1406..b27532a9301e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1414,7 +1414,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, if (err) goto err_out; - if (!attr->chain && esw_attr->int_port) { + if (!attr->chain && esw_attr->int_port && + attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { /* If decap route device is internal port, change the * source vport value in reg_c0 back to uplink just in * case the rule performs goto chain > 0. If we have a miss @@ -3191,6 +3192,18 @@ actions_match_supported(struct mlx5e_priv *priv, return false; } + if (!(~actions & + (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { + NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action"); + return false; + } + + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && + actions & MLX5_FLOW_CONTEXT_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported"); + return false; + } + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && !modify_header_match_supported(priv, &parse_attr->spec, flow_action, actions, ct_flow, ct_clear, extack)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 7fd33b356cc8..ee7ecb88adc1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -208,7 +208,7 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) int cpy1_sz = 2 * ETH_ALEN; int cpy2_sz = ihs - cpy1_sz; - memcpy(vhdr, skb->data, cpy1_sz); + memcpy(&vhdr->addrs, skb->data, cpy1_sz); vhdr->h_vlan_proto = skb->vlan_proto; vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb)); memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c index f690f430f40f..05e08cec5a8c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c @@ -1574,6 +1574,8 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw) { struct mlx5_esw_bridge_offloads *br_offloads; + ASSERT_RTNL(); + br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL); if (!br_offloads) return ERR_PTR(-ENOMEM); @@ -1590,6 +1592,8 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw) { struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads; + ASSERT_RTNL(); + if (!br_offloads) return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h index 3401188e0a60..51ac24e6ec3c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h +++
b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h @@ -21,7 +21,7 @@ DECLARE_EVENT_CLASS(mlx5_esw_bridge_fdb_template, __field(unsigned int, used) ), TP_fast_assign( - strncpy(__entry->dev_name, + strscpy(__entry->dev_name, netdev_name(fdb->dev), IFNAMSIZ); memcpy(__entry->addr, fdb->key.addr, ETH_ALEN); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 11bbcd5f5b8b..694c54066955 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -697,7 +697,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo } int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport, - u32 min_rate, u32 max_rate) + u32 max_rate, u32 min_rate) { int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 9a7b25692505..cfcd72bad9af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2838,10 +2838,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) return false; - if (mlx5_core_is_ecpf_esw_manager(esw->dev) || - mlx5_ecpf_vport_exists(esw->dev)) - return false; - return true; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index b628917e38e4..537c82b9aa53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2074,6 +2074,8 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) fte->node.del_hw_func = NULL; up_write_ref_node(&fte->node, false); tree_put_node(&fte->node, false); + } else { + up_write_ref_node(&fte->node, false); } kfree(handle); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 0b0234f9d694..84dbe46d5ede 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -132,7 +132,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; - del_timer(&fw_reset->timer); + del_timer_sync(&fw_reset->timer); } static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c index 1ca01a5b6cdd..626aa60b6099 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c @@ -126,6 +126,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, return; } + /* Handle multipath entry with lower priority value */ + if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority) + return; + /* Handle add/replace event */ nhs = fib_info_num_path(fi); if (nhs == 1) { @@ -135,12 +139,13 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev); if (i < 0) - i = MLX5_LAG_NORMAL_AFFINITY; - else - ++i; + return; + i++; mlx5_lag_set_port_affinity(ldev, i); } + + mp->mfi = fi; return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c index d5e47630e284..df58cba37930 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c @@ -121,12 +121,13 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains) u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains) { - if (!mlx5_chains_prios_supported(chains)) - return 1; - if (mlx5_chains_ignore_flow_level_supported(chains)) return UINT_MAX; + if (!chains->dev->priv.eswitch || + chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS) + return 1; + /* We should get here only for eswitch case */ return FDB_TC_MAX_PRIO; } @@ -211,7 +212,7 @@ static int create_chain_restore(struct fs_chain *chain) { struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch; - char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)]; + u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; struct mlx5_fs_chains *chains = chain->chains; enum mlx5e_tc_attr_to_reg chain_to_reg; struct mlx5_modify_hdr *mod_hdr; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2c774f367199..bba72b220cc3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -526,7 +526,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) /* Check log_max_qp from HCA caps to set in current profile */ if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) { - prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp); + prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp)); } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) { mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n", prof->log_max_qp, @@ -1840,10 +1840,12 @@ static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */ + { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ { PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */ + { PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */ { 0, } }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 1ef2b6a848c1..7b16a1188aab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -406,23 +406,24 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, switch (module_id) { case MLX5_MODULE_ID_SFP: - mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset); + mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &offset); break; case MLX5_MODULE_ID_QSFP: case MLX5_MODULE_ID_QSFP_PLUS: case MLX5_MODULE_ID_QSFP28: - mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset); + mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset); break; default: mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); return -EINVAL; } - if (query.offset + size > MLX5_EEPROM_PAGE_LENGTH) + if (offset + size > 
MLX5_EEPROM_PAGE_LENGTH) /* Cross pages read, read until offset 256 in low page */ - size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; + size = MLX5_EEPROM_PAGE_LENGTH - offset; query.size = size; + query.offset = offset; return mlx5_query_mcia(dev, &query, data); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c index 7f6fd9c5e371..e289cfdbce07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c @@ -4,7 +4,6 @@ #include "dr_types.h" #define DR_ICM_MODIFY_HDR_ALIGN_BASE 64 -#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024) struct mlx5dr_icm_pool { enum mlx5dr_icm_type icm_type; @@ -136,37 +135,35 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr) kvfree(icm_mr); } -static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk) +static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy) { - chunk->ste_arr = kvzalloc(chunk->num_of_entries * - sizeof(chunk->ste_arr[0]), GFP_KERNEL); - if (!chunk->ste_arr) - return -ENOMEM; - - chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries * - DR_STE_SIZE_REDUCED, GFP_KERNEL); - if (!chunk->hw_ste_arr) - goto out_free_ste_arr; - - chunk->miss_list = kvmalloc(chunk->num_of_entries * - sizeof(chunk->miss_list[0]), GFP_KERNEL); - if (!chunk->miss_list) - goto out_free_hw_ste_arr; + /* We support only one type of STE size, both for ConnectX-5 and later + * devices. Once the support for match STE which has a larger tag is + * added (32B instead of 16B), the STE size for devices later than + * ConnectX-5 needs to account for that. + */ + return DR_STE_SIZE_REDUCED; +} - return 0; +static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset) +{ + struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem; + int index = offset / DR_STE_SIZE; -out_free_hw_ste_arr: - kvfree(chunk->hw_ste_arr); -out_free_ste_arr: - kvfree(chunk->ste_arr); - return -ENOMEM; + chunk->ste_arr = &buddy->ste_arr[index]; + chunk->miss_list = &buddy->miss_list[index]; + chunk->hw_ste_arr = buddy->hw_ste_arr + + index * dr_icm_buddy_get_ste_size(buddy); } static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk) { - kvfree(chunk->miss_list); - kvfree(chunk->hw_ste_arr); - kvfree(chunk->ste_arr); + struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem; + + memset(chunk->hw_ste_arr, 0, + chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy)); + memset(chunk->ste_arr, 0, + chunk->num_of_entries * sizeof(chunk->ste_arr[0])); } static enum mlx5dr_icm_type @@ -189,6 +186,44 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk, kvfree(chunk); } +static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy) +{ + int num_of_entries = + mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz); + + buddy->ste_arr = kvcalloc(num_of_entries, + sizeof(struct mlx5dr_ste), GFP_KERNEL); + if (!buddy->ste_arr) + return -ENOMEM; + + /* Preallocate full STE size on non-ConnectX-5 devices since + * we need to support both full and reduced with the same cache. 
+ */ + buddy->hw_ste_arr = kvcalloc(num_of_entries, + dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL); + if (!buddy->hw_ste_arr) + goto free_ste_arr; + + buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL); + if (!buddy->miss_list) + goto free_hw_ste_arr; + + return 0; + +free_hw_ste_arr: + kvfree(buddy->hw_ste_arr); +free_ste_arr: + kvfree(buddy->ste_arr); + return -ENOMEM; +} + +static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy) +{ + kvfree(buddy->ste_arr); + kvfree(buddy->hw_ste_arr); + kvfree(buddy->miss_list); +} + static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool) { struct mlx5dr_icm_buddy_mem *buddy; @@ -208,11 +243,19 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool) buddy->icm_mr = icm_mr; buddy->pool = pool; + if (pool->icm_type == DR_ICM_TYPE_STE) { + /* Reduce allocations by preallocating and reusing the STE structures */ + if (dr_icm_buddy_init_ste_cache(buddy)) + goto err_cleanup_buddy; + } + /* add it to the -start- of the list in order to search in it first */ list_add(&buddy->list_node, &pool->buddy_mem_list); return 0; +err_cleanup_buddy: + mlx5dr_buddy_cleanup(buddy); err_free_buddy: kvfree(buddy); free_mr: @@ -234,6 +277,9 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy) mlx5dr_buddy_cleanup(buddy); + if (buddy->pool->icm_type == DR_ICM_TYPE_STE) + dr_icm_buddy_cleanup_ste_cache(buddy); + kvfree(buddy); } @@ -261,34 +307,30 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool, chunk->byte_size = mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type); chunk->seg = seg; + chunk->buddy_mem = buddy_mem_pool; - if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) { - mlx5dr_err(pool->dmn, - "Failed to init ste arrays (order: %d)\n", - chunk_size); - goto out_free_chunk; - } + if (pool->icm_type == DR_ICM_TYPE_STE) + dr_icm_chunk_ste_init(chunk, offset); buddy_mem_pool->used_memory += chunk->byte_size; - chunk->buddy_mem = buddy_mem_pool; INIT_LIST_HEAD(&chunk->chunk_list); /* chunk now is part of the used_list */ list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list); return chunk; - -out_free_chunk: - kvfree(chunk); - return NULL; } static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool) { - if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL) - return true; + int allow_hot_size; + + /* sync when hot memory reaches half of the pool size */ + allow_hot_size = + mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, + pool->icm_type) / 2; - return false; + return pool->hot_memory_size > allow_hot_size; } static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index e87cf498c77b..38971fe1dfe1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -13,18 +13,6 @@ static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec) return (spec->dmac_47_16 || spec->dmac_15_0); } -static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec) -{ - return (spec->src_ip_127_96 || spec->src_ip_95_64 || - spec->src_ip_63_32 || spec->src_ip_31_0); -} - -static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec) -{ - return (spec->dst_ip_127_96 || spec->dst_ip_95_64 || - spec->dst_ip_63_32 || spec->dst_ip_31_0); -} - static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec 
*spec) { return (spec->ip_protocol || spec->frag || spec->tcp_flags || @@ -503,11 +491,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, &mask, inner, rx); if (outer_ipv == DR_RULE_IPV6) { - if (dr_mask_is_dst_addr_set(&mask.outer)) + if (DR_MASK_IS_DST_IP_SET(&mask.outer)) mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++], &mask, inner, rx); - if (dr_mask_is_src_addr_set(&mask.outer)) + if (DR_MASK_IS_SRC_IP_SET(&mask.outer)) mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++], &mask, inner, rx); @@ -610,11 +598,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, &mask, inner, rx); if (inner_ipv == DR_RULE_IPV6) { - if (dr_mask_is_dst_addr_set(&mask.inner)) + if (DR_MASK_IS_DST_IP_SET(&mask.inner)) mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++], &mask, inner, rx); - if (dr_mask_is_src_addr_set(&mask.inner)) + if (DR_MASK_IS_SRC_IP_SET(&mask.inner)) mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++], &mask, inner, rx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 7e61742e58a0..187e29b409b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -602,12 +602,34 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx, used_hw_action_num); } +static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn, + struct mlx5dr_match_spec *spec) +{ + if (spec->ip_version) { + if (spec->ip_version != 0xf) { + mlx5dr_err(dmn, + "Partial ip_version mask with src/dst IP is not supported\n"); + return -EINVAL; + } + } else if (spec->ethertype != 0xffff && + (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) { + mlx5dr_err(dmn, + "Partial/no ethertype mask with src/dst IP is not supported\n"); + return -EINVAL; + } + + return 0; +} + int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn, u8 match_criteria, struct mlx5dr_match_param *mask, struct mlx5dr_match_param *value) { - if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) { + if (value) + return 0; + + if (match_criteria & DR_MATCHER_CRITERIA_MISC) { if (mask->misc.source_port && mask->misc.source_port != 0xffff) { mlx5dr_err(dmn, "Partial mask source_port is not supported\n"); @@ -621,6 +643,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn, } } + if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) && + dr_ste_build_pre_check_spec(dmn, &mask->outer)) + return -EINVAL; + + if ((match_criteria & DR_MATCHER_CRITERIA_INNER) && + dr_ste_build_pre_check_spec(dmn, &mask->inner)) + return -EINVAL; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 1b3d484b99be..55fcb751e24a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -798,6 +798,16 @@ struct mlx5dr_match_param { (_misc3)->icmpv4_code || \ (_misc3)->icmpv4_header_data) +#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \ + (_spec)->src_ip_95_64 || \ + (_spec)->src_ip_63_32 || \ + (_spec)->src_ip_31_0) + +#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \ + (_spec)->dst_ip_95_64 || \ + (_spec)->dst_ip_63_32 || \ + (_spec)->dst_ip_31_0) + struct mlx5dr_esw_caps { u64 drop_icm_address_rx; u64 drop_icm_address_tx; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index a476da2424f8..3f311462bedf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -233,7 +233,11 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst) dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID; } -#define MLX5_FLOW_CONTEXT_ACTION_MAX 32 +/* We want to support a rule with 32 destinations, which means we need to + * account for 32 destinations plus usually a counter plus one more action + * for a multi-destination flow table. + */ +#define MLX5_FLOW_CONTEXT_ACTION_MAX 34 static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_group *group, @@ -403,9 +407,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, enum mlx5_flow_destination_type type = dst->dest_attr.type; u32 id; - if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || - num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) { - err = -ENOSPC; + if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || + num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; goto free_actions; } @@ -478,8 +482,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, MLX5_FLOW_DESTINATION_TYPE_COUNTER) continue; - if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { - err = -ENOSPC; + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || + fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; goto free_actions; } @@ -499,14 +504,28 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, params.match_sz = match_sz; params.match_buf = (u64 *)fte->val; if (num_term_actions == 1) { - if (term_actions->reformat) + if (term_actions->reformat) { + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; + goto free_actions; + } actions[num_actions++] = term_actions->reformat; + } + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; + goto free_actions; + } actions[num_actions++] = term_actions->dest; } else if (num_term_actions > 1) { bool ignore_flow_level = !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL); + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || + fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; + goto free_actions; + } tmp_action = mlx5dr_action_create_mult_dest_tbl(domain, term_actions, num_term_actions, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index c7c93131b762..dfa223415fe2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -160,6 +160,11 @@ struct mlx5dr_icm_buddy_mem { * sync_ste command sets them free. 
*/ struct list_head hot_list; + + /* Memory optimisation */ + struct mlx5dr_ste *ste_arr; + struct list_head *miss_list; + u8 *hw_ste_arr; }; int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c index 59783fc46a7b..10b866e9f726 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c @@ -1103,7 +1103,7 @@ void sparx5_get_stats64(struct net_device *ndev, stats->tx_carrier_errors = portstats[spx5_stats_tx_csense_cnt]; stats->tx_window_errors = portstats[spx5_stats_tx_late_coll_cnt]; stats->rx_dropped = portstats[spx5_stats_ana_ac_port_stat_lsb_cnt]; - for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++stats) + for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx) stats->rx_dropped += portstats[spx5_stats_green_p0_rx_port_drop + idx]; stats->tx_dropped = portstats[spx5_stats_tx_local_drop]; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index a1acc9b461f2..d40e18ce3293 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -16,6 +16,8 @@ #include <linux/phylink.h> #include <linux/hrtimer.h> +#include "sparx5_main_regs.h" + /* Target chip type */ enum spx5_target_chiptype { SPX5_TARGET_CT_7546 = 0x7546, /* SparX-5-64 Enterprise */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c index dc7e5ea6ec15..148d431fcde4 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -145,9 +145,9 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap) skb_put(skb, byte_cnt - ETH_FCS_LEN); eth_skb_pad(skb); skb->protocol = eth_type_trans(skb, netdev); - netif_rx(skb); netdev->stats.rx_bytes += skb->len; netdev->stats.rx_packets++; + netif_rx(skb); } static int sparx5_inject(struct sparx5 *sparx5, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c index 4ce490a25f33..8e56ffa1c4f7 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c @@ -58,16 +58,6 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid, struct sparx5 *sparx5 = port->sparx5; int ret; - /* Make the port a member of the VLAN */ - set_bit(port->portno, sparx5->vlan_mask[vid]); - ret = sparx5_vlant_set_mask(sparx5, vid); - if (ret) - return ret; - - /* Default ingress vlan classification */ - if (pvid) - port->pvid = vid; - /* Untagged egress vlan classification */ if (untagged && port->vid != vid) { if (port->vid) { @@ -79,6 +69,16 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid, port->vid = vid; } + /* Make the port a member of the VLAN */ + set_bit(port->portno, sparx5->vlan_mask[vid]); + ret = sparx5_vlant_set_mask(sparx5, vid); + if (ret) + return ret; + + /* Default ingress vlan classification */ + if (pvid) + port->pvid = vid; + sparx5_vlan_port_apply(sparx5, port); return 0; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 455293aa6343..fd3ceb74620d 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -549,14 +549,18 @@ EXPORT_SYMBOL(ocelot_vlan_add); int ocelot_vlan_del(struct ocelot *ocelot, int 
port, u16 vid) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + bool del_pvid = false; int err; + if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid) + del_pvid = true; + err = ocelot_vlan_member_del(ocelot, port, vid); if (err) return err; /* Ingress */ - if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid) + if (del_pvid) ocelot_port_set_pvid(ocelot, port, NULL); /* Egress */ @@ -1432,6 +1436,8 @@ static void ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap) { trap->key_type = OCELOT_VCAP_KEY_IPV4; + trap->key.ipv4.proto.value[0] = IPPROTO_UDP; + trap->key.ipv4.proto.mask[0] = 0xff; trap->key.ipv4.dport.value = PTP_EV_PORT; trap->key.ipv4.dport.mask = 0xffff; } @@ -1440,6 +1446,8 @@ static void ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap) { trap->key_type = OCELOT_VCAP_KEY_IPV6; + trap->key.ipv4.proto.value[0] = IPPROTO_UDP; + trap->key.ipv4.proto.mask[0] = 0xff; trap->key.ipv6.dport.value = PTP_EV_PORT; trap->key.ipv6.dport.mask = 0xffff; } @@ -1448,6 +1456,8 @@ static void ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap) { trap->key_type = OCELOT_VCAP_KEY_IPV4; + trap->key.ipv4.proto.value[0] = IPPROTO_UDP; + trap->key.ipv4.proto.mask[0] = 0xff; trap->key.ipv4.dport.value = PTP_GEN_PORT; trap->key.ipv4.dport.mask = 0xffff; } @@ -1456,6 +1466,8 @@ static void ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap) { trap->key_type = OCELOT_VCAP_KEY_IPV6; + trap->key.ipv4.proto.value[0] = IPPROTO_UDP; + trap->key.ipv4.proto.mask[0] = 0xff; trap->key.ipv6.dport.value = PTP_GEN_PORT; trap->key.ipv6.dport.mask = 0xffff; } @@ -1737,12 +1749,11 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data) } EXPORT_SYMBOL(ocelot_get_strings); +/* Caller must hold &ocelot->stats_lock */ static void ocelot_update_stats(struct ocelot *ocelot) { int i, j; - mutex_lock(&ocelot->stats_lock); - for (i = 0; i < ocelot->num_phys_ports; i++) { /* Configure the port to read the stats from */ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG); @@ -1761,8 +1772,6 @@ static void ocelot_update_stats(struct ocelot *ocelot) ~(u64)U32_MAX) + val; } } - - mutex_unlock(&ocelot->stats_lock); } static void ocelot_check_stats_work(struct work_struct *work) @@ -1771,7 +1780,9 @@ static void ocelot_check_stats_work(struct work_struct *work) struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work); + mutex_lock(&ocelot->stats_lock); ocelot_update_stats(ocelot); + mutex_unlock(&ocelot->stats_lock); queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, OCELOT_STATS_CHECK_DELAY); @@ -1781,12 +1792,16 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data) { int i; + mutex_lock(&ocelot->stats_lock); + /* check and update now */ ocelot_update_stats(ocelot); /* Copy all counters */ for (i = 0; i < ocelot->num_stats; i++) *data++ = ocelot->stats[port * ocelot->num_stats + i]; + + mutex_unlock(&ocelot->stats_lock); } EXPORT_SYMBOL(ocelot_get_ethtool_stats); diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 949858891973..fdb4d7e7296c 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -60,6 +60,12 @@ static int ocelot_chain_to_block(int chain, bool ingress) */ static int ocelot_chain_to_lookup(int chain) { + /* Backwards compatibility with older, single-chain tc-flower + * offload support in Ocelot + */ + if (chain == 
0) + return 0; + return (chain / VCAP_LOOKUP) % 10; } @@ -68,7 +74,15 @@ static int ocelot_chain_to_lookup(int chain) */ static int ocelot_chain_to_pag(int chain) { - int lookup = ocelot_chain_to_lookup(chain); + int lookup; + + /* Backwards compatibility with older, single-chain tc-flower + * offload support in Ocelot + */ + if (chain == 0) + return 0; + + lookup = ocelot_chain_to_lookup(chain); /* calculate PAG value as chain index relative to the first PAG */ return chain - VCAP_IS2_CHAIN(lookup, 0); diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 784292b16290..1543e47456d5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -723,6 +723,8 @@ static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev) return true; if (netif_is_gretap(netdev)) return true; + if (netif_is_ip6gretap(netdev)) + return true; return false; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index dfb4468fe287..cb43651ea9ba 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -922,8 +922,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, int port, bool mod) { struct nfp_flower_priv *priv = app->priv; - int ida_idx = NFP_MAX_MAC_INDEX, err; struct nfp_tun_offloaded_mac *entry; + int ida_idx = -1, err; u16 nfp_mac_idx = 0; entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); @@ -997,7 +997,7 @@ err_remove_hash: err_free_entry: kfree(entry); err_free_ida: - if (ida_idx != NFP_MAX_MAC_INDEX) + if (ida_idx != -1) ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); return err; @@ -1011,6 +1011,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, struct nfp_flower_repr_priv *repr_priv; struct nfp_tun_offloaded_mac *entry; struct nfp_repr *repr; + u16 nfp_mac_idx; int ida_idx; entry = nfp_tunnel_lookup_offloaded_macs(app, mac); @@ -1029,8 +1030,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, entry->bridge_count--; if (!entry->bridge_count && entry->ref_count) { - u16 nfp_mac_idx; - nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT; if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false)) { @@ -1046,7 +1045,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, /* If MAC is now used by 1 repr set the offloaded MAC index to port. */ if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) { - u16 nfp_mac_idx; int port, err; repr_priv = list_first_entry(&entry->repr_list, @@ -1074,8 +1072,14 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node, offloaded_macs_params)); + + if (nfp_flower_is_supported_bridge(netdev)) + nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT; + else + nfp_mac_idx = entry->index; + /* If MAC has global ID then extract and free the ida entry. 
*/ - if (nfp_tunnel_is_mac_idx_global(entry->index)) { + if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) { ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index); ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); } diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index bc39558fe82b..756f97dce85b 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1471,6 +1471,7 @@ static int lpc_eth_drv_resume(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct netdata_local *pldat; + int ret; if (device_may_wakeup(&pdev->dev)) disable_irq_wake(ndev->irq); @@ -1480,7 +1481,9 @@ static int lpc_eth_drv_resume(struct platform_device *pdev) pldat = netdev_priv(ndev); /* Enable interface clock */ - clk_enable(pldat->clk); + ret = clk_enable(pldat->clk); + if (ret) + return ret; /* Reset and initialize */ __lpc_eth_reset(pldat); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 8ac38828ba45..48cf4355bc47 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -3806,11 +3806,11 @@ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) return found; } -static void qed_iov_get_link(struct qed_hwfn *p_hwfn, - u16 vfid, - struct qed_mcp_link_params *p_params, - struct qed_mcp_link_state *p_link, - struct qed_mcp_link_capabilities *p_caps) +static int qed_iov_get_link(struct qed_hwfn *p_hwfn, + u16 vfid, + struct qed_mcp_link_params *p_params, + struct qed_mcp_link_state *p_link, + struct qed_mcp_link_capabilities *p_caps) { struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, vfid, @@ -3818,7 +3818,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn, struct qed_bulletin_content *p_bulletin; if (!p_vf) - return; + return -EINVAL; p_bulletin = p_vf->bulletin.p_virt; @@ -3828,6 +3828,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn, __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); if (p_caps) __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); + return 0; } static int @@ -4686,6 +4687,7 @@ static int qed_get_vf_config(struct qed_dev *cdev, struct qed_public_vf_info *vf_info; struct qed_mcp_link_state link; u32 tx_rate; + int ret; /* Sanitize request */ if (IS_VF(cdev)) @@ -4699,7 +4701,9 @@ static int qed_get_vf_config(struct qed_dev *cdev, vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); - qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); + ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); + if (ret) + return ret; /* Fill information about VF */ ivi->vf = vf_id; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 597cd9cd57b5..7b0e390c0b07 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -513,6 +513,9 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) p_iov->bulletin.size, &p_iov->bulletin.phys, GFP_KERNEL); + if (!p_iov->bulletin.p_virt) + goto free_pf2vf_reply; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n", p_iov->bulletin.p_virt, @@ -552,6 +555,10 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) return rc; +free_pf2vf_reply: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union pfvf_tlvs), + p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys); free_vf2pf_request: dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(union vfpf_tlvs), diff --git 
a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 32161a56726c..2881f5b2b5f4 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -2285,18 +2285,18 @@ static int __init sxgbe_cmdline_opt(char *str) char *opt; if (!str || !*str) - return -EINVAL; + return 1; while ((opt = strsep(&str, ",")) != NULL) { if (!strncmp(opt, "eee_timer:", 10)) { if (kstrtoint(opt + 10, 0, &eee_timer)) goto err; } } - return 0; + return 1; err: pr_err("%s: ERROR broken module parameter conversion\n", __func__); - return -EINVAL; + return 1; } __setup("sxgbeeth=", sxgbe_cmdline_opt); diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index be6bfd6b7ec7..50baf62b2cbc 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -163,9 +163,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ spin_lock_bh(&mcdi->iface_lock); ++mcdi->seqno; + seqno = mcdi->seqno & SEQ_MASK; spin_unlock_bh(&mcdi->iface_lock); - seqno = mcdi->seqno & SEQ_MASK; xflags = 0; if (mcdi->mode == MCDI_MODE_EVENTS) xflags |= MCDI_HEADER_XFLAGS_EVREQ; diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index dd6f69ced4ee..fc9cef9dcefc 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -1648,7 +1648,7 @@ static int smc911x_ethtool_geteeprom(struct net_device *dev, return ret; if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0) return ret; - } + } memcpy(data, eebuf+eeprom->offset, eeprom->len); return 0; } @@ -1667,11 +1667,11 @@ static int smc911x_ethtool_seteeprom(struct net_device *dev, return ret; /* write byte */ if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0) - return ret; + return ret; if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0) return ret; - } - return 0; + } + return 0; } static int smc911x_ethtool_geteeprom_len(struct net_device *dev) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index dde5b772a5af..c3f10a92b62b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -49,13 +49,15 @@ struct visconti_eth { void __iomem *reg; u32 phy_intf_sel; struct clk *phy_ref_clk; + struct device *dev; spinlock_t lock; /* lock to protect register update */ }; static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed) { struct visconti_eth *dwmac = priv; - unsigned int val, clk_sel_val; + struct net_device *netdev = dev_get_drvdata(dwmac->dev); + unsigned int val, clk_sel_val = 0; unsigned long flags; spin_lock_irqsave(&dwmac->lock, flags); @@ -85,7 +87,9 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed) break; default: /* No bit control */ - break; + netdev_err(netdev, "Unsupported speed request (%d)", speed); + spin_unlock_irqrestore(&dwmac->lock, flags); + return; } writel(val, dwmac->reg + MAC_CTRL_REG); @@ -229,6 +233,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev) spin_lock_init(&dwmac->lock); dwmac->reg = stmmac_res.addr; + dwmac->dev = &pdev->dev; plat_dat->bsp_priv = dwmac; plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index 
1914ad698cab..acd70b9a3173 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -150,6 +150,7 @@ #define NUM_DWMAC100_DMA_REGS 9 #define NUM_DWMAC1000_DMA_REGS 23 +#define NUM_DWMAC4_DMA_REGS 27 void dwmac_enable_dma_transmission(void __iomem *ioaddr); void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 164dff5ec32e..abfb3cd5958d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -21,10 +21,18 @@ #include "dwxgmac2.h" #define REG_SPACE_SIZE 0x1060 +#define GMAC4_REG_SPACE_SIZE 0x116C #define MAC100_ETHTOOL_NAME "st_mac100" #define GMAC_ETHTOOL_NAME "st_gmac" #define XGMAC_ETHTOOL_NAME "st_xgmac" +/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h + * + * It is here because dwmac_dma.h and dwmac4_dam.h can not be included at the + * same time due to the conflicting macro names. + */ +#define GMAC4_DMA_CHAN_BASE_ADDR 0x00001100 + #define ETHTOOL_DMA_OFFSET 55 struct stmmac_stats { @@ -434,6 +442,8 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev) if (priv->plat->has_xgmac) return XGMAC_REGSIZE * 4; + else if (priv->plat->has_gmac4) + return GMAC4_REG_SPACE_SIZE; return REG_SPACE_SIZE; } @@ -446,8 +456,13 @@ static void stmmac_ethtool_gregs(struct net_device *dev, stmmac_dump_mac_regs(priv, priv->hw, reg_space); stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space); - if (!priv->plat->has_xgmac) { - /* Copy DMA registers to where ethtool expects them */ + /* Copy DMA registers to where ethtool expects them */ + if (priv->plat->has_gmac4) { + /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */ + memcpy(&reg_space[ETHTOOL_DMA_OFFSET], + &reg_space[GMAC4_DMA_CHAN_BASE_ADDR / 4], + NUM_DWMAC4_DMA_REGS * 4); + } else if (!priv->plat->has_xgmac) { memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4], NUM_DWMAC1000_DMA_REGS * 4); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 074e2cdfb0fa..a7ec9f4d46ce 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -145,15 +145,20 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, static void get_systime(void __iomem *ioaddr, u64 *systime) { - u64 ns; - - /* Get the TSSS value */ - ns = readl(ioaddr + PTP_STNSR); - /* Get the TSS and convert sec time value to nanosecond */ - ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; + u64 ns, sec0, sec1; + + /* Get the TSS value */ + sec1 = readl_relaxed(ioaddr + PTP_STSR); + do { + sec0 = sec1; + /* Get the TSSS value */ + ns = readl_relaxed(ioaddr + PTP_STNSR); + /* Get the TSS value */ + sec1 = readl_relaxed(ioaddr + PTP_STSR); + } while (sec0 != sec1); if (systime) - *systime = ns; + *systime = ns + (sec1 * 1000000000ULL); } static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 639a753266e6..422e3225f476 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2262,6 +2262,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) stmmac_stop_tx(priv, priv->ioaddr, chan); } +static void stmmac_enable_all_dma_irq(struct
stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); + u32 chan; + + for (chan = 0; chan < dma_csr_ch; chan++) { + struct stmmac_channel *ch = &priv->channel[chan]; + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + spin_unlock_irqrestore(&ch->lock, flags); + } +} + /** * stmmac_start_all_dma - start all RX and TX DMA channels * @priv: driver private structure @@ -2904,8 +2921,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) stmmac_axi(priv, priv->ioaddr, priv->plat->axi); /* DMA CSR Channel configuration */ - for (chan = 0; chan < dma_csr_ch; chan++) + for (chan = 0; chan < dma_csr_ch; chan++) { stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + } /* DMA RX Channel Configuration */ for (chan = 0; chan < rx_channels_count; chan++) { @@ -3761,6 +3780,7 @@ static int stmmac_open(struct net_device *dev) stmmac_enable_all_queues(priv); netif_tx_start_all_queues(priv->dev); + stmmac_enable_all_dma_irq(priv); return 0; @@ -6510,8 +6530,10 @@ int stmmac_xdp_open(struct net_device *dev) } /* DMA CSR Channel configuration */ - for (chan = 0; chan < dma_csr_ch; chan++) + for (chan = 0; chan < dma_csr_ch; chan++) { stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + } /* Adjust Split header */ sph_en = (priv->hw->rx_csum > 0) && priv->sph; @@ -6572,6 +6594,7 @@ int stmmac_xdp_open(struct net_device *dev) stmmac_enable_all_queues(priv); netif_carrier_on(dev); netif_tx_start_all_queues(dev); + stmmac_enable_all_dma_irq(priv); return 0; @@ -7252,6 +7275,10 @@ int stmmac_dvr_remove(struct device *dev) netdev_info(priv->dev, "%s: removing driver", __func__); + pm_runtime_get_sync(dev); + pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + stmmac_stop_all_dma(priv); stmmac_mac_set(priv, priv->ioaddr, false); netif_carrier_off(ndev); @@ -7270,8 +7297,6 @@ int stmmac_dvr_remove(struct device *dev) if (priv->plat->stmmac_rst) reset_control_assert(priv->plat->stmmac_rst); reset_control_assert(priv->plat->stmmac_ahb_rst); - pm_runtime_put(dev); - pm_runtime_disable(dev); if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); @@ -7449,6 +7474,7 @@ int stmmac_resume(struct device *dev) stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); stmmac_enable_all_queues(priv); + stmmac_enable_all_dma_irq(priv); mutex_unlock(&priv->lock); rtnl_unlock(); @@ -7465,7 +7491,7 @@ static int __init stmmac_cmdline_opt(char *str) char *opt; if (!str || !*str) - return -EINVAL; + return 1; while ((opt = strsep(&str, ",")) != NULL) { if (!strncmp(opt, "debug:", 6)) { if (kstrtoint(opt + 6, 0, &debug)) @@ -7496,11 +7522,11 @@ static int __init stmmac_cmdline_opt(char *str) goto err; } } - return 0; + return 1; err: pr_err("%s: ERROR broken module parameter conversion", __func__); - return -EINVAL; + return 1; } __setup("stmmaceth=", stmmac_cmdline_opt); diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index ad9029ae6848..77e5dffb558f 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -3146,7 +3146,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, if (err) { printk(KERN_ERR "happymeal(PCI): Cannot register net device, " 
"aborting.\n"); - goto err_out_iounmap; + goto err_out_free_coherent; } pci_set_drvdata(pdev, hp); @@ -3179,6 +3179,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, return 0; +err_out_free_coherent: + dma_free_coherent(hp->dma_dev, PAGE_SIZE, + hp->happy_block, hp->hblock_dvma); + err_out_iounmap: iounmap(hp->gregs); diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index dc70a6bfaa6a..92ca739fac01 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -568,7 +568,9 @@ int cpts_register(struct cpts *cpts) for (i = 0; i < CPTS_MAX_EVENTS; i++) list_add(&cpts->pool_data[i].list, &cpts->pool); - clk_enable(cpts->refclk); + err = clk_enable(cpts->refclk); + if (err) + return err; cpts_write32(cpts, CPTS_EN, control); cpts_write32(cpts, TS_PEND_EN, int_enable); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index b900ab5aef2a..64c7e26c3b75 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1433,6 +1433,8 @@ static int temac_probe(struct platform_device *pdev) lp->indirect_lock = devm_kmalloc(&pdev->dev, sizeof(*lp->indirect_lock), GFP_KERNEL); + if (!lp->indirect_lock) + return -ENOMEM; spin_lock_init(lp->indirect_lock); } diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 519599480b15..77fa2cb03aca 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1183,7 +1183,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) if (rc) { dev_err(dev, "Cannot register network device, aborting\n"); - goto error; + goto put_node; } dev_info(dev, @@ -1191,6 +1191,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev) (unsigned long __force)ndev->mem_start, lp->base_addr, ndev->irq); return 0; +put_node: + of_node_put(lp->phy_node); error: free_netdev(ndev); return rc; diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index b1fc153125d9..45c3c4a1101b 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -668,11 +668,11 @@ static void sixpack_close(struct tty_struct *tty) */ netif_stop_queue(sp->dev); + unregister_netdev(sp->dev); + del_timer_sync(&sp->tx_t); del_timer_sync(&sp->resync_t); - unregister_netdev(sp->dev); - /* Free all 6pack frame buffers after unreg. 
*/ kfree(sp->rbuff); kfree(sp->xbuff); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 3646469433b1..fde1c492ca02 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1587,6 +1587,9 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, pcpu_sum = kvmalloc_array(num_possible_cpus(), sizeof(struct netvsc_ethtool_pcpu_stats), GFP_KERNEL); + if (!pcpu_sum) + return; + netvsc_get_pcpu_stats(dev, pcpu_sum); for_each_present_cpu(cpu) { struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu]; diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 7d67f41387f5..4f5ef8a9a9a8 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -100,6 +100,7 @@ struct at86rf230_local { unsigned long cal_timeout; bool is_tx; bool is_tx_from_off; + bool was_tx; u8 tx_retry; struct sk_buff *tx_skb; struct at86rf230_state_change tx; @@ -343,7 +344,11 @@ at86rf230_async_error_recover_complete(void *context) if (ctx->free) kfree(ctx); - ieee802154_wake_queue(lp->hw); + if (lp->was_tx) { + lp->was_tx = 0; + dev_kfree_skb_any(lp->tx_skb); + ieee802154_wake_queue(lp->hw); + } } static void @@ -352,7 +357,11 @@ at86rf230_async_error_recover(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - lp->is_tx = 0; + if (lp->is_tx) { + lp->was_tx = 1; + lp->is_tx = 0; + } + at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, at86rf230_async_error_recover_complete); } diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index ece6ff6049f6..2bc730fd260e 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -1771,6 +1771,7 @@ static int ca8210_async_xmit_complete( status ); if (status != MAC_TRANSACTION_OVERFLOW) { + dev_kfree_skb_any(priv->tx_skb); ieee802154_wake_queue(priv->hw); return 0; } @@ -2974,8 +2975,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw) ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND; ca8210_hw->phy->cca_ed_level = -9800; ca8210_hw->phy->symbol_duration = 16; - ca8210_hw->phy->lifs_period = 40; - ca8210_hw->phy->sifs_period = 12; + ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration; + ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration; ca8210_hw->flags = IEEE802154_HW_AFILT | IEEE802154_HW_OMIT_CKSUM | diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index 8caa61ec718f..36f1c5aa98fc 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -786,6 +786,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev, goto err_pib; } + pib->channel = 13; rcu_assign_pointer(phy->pib, pib); phy->idx = idx; INIT_LIST_HEAD(&phy->edges); diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 8dc04e2590b1..383231b85464 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -976,8 +976,8 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp) dev_dbg(printdev(lp), "%s\n", __func__); phy->symbol_duration = 16; - phy->lifs_period = 40; - phy->sifs_period = 12; + phy->lifs_period = 40 * phy->symbol_duration; + phy->sifs_period = 12 * phy->symbol_duration; hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig index d037682fb7ad..6782c2cbf542 100644 --- 
a/drivers/net/ipa/Kconfig +++ b/drivers/net/ipa/Kconfig @@ -2,7 +2,9 @@ config QCOM_IPA tristate "Qualcomm IPA support" depends on NET && QCOM_SMEM depends on ARCH_QCOM || COMPILE_TEST + depends on INTERCONNECT depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST) + depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n select QCOM_MDT_LOADER if ARCH_QCOM select QCOM_SCM select QCOM_QMI_HELPERS diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c index b1c6c0fcb654..f2989aac47a6 100644 --- a/drivers/net/ipa/ipa_power.c +++ b/drivers/net/ipa/ipa_power.c @@ -11,6 +11,8 @@ #include <linux/pm_runtime.h> #include <linux/bitops.h> +#include "linux/soc/qcom/qcom_aoss.h" + #include "ipa.h" #include "ipa_power.h" #include "ipa_endpoint.h" @@ -64,6 +66,7 @@ enum ipa_power_flag { * struct ipa_power - IPA power management information * @dev: IPA device pointer * @core: IPA core clock + * @qmp: QMP handle for AOSS communication * @spinlock: Protects modem TX queue enable/disable * @flags: Boolean state flags * @interconnect_count: Number of elements in interconnect[] @@ -72,6 +75,7 @@ enum ipa_power_flag { struct ipa_power { struct device *dev; struct clk *core; + struct qmp *qmp; spinlock_t spinlock; /* used with STOPPED/STARTED power flags */ DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT); u32 interconnect_count; @@ -382,6 +386,47 @@ void ipa_power_modem_queue_active(struct ipa *ipa) clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags); } +static int ipa_power_retention_init(struct ipa_power *power) +{ + struct qmp *qmp = qmp_get(power->dev); + + if (IS_ERR(qmp)) { + if (PTR_ERR(qmp) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + /* We assume any other error means it's not defined/needed */ + qmp = NULL; + } + power->qmp = qmp; + + return 0; +} + +static void ipa_power_retention_exit(struct ipa_power *power) +{ + qmp_put(power->qmp); + power->qmp = NULL; +} + +/* Control register retention on power collapse */ +void ipa_power_retention(struct ipa *ipa, bool enable) +{ + static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }"; + struct ipa_power *power = ipa->power; + char buf[36]; /* Exactly enough for fmt[]; size a multiple of 4 */ + int ret; + + if (!power->qmp) + return; /* Not needed on this platform */ + + (void)snprintf(buf, sizeof(buf), fmt, enable ? '1' : '0'); + + ret = qmp_send(power->qmp, buf, sizeof(buf)); + if (ret) + dev_err(power->dev, "error %d sending QMP %sable request\n", + ret, enable ? 
"en" : "dis"); +} + int ipa_power_setup(struct ipa *ipa) { int ret; @@ -438,12 +483,18 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data) if (ret) goto err_kfree; + ret = ipa_power_retention_init(power); + if (ret) + goto err_interconnect_exit; + pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(dev); pm_runtime_enable(dev); return power; +err_interconnect_exit: + ipa_interconnect_exit(power); err_kfree: kfree(power); err_clk_put: @@ -460,6 +511,7 @@ void ipa_power_exit(struct ipa_power *power) pm_runtime_disable(dev); pm_runtime_dont_use_autosuspend(dev); + ipa_power_retention_exit(power); ipa_interconnect_exit(power); kfree(power); clk_put(clk); diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h index 2151805d7fbb..6f84f057a209 100644 --- a/drivers/net/ipa/ipa_power.h +++ b/drivers/net/ipa/ipa_power.h @@ -41,6 +41,13 @@ void ipa_power_modem_queue_wake(struct ipa *ipa); void ipa_power_modem_queue_active(struct ipa *ipa); /** + * ipa_power_retention() - Control register retention on power collapse + * @ipa: IPA pointer + * @enable: Whether retention should be enabled or disabled + */ +void ipa_power_retention(struct ipa *ipa, bool enable); + +/** * ipa_power_setup() - Set up IPA power management * @ipa: IPA pointer * diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c index 856e55a080a7..fe11910518d9 100644 --- a/drivers/net/ipa/ipa_uc.c +++ b/drivers/net/ipa/ipa_uc.c @@ -11,6 +11,7 @@ #include "ipa.h" #include "ipa_uc.h" +#include "ipa_power.h" /** * DOC: The IPA embedded microcontroller @@ -154,6 +155,7 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id) case IPA_UC_RESPONSE_INIT_COMPLETED: if (ipa->uc_powered) { ipa->uc_loaded = true; + ipa_power_retention(ipa, true); pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); ipa->uc_powered = false; @@ -184,6 +186,9 @@ void ipa_uc_deconfig(struct ipa *ipa) ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1); ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0); + if (ipa->uc_loaded) + ipa_power_retention(ipa, false); + if (!ipa->uc_powered) return; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 16aa3a478e9e..3d0874331763 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3870,6 +3870,18 @@ static void macsec_common_dellink(struct net_device *dev, struct list_head *head struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (ops) { + ctx.secy = &macsec->secy; + macsec_offload(ops->mdo_del_secy, &ctx); + } + } + unregister_netdevice_queue(dev, head); list_del_rcu(&macsec->secys); macsec_del_dev(macsec); @@ -3884,18 +3896,6 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head) struct net_device *real_dev = macsec->real_dev; struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); - /* If h/w offloading is available, propagate to the device */ - if (macsec_is_offloaded(macsec)) { - const struct macsec_ops *ops; - struct macsec_context ctx; - - ops = macsec_get_ops(netdev_priv(dev), &ctx); - if (ops) { - ctx.secy = &macsec->secy; - macsec_offload(ops->mdo_del_secy, &ctx); - } - } - macsec_common_dellink(dev, head); if (list_empty(&rxd->secys)) { @@ -4018,6 +4018,15 @@ static int macsec_newlink(struct net *net, 
struct net_device *dev, !macsec_check_offload(macsec->offload, macsec)) return -EOPNOTSUPP; + /* send_sci must be set to true when transmit sci explicitly is set */ + if ((data && data[IFLA_MACSEC_SCI]) && + (data && data[IFLA_MACSEC_INC_SCI])) { + u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); + + if (!send_sci) + return -EINVAL; + } + if (data && data[IFLA_MACSEC_ICV_LEN]) icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); mtu = real_dev->mtu - icv_len - macsec_extra_len(true); diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c index eaa6fb3224bc..62723a7faa2d 100644 --- a/drivers/net/mctp/mctp-serial.c +++ b/drivers/net/mctp/mctp-serial.c @@ -403,8 +403,16 @@ static void mctp_serial_tty_receive_buf(struct tty_struct *tty, mctp_serial_push(dev, c[i]); } +static void mctp_serial_uninit(struct net_device *ndev) +{ + struct mctp_serial *dev = netdev_priv(ndev); + + cancel_work_sync(&dev->tx_work); +} + static const struct net_device_ops mctp_serial_netdev_ops = { .ndo_start_xmit = mctp_serial_tx, + .ndo_uninit = mctp_serial_uninit, }; static void mctp_serial_setup(struct net_device *ndev) @@ -483,7 +491,6 @@ static void mctp_serial_close(struct tty_struct *tty) int idx = dev->idx; unregister_netdev(dev->netdev); - cancel_work_sync(&dev->tx_work); ida_free(&mctp_serial_ida, idx); } diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index 966c3b4ad59d..e2273588c75b 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -148,6 +148,7 @@ static const struct of_device_id aspeed_mdio_of_match[] = { { .compatible = "aspeed,ast2600-mdio", }, { }, }; +MODULE_DEVICE_TABLE(of, aspeed_mdio_of_match); static struct platform_driver aspeed_mdio_driver = { .driver = { diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index 5f4cd24a0241..4eba5a91075c 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -200,7 +200,11 @@ static int ipq_mdio_reset(struct mii_bus *bus) if (ret) return ret; - return clk_prepare_enable(priv->mdio_clk); + ret = clk_prepare_enable(priv->mdio_clk); + if (ret == 0) + mdelay(10); + + return ret; } static int ipq4019_mdio_probe(struct platform_device *pdev) diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index 7d2abaf2b2c9..64fb76c1e395 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -187,6 +187,13 @@ static const struct regmap_config mscc_miim_regmap_config = { .reg_stride = 4, }; +static const struct regmap_config mscc_miim_phy_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .name = "phy", +}; + int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name, struct regmap *mii_regmap, int status_offset) { @@ -250,7 +257,7 @@ static int mscc_miim_probe(struct platform_device *pdev) } phy_regmap = devm_regmap_init_mmio(&pdev->dev, phy_regs, - &mscc_miim_regmap_config); + &mscc_miim_phy_regmap_config); if (IS_ERR(phy_regmap)) { dev_err(&pdev->dev, "Unable to create phy register regmap\n"); return PTR_ERR(phy_regmap); diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 4300261e2f9e..378ee779061c 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -623,14 +623,14 @@ static int nsim_fib6_rt_append(struct nsim_fib_data *data, if (err) goto err_fib6_rt_nh_del; - fib6_event->rt_arr[i]->trap = true; + WRITE_ONCE(fib6_event->rt_arr[i]->trap, true); } return 0; 
err_fib6_rt_nh_del: for (i--; i >= 0; i--) { - fib6_event->rt_arr[i]->trap = false; + WRITE_ONCE(fib6_event->rt_arr[i]->trap, false); nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]); } return err; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 5b6c0d120e09..29aa811af430 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -1688,19 +1688,19 @@ static int qca808x_read_status(struct phy_device *phydev) if (ret < 0) return ret; - if (phydev->link && phydev->speed == SPEED_2500) - phydev->interface = PHY_INTERFACE_MODE_2500BASEX; - else - phydev->interface = PHY_INTERFACE_MODE_SMII; - - /* generate seed as a lower random value to make PHY linked as SLAVE easily, - * except for master/slave configuration fault detected. - * the reason for not putting this code into the function link_change_notify is - * the corner case where the link partner is also the qca8081 PHY and the seed - * value is configured as the same value, the link can't be up and no link change - * occurs. - */ - if (!phydev->link) { + if (phydev->link) { + if (phydev->speed == SPEED_2500) + phydev->interface = PHY_INTERFACE_MODE_2500BASEX; + else + phydev->interface = PHY_INTERFACE_MODE_SGMII; + } else { + /* generate seed as a lower random value to make PHY linked as SLAVE easily, + * except for master/slave configuration fault detected. + * the reason for not putting this code into the function link_change_notify is + * the corner case where the link partner is also the qca8081 PHY and the seed + * value is configured as the same value, the link can't be up and no link change + * occurs. + */ if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR) { qca808x_phy_ms_seed_enable(phydev, false); } else { diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 211b5476a6f5..ce17b2af3218 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -274,7 +274,7 @@ static int dp83822_config_intr(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_DP83822_MISR1, 0); + err = phy_write(phydev, MII_DP83822_MISR2, 0); if (err < 0) return err; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index fa71fb7a66b5..2702faf7b0f6 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -553,9 +553,9 @@ static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev) else mscr = 0; - return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE, - MII_88E1121_PHY_MSCR_REG, - MII_88E1121_PHY_MSCR_DELAY_MASK, mscr); + return phy_modify_paged_changed(phydev, MII_MARVELL_MSCR_PAGE, + MII_88E1121_PHY_MSCR_REG, + MII_88E1121_PHY_MSCR_DELAY_MASK, mscr); } static int m88e1121_config_aneg(struct phy_device *phydev) @@ -569,11 +569,13 @@ static int m88e1121_config_aneg(struct phy_device *phydev) return err; } + changed = err; + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; - changed = err; + changed |= err; err = genphy_config_aneg(phydev); if (err < 0) @@ -1213,16 +1215,15 @@ static int m88e1118_config_aneg(struct phy_device *phydev) { int err; - err = genphy_soft_reset(phydev); + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; - err = marvell_set_polarity(phydev, phydev->mdix_ctrl); + err = genphy_config_aneg(phydev); if (err < 0) return err; - err = genphy_config_aneg(phydev); - return 0; + return genphy_soft_reset(phydev); } static int m88e1118_config_init(struct phy_device *phydev) @@ -1686,8 +1687,8 @@ static int marvell_suspend(struct phy_device 
*phydev) int err; /* Suspend the fiber mode first */ - if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, - phydev->supported)) { + if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported)) { err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); if (err < 0) goto error; @@ -1721,8 +1722,8 @@ static int marvell_resume(struct phy_device *phydev) int err; /* Resume the fiber mode first */ - if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, - phydev->supported)) { + if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported)) { err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); if (err < 0) goto error; diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c index b7a5ae20edd5..68ee434f9dea 100644 --- a/drivers/net/phy/mediatek-ge.c +++ b/drivers/net/phy/mediatek-ge.c @@ -55,9 +55,6 @@ static int mt7530_phy_config_init(struct phy_device *phydev) static int mt7531_phy_config_init(struct phy_device *phydev) { - if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL) - return -EINVAL; - mtk_gephy_config_init(phydev); /* PHY link down power saving enable */ diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index 7e7904fee1d9..73f7962a37d3 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -30,8 +30,12 @@ #define INTSRC_LINK_DOWN BIT(4) #define INTSRC_REMOTE_FAULT BIT(5) #define INTSRC_ANEG_COMPLETE BIT(6) +#define INTSRC_ENERGY_DETECT BIT(7) #define INTSRC_MASK 30 +#define INT_SOURCES (INTSRC_LINK_DOWN | INTSRC_ANEG_COMPLETE | \ + INTSRC_ENERGY_DETECT) + #define BANK_ANALOG_DSP 0 #define BANK_WOL 1 #define BANK_BIST 3 @@ -200,7 +204,6 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev) static int meson_gxl_config_intr(struct phy_device *phydev) { - u16 val; int ret; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { @@ -209,16 +212,9 @@ static int meson_gxl_config_intr(struct phy_device *phydev) if (ret) return ret; - val = INTSRC_ANEG_PR - | INTSRC_PARALLEL_FAULT - | INTSRC_ANEG_LP_ACK - | INTSRC_LINK_DOWN - | INTSRC_REMOTE_FAULT - | INTSRC_ANEG_COMPLETE; - ret = phy_write(phydev, INTSRC_MASK, val); + ret = phy_write(phydev, INTSRC_MASK, INT_SOURCES); } else { - val = 0; - ret = phy_write(phydev, INTSRC_MASK, val); + ret = phy_write(phydev, INTSRC_MASK, 0); /* Ack any pending IRQ */ ret = meson_gxl_ack_interrupt(phydev); @@ -237,10 +233,23 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev) return IRQ_NONE; } + irq_status &= INT_SOURCES; + if (irq_status == 0) return IRQ_NONE; - phy_trigger_machine(phydev); + /* Aneg-complete interrupt is used for link-up detection */ + if (phydev->autoneg == AUTONEG_ENABLE && + irq_status == INTSRC_ENERGY_DETECT) + return IRQ_HANDLED; + + /* Give PHY some time before MAC starts sending data. This works + * around an issue where network doesn't come up properly. 
+ */ + if (!(irq_status & INTSRC_LINK_DOWN)) + phy_queue_state_machine(phydev, msecs_to_jiffies(100)); + else + phy_trigger_machine(phydev); return IRQ_HANDLED; } diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index ebfeeb3c67c1..7e3017e7a1c0 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -2685,3 +2685,6 @@ MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl); MODULE_DESCRIPTION("Microsemi VSC85xx PHY driver"); MODULE_AUTHOR("Nagaraju Lakkaraju"); MODULE_LICENSE("Dual MIT/GPL"); + +MODULE_FIRMWARE(MSCC_VSC8584_REVB_INT8051_FW); +MODULE_FIRMWARE(MSCC_VSC8574_REVB_INT8051_FW); diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 1a627ba4b850..a31098981a65 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1468,58 +1468,68 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) u16 hdr_off; u32 *pkt_hdr; - /* This check is no longer done by usbnet */ - if (skb->len < dev->net->hard_header_len) + /* At the end of the SKB, there's a header telling us how many packets + * are bundled into this buffer and where we can find an array of + * per-packet metadata (which contains elements encoded into u16). + */ + if (skb->len < 4) return 0; - skb_trim(skb, skb->len - 4); rx_hdr = get_unaligned_le32(skb_tail_pointer(skb)); - pkt_cnt = (u16)rx_hdr; hdr_off = (u16)(rx_hdr >> 16); + + if (pkt_cnt == 0) + return 0; + + /* Make sure that the bounds of the metadata array are inside the SKB + * (and in front of the counter at the end). + */ + if (pkt_cnt * 2 + hdr_off > skb->len) + return 0; pkt_hdr = (u32 *)(skb->data + hdr_off); - while (pkt_cnt--) { + /* Packets must not overlap the metadata array */ + skb_trim(skb, hdr_off); + + for (; ; pkt_cnt--, pkt_hdr++) { u16 pkt_len; le32_to_cpus(pkt_hdr); pkt_len = (*pkt_hdr >> 16) & 0x1fff; - /* Check CRC or runt packet */ - if ((*pkt_hdr & AX_RXHDR_CRC_ERR) || - (*pkt_hdr & AX_RXHDR_DROP_ERR)) { - skb_pull(skb, (pkt_len + 7) & 0xFFF8); - pkt_hdr++; - continue; - } - - if (pkt_cnt == 0) { - skb->len = pkt_len; - /* Skip IP alignment pseudo header */ - skb_pull(skb, 2); - skb_set_tail_pointer(skb, skb->len); - skb->truesize = pkt_len + sizeof(struct sk_buff); - ax88179_rx_checksum(skb, pkt_hdr); - return 1; - } + if (pkt_len > skb->len) + return 0; - ax_skb = skb_clone(skb, GFP_ATOMIC); - if (ax_skb) { + /* Check CRC or runt packet */ + if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) && + pkt_len >= 2 + ETH_HLEN) { + bool last = (pkt_cnt == 0); + + if (last) { + ax_skb = skb; + } else { + ax_skb = skb_clone(skb, GFP_ATOMIC); + if (!ax_skb) + return 0; + } ax_skb->len = pkt_len; /* Skip IP alignment pseudo header */ skb_pull(ax_skb, 2); skb_set_tail_pointer(ax_skb, ax_skb->len); ax_skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(ax_skb, pkt_hdr); + + if (last) + return 1; + usbnet_skb_return(dev, ax_skb); - } else { - return 0; } - skb_pull(skb, (pkt_len + 7) & 0xFFF8); - pkt_hdr++; + /* Trim this packet away from the SKB */ + if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8)) + return 0; } - return 1; } static struct sk_buff * diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index eb3817d70f2b..9b4dfa3001d6 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -583,6 +583,11 @@ static const struct usb_device_id products[] = { .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE +#define ZAURUS_FAKE_INTERFACE \ + 
.bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + /* SA-1100 based Sharp Zaurus ("collie"), or compatible; * wire-incompatible with true CDC Ethernet implementations. * (And, it seems, needlessly so...) @@ -640,6 +645,13 @@ static const struct usb_device_id products[] = { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, /* reported with some C860 units */ .idProduct = 0x9050, /* C-860 */ ZAURUS_MASTER_INTERFACE, diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 82bb5ed94c48..c0b8b4aa78f3 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -659,6 +659,11 @@ static const struct usb_device_id mbim_devs[] = { .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, }, + /* Telit FN990 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, + }, + /* default entry */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info_zlp, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index e303b522efb5..15f91d691bba 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1715,10 +1715,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) { struct sk_buff *skb; struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; - int len; + unsigned int len; int nframes; int x; - int offset; + unsigned int offset; union { struct usb_cdc_ncm_ndp16 *ndp16; struct usb_cdc_ncm_ndp32 *ndp32; @@ -1790,8 +1790,8 @@ next_ndp: break; } - /* sanity checking */ - if (((offset + len) > skb_in->len) || + /* sanity checking - watch out for integer wrap*/ + if ((offset > skb_in->len) || (len > skb_in->len - offset) || (len > ctx->rx_max) || (len < ETH_HLEN)) { netif_dbg(dev, rx_err, dev->net, "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n", diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index cd33955df0b6..6a769df0b421 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -121,7 +121,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone) if (tx_buf == NULL) goto free_rx_urb; - rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE, + rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, GFP_KERNEL, &rx_urb->transfer_dma); if (rx_buf == NULL) goto free_tx_buf; @@ -146,7 +146,7 @@ error_nomem: static void ipheth_free_urbs(struct ipheth_device *iphone) { - usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf, + usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf, iphone->rx_urb->transfer_dma); usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf, iphone->tx_urb->transfer_dma); @@ -317,7 +317,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags) usb_fill_bulk_urb(dev->rx_urb, udev, usb_rcvbulkpipe(udev, dev->bulk_in), - dev->rx_buf, IPHETH_BUF_SIZE, + dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, ipheth_rcvbulk_callback, dev); dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; diff --git a/drivers/net/usb/lan78xx.c 
b/drivers/net/usb/lan78xx.c index b8e20a3f2b84..415f16662f88 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1537,11 +1537,8 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb) netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata); lan78xx_defer_kevent(dev, EVENT_LINK_RESET); - if (dev->domain_data.phyirq > 0) { - local_irq_disable(); - generic_handle_irq(dev->domain_data.phyirq); - local_irq_enable(); - } + if (dev->domain_data.phyirq > 0) + generic_handle_irq_safe(dev->domain_data.phyirq); } else { netdev_warn(dev->net, "unexpected interrupt: 0x%08x\n", intdata); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 37e5f3495362..3353e761016d 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1400,6 +1400,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ + {QMI_FIXED_INTF(0x413c, 0x81e4, 0)}, /* Dell Wireless 5829e with eSIM support*/ + {QMI_FIXED_INTF(0x413c, 0x81e6, 0)}, /* Dell Wireless 5829e */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */ diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index bc1e3dd67c04..a0f29482294d 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -84,9 +84,10 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) { - netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", - index, ret); + if (ret < 0) { + if (ret != -ENODEV) + netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", + index, ret); return ret; } @@ -116,7 +117,7 @@ static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) + if (ret < 0 && ret != -ENODEV) netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n", index, ret); @@ -159,6 +160,9 @@ static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev, do { ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm); if (ret < 0) { + /* Ignore -ENODEV error during disconnect() */ + if (ret == -ENODEV) + return 0; netdev_warn(dev->net, "Error reading MII_ACCESS\n"); return ret; } @@ -194,7 +198,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx, addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_); ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_ADDR\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_ADDR\n"); goto done; } @@ -206,7 +211,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx, ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_DATA\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error reading MII_DATA\n"); goto done; } @@ -214,6 +220,10 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx, done: 
mutex_unlock(&dev->phy_mutex); + + /* Ignore -ENODEV error during disconnect() */ + if (ret == -ENODEV) + return 0; return ret; } @@ -235,7 +245,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id, val = regval; ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_DATA\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_DATA\n"); goto done; } @@ -243,7 +254,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id, addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_); ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_ADDR\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_ADDR\n"); goto done; } diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index b658510cc9a4..5a53e63d33a6 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -413,7 +413,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) /* ignore the CRC length */ len = (skb->data[1] | (skb->data[2] << 8)) - 4; - if (len > ETH_FRAME_LEN) + if (len > ETH_FRAME_LEN || len > skb->len) return 0; /* the last packet of current skb */ diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c index 8e717a0b559b..7984f2157d22 100644 --- a/drivers/net/usb/zaurus.c +++ b/drivers/net/usb/zaurus.c @@ -256,6 +256,11 @@ static const struct usb_device_id products [] = { .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE +#define ZAURUS_FAKE_INTERFACE \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + /* SA-1100 based Sharp Zaurus ("collie"), or compatible. 
*/ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO @@ -315,6 +320,13 @@ static const struct usb_device_id products [] = { .driver_info = ZAURUS_PXA_INFO, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, /* reported with some C860 units */ diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 354a963075c5..d29fb9759cc9 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -265,9 +265,10 @@ static void __veth_xdp_flush(struct veth_rq *rq) { /* Write ptr_ring before reading rx_notify_masked */ smp_mb(); - if (!rq->rx_notify_masked) { - rq->rx_notify_masked = true; - napi_schedule(&rq->xdp_napi); + if (!READ_ONCE(rq->rx_notify_masked) && + napi_schedule_prep(&rq->xdp_napi)) { + WRITE_ONCE(rq->rx_notify_masked, true); + __napi_schedule(&rq->xdp_napi); } } @@ -912,8 +913,10 @@ static int veth_poll(struct napi_struct *napi, int budget) /* Write rx_notify_masked before reading ptr_ring */ smp_store_mb(rq->rx_notify_masked, false); if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) { - rq->rx_notify_masked = true; - napi_schedule(&rq->xdp_napi); + if (napi_schedule_prep(&rq->xdp_napi)) { + WRITE_ONCE(rq->rx_notify_masked, true); + __napi_schedule(&rq->xdp_napi); + } } } diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index a46067c38bf5..0fad1331303c 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -59,9 +59,7 @@ out: return ret; } -#ifdef CONFIG_PM_SLEEP -static int wg_pm_notification(struct notifier_block *nb, unsigned long action, - void *data) +static int wg_pm_notification(struct notifier_block *nb, unsigned long action, void *data) { struct wg_device *wg; struct wg_peer *peer; @@ -92,7 +90,24 @@ static int wg_pm_notification(struct notifier_block *nb, unsigned long action, } static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification }; -#endif + +static int wg_vm_notification(struct notifier_block *nb, unsigned long action, void *data) +{ + struct wg_device *wg; + struct wg_peer *peer; + + rtnl_lock(); + list_for_each_entry(wg, &device_list, device_list) { + mutex_lock(&wg->device_update_lock); + list_for_each_entry(peer, &wg->peer_list, peer_list) + wg_noise_expire_current_peer_keypairs(peer); + mutex_unlock(&wg->device_update_lock); + } + rtnl_unlock(); + return 0; +} + +static struct notifier_block vm_notifier = { .notifier_call = wg_vm_notification }; static int wg_stop(struct net_device *dev) { @@ -424,16 +439,18 @@ int __init wg_device_init(void) { int ret; -#ifdef CONFIG_PM_SLEEP ret = register_pm_notifier(&pm_notifier); if (ret) return ret; -#endif - ret = register_pernet_device(&pernet_ops); + ret = register_random_vmfork_notifier(&vm_notifier); if (ret) goto error_pm; + ret = register_pernet_device(&pernet_ops); + if (ret) + goto error_vm; + ret = rtnl_link_register(&link_ops); if (ret) goto error_pernet; @@ -442,10 +459,10 @@ int __init wg_device_init(void) error_pernet: unregister_pernet_device(&pernet_ops); +error_vm: + unregister_random_vmfork_notifier(&vm_notifier); error_pm: -#ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&pm_notifier); -#endif return ret; } @@ -453,8 +470,7 @@ void wg_device_uninit(void) { rtnl_link_unregister(&link_ops); unregister_pernet_device(&pernet_ops); -#ifdef CONFIG_PM_SLEEP + 
unregister_random_vmfork_notifier(&vm_notifier); unregister_pm_notifier(&pm_notifier); -#endif rcu_barrier(); } diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 62c453a21e49..7c1c2658cb5f 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2611,36 +2611,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_mac_handle_beacon(ar, skb); if (ieee80211_is_beacon(hdr->frame_control) || - ieee80211_is_probe_resp(hdr->frame_control)) { - struct ieee80211_mgmt *mgmt = (void *)skb->data; - enum cfg80211_bss_frame_type ftype; - u8 *ies; - int ies_ch; - + ieee80211_is_probe_resp(hdr->frame_control)) status->boottime_ns = ktime_get_boottime_ns(); - if (!ar->scan_channel) - goto drop; - - ies = mgmt->u.beacon.variable; - - if (ieee80211_is_beacon(mgmt->frame_control)) - ftype = CFG80211_BSS_FTYPE_BEACON; - else - ftype = CFG80211_BSS_FTYPE_PRESP; - - ies_ch = cfg80211_get_ies_channel_number(mgmt->u.beacon.variable, - skb_tail_pointer(skb) - ies, - sband->band, ftype); - - if (ies_ch > 0 && ies_ch != channel) { - ath10k_dbg(ar, ATH10K_DBG_MGMT, - "channel mismatched ds channel %d scan channel %d\n", - ies_ch, channel); - goto drop; - } - } - ath10k_dbg(ar, ATH10K_DBG_MGMT, "event mgmt rx skb %pK len %d ftype %02x stype %02x\n", skb, skb->len, @@ -2654,10 +2627,6 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) ieee80211_rx_ni(ar->hw, skb); return 0; - -drop: - dev_kfree_skb(skb); - return 0; } static int freq_to_idx(struct ath10k *ar, int freq) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 0eb13e5df517..d99140960a82 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -693,7 +693,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, { struct brcmf_fw_item *first = &req->items[0]; struct brcmf_fw *fwctx; - char *alt_path; + char *alt_path = NULL; int ret; brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev)); @@ -712,7 +712,9 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, fwctx->done = fw_cb; /* First try alternative board-specific path if any */ - alt_path = brcm_alt_fw_path(first->path, fwctx->req->board_type); + if (fwctx->req->board_type) + alt_path = brcm_alt_fw_path(first->path, + fwctx->req->board_type); if (alt_path) { ret = request_firmware_nowait(THIS_MODULE, true, alt_path, fwctx->dev, GFP_KERNEL, fwctx, diff --git a/drivers/net/wireless/intel/Makefile b/drivers/net/wireless/intel/Makefile index 1364b0014488..208e73a16051 100644 --- a/drivers/net/wireless/intel/Makefile +++ b/drivers/net/wireless/intel/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_IPW2200) += ipw2x00/ obj-$(CONFIG_IWLEGACY) += iwlegacy/ obj-$(CONFIG_IWLWIFI) += iwlwifi/ +obj-$(CONFIG_IWLMEI) += iwlwifi/ diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index c21c0c68849a..85e704283755 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -80,19 +80,6 @@ config IWLWIFI_OPMODE_MODULAR comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM" depends on IWLDVM=n && IWLMVM=n -config IWLWIFI_BCAST_FILTERING - bool "Enable broadcast filtering" - depends on IWLMVM - help - Say Y here to enable default bcast filtering configuration. 
- - Enabling broadcast filtering will drop any incoming wireless - broadcast frames, except some very specific predefined - patterns (e.g. incoming arp requests). - - If unsure, don't enable this option, as some programs might - expect incoming broadcasts for their normal operations. - menu "Debugging Options" config IWLWIFI_DEBUG diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 790c96df58cb..c17ab53fcd8f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2019-2021 Intel Corporation + * Copyright (C) 2019-2022 Intel Corporation */ #include <linux/uuid.h> #include "iwl-drv.h" @@ -888,10 +888,11 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) * only one using version 36, so skip this version entirely. */ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 || - IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 || - (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 && - ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == - CSR_HW_REV_TYPE_7265D)); + (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 && + fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) || + (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 && + ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == + CSR_HW_REV_TYPE_7265D)); } IWL_EXPORT_SYMBOL(iwl_sar_geo_support); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 0703e41403a6..35b8856e511f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -502,11 +502,6 @@ enum iwl_legacy_cmds { DEBUG_LOG_MSG = 0xf7, /** - * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd - */ - BCAST_FILTER_CMD = 0xcf, - - /** * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd */ MCAST_FILTER_CMD = 0xd0, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h index dd62a63956b3..e44c70b7c790 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h @@ -36,92 +36,4 @@ struct iwl_mcast_filter_cmd { u8 addr_list[0]; } __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ -#define MAX_BCAST_FILTERS 8 -#define MAX_BCAST_FILTER_ATTRS 2 - -/** - * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet - * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start. - * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e. - * start of ip payload). - */ -enum iwl_mvm_bcast_filter_attr_offset { - BCAST_FILTER_OFFSET_PAYLOAD_START = 0, - BCAST_FILTER_OFFSET_IP_END = 1, -}; - -/** - * struct iwl_fw_bcast_filter_attr - broadcast filter attribute - * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset. - * @offset: starting offset of this pattern. - * @reserved1: reserved - * @val: value to match - big endian (MSB is the first - * byte to match from offset pos). - * @mask: mask to match (big endian). - */ -struct iwl_fw_bcast_filter_attr { - u8 offset_type; - u8 offset; - __le16 reserved1; - __be32 val; - __be32 mask; -} __packed; /* BCAST_FILTER_ATT_S_VER_1 */ - -/** - * enum iwl_mvm_bcast_filter_frame_type - filter frame type - * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames. 
- * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames - */ -enum iwl_mvm_bcast_filter_frame_type { - BCAST_FILTER_FRAME_TYPE_ALL = 0, - BCAST_FILTER_FRAME_TYPE_IPV4 = 1, -}; - -/** - * struct iwl_fw_bcast_filter - broadcast filter - * @discard: discard frame (1) or let it pass (0). - * @frame_type: &enum iwl_mvm_bcast_filter_frame_type. - * @reserved1: reserved - * @num_attrs: number of valid attributes in this filter. - * @attrs: attributes of this filter. a filter is considered matched - * only when all its attributes are matched (i.e. AND relationship) - */ -struct iwl_fw_bcast_filter { - u8 discard; - u8 frame_type; - u8 num_attrs; - u8 reserved1; - struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS]; -} __packed; /* BCAST_FILTER_S_VER_1 */ - -/** - * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration. - * @default_discard: default action for this mac (discard (1) / pass (0)). - * @reserved1: reserved - * @attached_filters: bitmap of relevant filters for this mac. - */ -struct iwl_fw_bcast_mac { - u8 default_discard; - u8 reserved1; - __le16 attached_filters; -} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */ - -/** - * struct iwl_bcast_filter_cmd - broadcast filtering configuration - * @disable: enable (0) / disable (1) - * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS) - * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER) - * @reserved1: reserved - * @filters: broadcast filters - * @macs: broadcast filtering configuration per-mac - */ -struct iwl_bcast_filter_cmd { - u8 disable; - u8 max_bcast_filters; - u8 max_macs; - u8 reserved1; - struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS]; - struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER]; -} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */ - #endif /* __iwl_fw_api_filter_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index 173a6991587b..4a7723eb8c1d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -752,7 +752,6 @@ struct iwl_lq_cmd { u8 iwl_fw_rate_idx_to_plcp(int idx); u32 iwl_new_rate_from_v1(u32 rate_v1); -u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags); const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx); const char *iwl_rs_pretty_ant(u8 ant); const char *iwl_rs_pretty_bw(int bw); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index e4ebda64cd52..efc6540d31af 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -181,7 +181,6 @@ struct iwl_ucode_capa { * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version) * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save - * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering. * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS. 
*/ enum iwl_ucode_tlv_flag { @@ -196,7 +195,6 @@ enum iwl_ucode_tlv_flag { IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24), IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25), IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26), - IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29), }; typedef unsigned int __bitwise iwl_ucode_tlv_api_t; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c index a21c3befd93b..a835214611ce 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c @@ -91,6 +91,20 @@ const char *iwl_rs_pretty_bw(int bw) } IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw); +static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags) +{ + int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1; + int idx; + bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1); + int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0; + int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE; + + for (idx = offset; idx < last; idx++) + if (iwl_fw_rate_idx_to_plcp(idx) == rate) + return idx - offset; + return IWL_RATE_INVALID; +} + u32 iwl_new_rate_from_v1(u32 rate_v1) { u32 rate_v2 = 0; @@ -144,7 +158,10 @@ u32 iwl_new_rate_from_v1(u32 rate_v1) } else { u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1); - WARN_ON(legacy_rate < 0); + if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID)) + legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ? + IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE; + rate_v2 |= legacy_rate; if (!(rate_v1 & RATE_MCS_CCK_MSK_V1)) rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK; @@ -172,20 +189,6 @@ u32 iwl_new_rate_from_v1(u32 rate_v1) } IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1); -u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags) -{ - int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1; - int idx; - bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1); - int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0; - int last = ofdm ? 
IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE; - - for (idx = offset; idx < last; idx++) - if (iwl_fw_rate_idx_to_plcp(idx) == rate) - return idx - offset; - return -1; -} - int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) { char *type; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index f90d4662c164..8e10ba88afb3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -329,6 +329,7 @@ enum { #define CSR_HW_REV_TYPE_2x00 (0x0000100) #define CSR_HW_REV_TYPE_105 (0x0000110) #define CSR_HW_REV_TYPE_135 (0x0000120) +#define CSR_HW_REV_TYPE_3160 (0x0000164) #define CSR_HW_REV_TYPE_7265D (0x0000210) #define CSR_HW_REV_TYPE_NONE (0x00001F0) #define CSR_HW_REV_TYPE_QNJ (0x0000360) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 83e3b731ad29..6651e78b39ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1707,6 +1707,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) out_unbind: complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); + /* drv has just been freed by the release */ + failure = false; free: if (failure) iwl_dealloc_ucode(drv); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index dd58c8f9aa11..04addf964d83 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -553,8 +553,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = - IEEE80211_HE_MAC_CAP0_HTC_HE | - IEEE80211_HE_MAC_CAP0_TWT_REQ, + IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c index d9733aaf6f6e..2f7f0f994ca3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/main.c +++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2021 Intel Corporation + * Copyright (C) 2021-2022 Intel Corporation */ #include <linux/etherdevice.h> @@ -146,6 +146,7 @@ struct iwl_mei_filters { * @csme_taking_ownership: true when CSME is taking ownership. Used to remember * to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down * flow. + * @link_prot_state: true when we are in link protection PASSIVE * @csa_throttle_end_wk: used when &csa_throttled is true * @data_q_lock: protects the access to the data queues which are * accessed without the mutex. 
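[Editor's note on the iwl_legacy_rate_to_fw_idx() rework above: the old code returned -1 from a function whose result the caller stored in a u32, so WARN_ON(legacy_rate < 0) was dead code — an unsigned value is never negative. The fix returns an explicit sentinel (IWL_RATE_INVALID) and compares against it. A minimal standalone sketch of the pattern; identifiers and table values here are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define RATE_INVALID 0xFFu	/* explicit sentinel instead of -1 */

static uint32_t lookup_rate_idx(uint32_t plcp)
{
	static const uint32_t plcp_table[] = { 0x0a, 0x14, 0x37, 0x6e };
	uint32_t i;

	for (i = 0; i < sizeof(plcp_table) / sizeof(plcp_table[0]); i++)
		if (plcp_table[i] == plcp)
			return i;
	return RATE_INVALID;	/* not -1: the caller holds this in a u32 */
}

int main(void)
{
	uint32_t idx = lookup_rate_idx(0x99);

	/* "idx < 0" is always false for a uint32_t, which is why the old
	 * WARN_ON could never fire; compare against the sentinel instead.
	 */
	if (idx == RATE_INVALID)
		printf("unknown PLCP, falling back to a default rate\n");
	return 0;
}
]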
@@ -165,6 +166,7 @@ struct iwl_mei { bool amt_enabled; bool csa_throttled; bool csme_taking_ownership; + bool link_prot_state; struct delayed_work csa_throttle_end_wk; spinlock_t data_q_lock; @@ -229,8 +231,6 @@ static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev) if (IS_ERR(mem->ctrl)) { int ret = PTR_ERR(mem->ctrl); - dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n", - ret); mem->ctrl = NULL; return ret; @@ -669,6 +669,8 @@ iwl_mei_handle_conn_status(struct mei_cl_device *cldev, iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info); + mei->link_prot_state = status->link_prot_state; + /* * Update the Rfkill state in case the host does not own the device: * if we are in Link Protection, ask to not touch the device, else, @@ -1663,9 +1665,11 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops) mei_cldev_get_drvdata(iwl_mei_global_cldev); /* we have already a SAP connection */ - if (iwl_mei_is_connected()) + if (iwl_mei_is_connected()) { iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_UP); + ops->rfkill(priv, mei->link_prot_state); + } } ret = 0; @@ -1784,6 +1788,8 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {} #endif /* CONFIG_DEBUG_FS */ +#define ALLOC_SHARED_MEM_RETRY_MAX_NUM 3 + /* * iwl_mei_probe - the probe function called by the mei bus enumeration * @@ -1795,6 +1801,7 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {} static int iwl_mei_probe(struct mei_cl_device *cldev, const struct mei_cl_device_id *id) { + int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM; struct iwl_mei *mei; int ret; @@ -1812,15 +1819,31 @@ static int iwl_mei_probe(struct mei_cl_device *cldev, mei_cldev_set_drvdata(cldev, mei); mei->cldev = cldev; - /* - * The CSME firmware needs to boot the internal WLAN client. Wait here - * so that the DMA map request will succeed. - */ - msleep(20); + do { + ret = iwl_mei_alloc_shared_mem(cldev); + if (!ret) + break; + /* + * The CSME firmware needs to boot the internal WLAN client. + * This can take time in certain configurations (usually + * upon resume and when the whole CSME firmware is shut down + * during suspend). + * + * Wait a bit before retrying and hope we'll succeed next time. 
+ */ - ret = iwl_mei_alloc_shared_mem(cldev); - if (ret) + dev_dbg(&cldev->dev, + "Couldn't allocate the shared memory: %d, attempt %d / %d\n", + ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM); + msleep(100); + alloc_retry--; + } while (alloc_retry); + + if (ret) { + dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n", + ret); goto free; + } iwl_mei_init_shared_mem(mei); diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c index 5f966af69720..468102a95e1b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/net.c +++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c @@ -195,8 +195,7 @@ static bool iwl_mei_rx_filter_ipv4(struct sk_buff *skb, bool match; if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) || - !pskb_may_pull(skb, skb_network_offset(skb) + - sizeof(ip_hdrlen(skb) - sizeof(*iphdr)))) + !pskb_may_pull(skb, skb_network_offset(skb) + ip_hdrlen(skb))) return false; iphdrlen = ip_hdrlen(skb); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index fb4920b01dbb..445c94adb076 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -5,6 +5,7 @@ * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include <linux/vmalloc.h> +#include <linux/err.h> #include <linux/ieee80211.h> #include <linux/netdevice.h> @@ -1369,189 +1370,6 @@ static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm, return count; } -#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__) -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - struct iwl_bcast_filter_cmd cmd; - const struct iwl_fw_bcast_filter *filter; - char *buf; - int bufsz = 1024; - int i, j, pos = 0; - ssize_t ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mutex_lock(&mvm->mutex); - if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) { - ADD_TEXT("None\n"); - mutex_unlock(&mvm->mutex); - goto out; - } - mutex_unlock(&mvm->mutex); - - for (i = 0; cmd.filters[i].attrs[0].mask; i++) { - filter = &cmd.filters[i]; - - ADD_TEXT("Filter [%d]:\n", i); - ADD_TEXT("\tDiscard=%d\n", filter->discard); - ADD_TEXT("\tFrame Type: %s\n", - filter->frame_type ? "IPv4" : "Generic"); - - for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) { - const struct iwl_fw_bcast_filter_attr *attr; - - attr = &filter->attrs[j]; - if (!attr->mask) - break; - - ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n", - j, attr->offset, - attr->offset_type ? 
"IP End" : - "Payload Start", - be32_to_cpu(attr->mask), - be32_to_cpu(attr->val), - le16_to_cpu(attr->reserved1)); - } - } -out: - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - int pos, next_pos; - struct iwl_fw_bcast_filter filter = {}; - struct iwl_bcast_filter_cmd cmd; - u32 filter_id, attr_id, mask, value; - int err = 0; - - if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard, - &filter.frame_type, &pos) != 3) - return -EINVAL; - - if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) || - filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4) - return -EINVAL; - - for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs); - attr_id++) { - struct iwl_fw_bcast_filter_attr *attr = - &filter.attrs[attr_id]; - - if (pos >= count) - break; - - if (sscanf(&buf[pos], "%hhi %hhi %i %i %n", - &attr->offset, &attr->offset_type, - &mask, &value, &next_pos) != 4) - return -EINVAL; - - attr->mask = cpu_to_be32(mask); - attr->val = cpu_to_be32(value); - if (mask) - filter.num_attrs++; - - pos += next_pos; - } - - mutex_lock(&mvm->mutex); - memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id], - &filter, sizeof(filter)); - - /* send updated bcast filtering configuration */ - if (iwl_mvm_firmware_running(mvm) && - mvm->dbgfs_bcast_filtering.override && - iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) - err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, - sizeof(cmd), &cmd); - mutex_unlock(&mvm->mutex); - - return err ?: count; -} - -static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - struct iwl_bcast_filter_cmd cmd; - char *buf; - int bufsz = 1024; - int i, pos = 0; - ssize_t ret; - - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mutex_lock(&mvm->mutex); - if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) { - ADD_TEXT("None\n"); - mutex_unlock(&mvm->mutex); - goto out; - } - mutex_unlock(&mvm->mutex); - - for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) { - const struct iwl_fw_bcast_mac *mac = &cmd.macs[i]; - - ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n", - i, mac->default_discard, mac->attached_filters); - } -out: - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; -} - -static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm, - char *buf, size_t count, - loff_t *ppos) -{ - struct iwl_bcast_filter_cmd cmd; - struct iwl_fw_bcast_mac mac = {}; - u32 mac_id, attached_filters; - int err = 0; - - if (!mvm->bcast_filters) - return -ENOENT; - - if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard, - &attached_filters) != 3) - return -EINVAL; - - if (mac_id >= ARRAY_SIZE(cmd.macs) || - mac.default_discard > 1 || - attached_filters >= BIT(ARRAY_SIZE(cmd.filters))) - return -EINVAL; - - mac.attached_filters = cpu_to_le16(attached_filters); - - mutex_lock(&mvm->mutex); - memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id], - &mac, sizeof(mac)); - - /* send updated bcast filtering configuration */ - if (iwl_mvm_firmware_running(mvm) && - mvm->dbgfs_bcast_filtering.override && - iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) - err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, - sizeof(cmd), &cmd); - mutex_unlock(&mvm->mutex); - - return err ?: count; -} -#endif - #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ 
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ @@ -1881,11 +1699,6 @@ MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512); MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids); -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256); -#endif - #ifdef CONFIG_ACPI MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile); #endif @@ -2045,7 +1858,6 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) { struct dentry *bcast_dir __maybe_unused; - char buf[100]; spin_lock_init(&mvm->drv_stats_lock); @@ -2097,21 +1909,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR); -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) { - bcast_dir = debugfs_create_dir("bcast_filtering", - mvm->debugfs_dir); - - debugfs_create_bool("override", 0600, bcast_dir, - &mvm->dbgfs_bcast_filtering.override); - - MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters, - bcast_dir, 0600); - MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs, - bcast_dir, 0600); - } -#endif - #ifdef CONFIG_PM_SLEEP MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400); debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir, @@ -2142,6 +1939,11 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) * Create a symlink with mac80211. It will be removed when mac80211 * exists (before the opmode exists which removes the target.) */ - snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent); - debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf); + if (!IS_ERR(mvm->debugfs_dir)) { + char buf[100]; + + snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent); + debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, + buf); + } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 6f4690e56a46..ae589b3b8c46 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1741,7 +1741,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) ret = iwl_mvm_sar_init(mvm); if (ret == 0) ret = iwl_mvm_sar_geo_init(mvm); - else if (ret < 0) + if (ret < 0) goto error; ret = iwl_mvm_sgom_init(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 65f4fe3ef504..709a3df57b10 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -55,79 +55,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { }, }; -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -/* - * Use the reserved field to indicate magic values. - * these values will only be used internally by the driver, - * and won't make it to the fw (reserved will be 0). - * BC_FILTER_MAGIC_IP - configure the val of this attribute to - * be the vif's ip address. in case there is not a single - * ip address (0, or more than 1), this attribute will - * be skipped. 
- * BC_FILTER_MAGIC_MAC - set the val of this attribute to - * the LSB bytes of the vif's mac address - */ -enum { - BC_FILTER_MAGIC_NONE = 0, - BC_FILTER_MAGIC_IP, - BC_FILTER_MAGIC_MAC, -}; - -static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = { - { - /* arp */ - .discard = 0, - .frame_type = BCAST_FILTER_FRAME_TYPE_ALL, - .attrs = { - { - /* frame type - arp, hw type - ethernet */ - .offset_type = - BCAST_FILTER_OFFSET_PAYLOAD_START, - .offset = sizeof(rfc1042_header), - .val = cpu_to_be32(0x08060001), - .mask = cpu_to_be32(0xffffffff), - }, - { - /* arp dest ip */ - .offset_type = - BCAST_FILTER_OFFSET_PAYLOAD_START, - .offset = sizeof(rfc1042_header) + 2 + - sizeof(struct arphdr) + - ETH_ALEN + sizeof(__be32) + - ETH_ALEN, - .mask = cpu_to_be32(0xffffffff), - /* mark it as special field */ - .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP), - }, - }, - }, - { - /* dhcp offer bcast */ - .discard = 0, - .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4, - .attrs = { - { - /* udp dest port - 68 (bootp client)*/ - .offset_type = BCAST_FILTER_OFFSET_IP_END, - .offset = offsetof(struct udphdr, dest), - .val = cpu_to_be32(0x00440000), - .mask = cpu_to_be32(0xffff0000), - }, - { - /* dhcp - lsb bytes of client hw address */ - .offset_type = BCAST_FILTER_OFFSET_IP_END, - .offset = 38, - .mask = cpu_to_be32(0xffffffff), - /* mark it as special field */ - .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC), - }, - }, - }, - /* last filter must be empty */ - {}, -}; -#endif - static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { .max_peers = IWL_MVM_TOF_MAX_APS, .report_ap_tsf = 1, @@ -299,7 +226,6 @@ static const u8 he_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, - [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, }; static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { @@ -693,11 +619,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) } #endif -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING - /* assign default bcast filtering configuration */ - mvm->bcast_filters = iwl_mvm_default_bcast_filters; -#endif - ret = iwl_mvm_leds_init(mvm); if (ret) return ret; @@ -1853,162 +1774,6 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); } -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING -struct iwl_bcast_iter_data { - struct iwl_mvm *mvm; - struct iwl_bcast_filter_cmd *cmd; - u8 current_filter; -}; - -static void -iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif, - const struct iwl_fw_bcast_filter *in_filter, - struct iwl_fw_bcast_filter *out_filter) -{ - struct iwl_fw_bcast_filter_attr *attr; - int i; - - memcpy(out_filter, in_filter, sizeof(*out_filter)); - - for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) { - attr = &out_filter->attrs[i]; - - if (!attr->mask) - break; - - switch (attr->reserved1) { - case cpu_to_le16(BC_FILTER_MAGIC_IP): - if (vif->bss_conf.arp_addr_cnt != 1) { - attr->mask = 0; - continue; - } - - attr->val = vif->bss_conf.arp_addr_list[0]; - break; - case cpu_to_le16(BC_FILTER_MAGIC_MAC): - attr->val = *(__be32 *)&vif->addr[2]; - break; - default: - break; - } - attr->reserved1 = 0; - out_filter->num_attrs++; - } -} - -static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_bcast_iter_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - struct iwl_bcast_filter_cmd *cmd = data->cmd; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct 
iwl_fw_bcast_mac *bcast_mac; - int i; - - if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs))) - return; - - bcast_mac = &cmd->macs[mvmvif->id]; - - /* - * enable filtering only for associated stations, but not for P2P - * Clients - */ - if (vif->type != NL80211_IFTYPE_STATION || vif->p2p || - !vif->bss_conf.assoc) - return; - - bcast_mac->default_discard = 1; - - /* copy all configured filters */ - for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) { - /* - * Make sure we don't exceed our filters limit. - * if there is still a valid filter to be configured, - * be on the safe side and just allow bcast for this mac. - */ - if (WARN_ON_ONCE(data->current_filter >= - ARRAY_SIZE(cmd->filters))) { - bcast_mac->default_discard = 0; - bcast_mac->attached_filters = 0; - break; - } - - iwl_mvm_set_bcast_filter(vif, - &mvm->bcast_filters[i], - &cmd->filters[data->current_filter]); - - /* skip current filter if it contains no attributes */ - if (!cmd->filters[data->current_filter].num_attrs) - continue; - - /* attach the filter to current mac */ - bcast_mac->attached_filters |= - cpu_to_le16(BIT(data->current_filter)); - - data->current_filter++; - } -} - -bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, - struct iwl_bcast_filter_cmd *cmd) -{ - struct iwl_bcast_iter_data iter_data = { - .mvm = mvm, - .cmd = cmd, - }; - - if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL) - return false; - - memset(cmd, 0, sizeof(*cmd)); - cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters); - cmd->max_macs = ARRAY_SIZE(cmd->macs); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - /* use debugfs filters/macs if override is configured */ - if (mvm->dbgfs_bcast_filtering.override) { - memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters, - sizeof(cmd->filters)); - memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs, - sizeof(cmd->macs)); - return true; - } -#endif - - /* if no filters are configured, do nothing */ - if (!mvm->bcast_filters) - return false; - - /* configure and attach these filters for each associated sta vif */ - ieee80211_iterate_active_interfaces( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bcast_filter_iterator, &iter_data); - - return true; -} - -static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) -{ - struct iwl_bcast_filter_cmd cmd; - - if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) - return 0; - - if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) - return 0; - - return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, - sizeof(cmd), &cmd); -} -#else -static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) -{ - return 0; -} -#endif - static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -2520,7 +2285,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, } iwl_mvm_recalc_multicast(mvm); - iwl_mvm_configure_bcast_filter(mvm); /* reset rssi values */ mvmvif->bf_data.ave_beacon_signal = 0; @@ -2570,11 +2334,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, } } - if (changes & BSS_CHANGED_ARP_FILTER) { - IWL_DEBUG_MAC80211(mvm, "arp filter changed\n"); - iwl_mvm_configure_bcast_filter(mvm); - } - if (changes & BSS_CHANGED_BANDWIDTH) iwl_mvm_apply_fw_smps_request(vif); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 1dcbb0eb63c3..d78f40730594 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -884,17 +884,6 @@ struct iwl_mvm { /* rx chain antennas set through debugfs 
for the scan command */ u8 scan_rx_ant; -#ifdef CONFIG_IWLWIFI_BCAST_FILTERING - /* broadcast filters to configure for each associated station */ - const struct iwl_fw_bcast_filter *bcast_filters; -#ifdef CONFIG_IWLWIFI_DEBUGFS - struct { - bool override; - struct iwl_bcast_filter_cmd cmd; - } dbgfs_bcast_filtering; -#endif -#endif - /* Internal station */ struct iwl_mvm_int_sta aux_sta; struct iwl_mvm_int_sta snif_sta; @@ -1593,8 +1582,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm); int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm); -bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, - struct iwl_bcast_filter_cmd *cmd); /* * FW notifications / CMD responses handlers @@ -2225,7 +2212,7 @@ static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm) static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm) { bool sw_rfkill = - mvm->hw_registered ? rfkill_blocked(mvm->hw->wiphy->rfkill) : false; + mvm->hw_registered ? rfkill_soft_blocked(mvm->hw->wiphy->rfkill) : false; if (mvm->mei_registered) iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 87630d38dc52..1f8b97995b94 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -469,7 +469,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(MCC_CHUB_UPDATE_CMD), HCMD_NAME(MARKER_CMD), HCMD_NAME(BT_PROFILE_NOTIFICATION), - HCMD_NAME(BCAST_FILTER_CMD), HCMD_NAME(MCAST_FILTER_CMD), HCMD_NAME(REPLY_SF_CFG_CMD), HCMD_NAME(REPLY_BEACON_FILTERING_CMD), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 6fa2c12f7955..9213f8518f10 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1427,7 +1427,7 @@ static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw, struct ieee80211_tx_rate *r = &info->status.rates[0]; if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP, - TX_CMD, 0) > 6) + TX_CMD, 0) <= 6) rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); info->status.antenna = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c index 78450366312b..080a1587caa5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c @@ -71,12 +71,13 @@ static int iwl_mvm_vendor_host_get_ownership(struct wiphy *wiphy, { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; mutex_lock(&mvm->mutex); - iwl_mvm_mei_get_ownership(mvm); + ret = iwl_mvm_mei_get_ownership(mvm); mutex_unlock(&mvm->mutex); - return 0; + return ret; } static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 0febdcacbd42..94f40c4d2421 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -385,8 +385,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - ret = -EIO; - goto out; + return -EIO; } iwl_enable_rfkill_int(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c 
b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index a63386a01232..ef14584fc0a1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1329,8 +1329,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - ret = -EIO; - goto out; + return -EIO; } iwl_enable_rfkill_int(trans); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 8d54f9face2f..fc5725f6daee 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2336,6 +2336,15 @@ static void hw_scan_work(struct work_struct *work) if (req->ie_len) skb_put_data(probe, req->ie, req->ie_len); + if (!ieee80211_tx_prepare_skb(hwsim->hw, + hwsim->hw_scan_vif, + probe, + hwsim->tmp_chan->band, + NULL)) { + kfree_skb(probe); + continue; + } + local_bh_disable(); mac80211_hwsim_tx_frame(hwsim->hw, probe, hwsim->tmp_chan); @@ -3770,6 +3779,10 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, } txi->flags |= IEEE80211_TX_STAT_ACK; } + + if (hwsim_flags & HWSIM_TX_CTL_NO_ACK) + txi->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + ieee80211_tx_status_irqsafe(data2->hw, skb); return 0; out: diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index d24b7a7993aa..990360d75cb6 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -256,6 +256,7 @@ static void backend_disconnect(struct backend_info *be) unsigned int queue_index; xen_unregister_watchers(vif); + xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); #ifdef CONFIG_DEBUG_FS xenvif_debugfs_delif(vif); #endif /* CONFIG_DEBUG_FS */ @@ -675,7 +676,6 @@ static void hotplug_status_changed(struct xenbus_watch *watch, /* Not interested in this watch anymore. 
*/ unregister_hotplug_status_watch(be); - xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); } kfree(str); } @@ -824,15 +824,11 @@ static void connect(struct backend_info *be) xenvif_carrier_on(be->vif); unregister_hotplug_status_watch(be); - if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) { - err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, - NULL, hotplug_status_changed, - "%s/%s", dev->nodename, - "hotplug-status"); - if (err) - goto err; + err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL, + hotplug_status_changed, + "%s/%s", dev->nodename, "hotplug-status"); + if (!err) be->have_hotplug_status_watch = 1; - } netif_tx_wake_all_queues(be->vif->dev); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8b18246ad999..daa4e6106aac 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -424,14 +424,12 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue) queue->tx_link[id] = TX_LINK_NONE; skb = queue->tx_skbs[id]; queue->tx_skbs[id] = NULL; - if (unlikely(gnttab_query_foreign_access( - queue->grant_tx_ref[id]) != 0)) { + if (unlikely(!gnttab_end_foreign_access_ref( + queue->grant_tx_ref[id], GNTMAP_readonly))) { dev_alert(dev, "Grant still in use by backend domain\n"); goto err; } - gnttab_end_foreign_access_ref( - queue->grant_tx_ref[id], GNTMAP_readonly); gnttab_release_grant_reference( &queue->gref_tx_head, queue->grant_tx_ref[id]); queue->grant_tx_ref[id] = GRANT_INVALID_REF; @@ -842,6 +840,28 @@ static int xennet_close(struct net_device *dev) return 0; } +static void xennet_destroy_queues(struct netfront_info *info) +{ + unsigned int i; + + for (i = 0; i < info->netdev->real_num_tx_queues; i++) { + struct netfront_queue *queue = &info->queues[i]; + + if (netif_running(info->netdev)) + napi_disable(&queue->napi); + netif_napi_del(&queue->napi); + } + + kfree(info->queues); + info->queues = NULL; +} + +static void xennet_uninit(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + xennet_destroy_queues(np); +} + static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) { unsigned long flags; @@ -968,7 +988,6 @@ static int xennet_get_responses(struct netfront_queue *queue, struct device *dev = &queue->info->netdev->dev; struct bpf_prog *xdp_prog; struct xdp_buff xdp; - unsigned long ret; int slots = 1; int err = 0; u32 verdict; @@ -1010,8 +1029,13 @@ static int xennet_get_responses(struct netfront_queue *queue, goto next; } - ret = gnttab_end_foreign_access_ref(ref, 0); - BUG_ON(!ret); + if (!gnttab_end_foreign_access_ref(ref, 0)) { + dev_alert(dev, + "Grant still in use by backend domain\n"); + queue->info->broken = true; + dev_alert(dev, "Disabled for further use\n"); + return -EINVAL; + } gnttab_release_grant_reference(&queue->gref_rx_head, ref); @@ -1232,6 +1256,10 @@ static int xennet_poll(struct napi_struct *napi, int budget) &need_xdp_flush); if (unlikely(err)) { + if (queue->info->broken) { + spin_unlock(&queue->rx_lock); + return 0; + } err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); @@ -1611,6 +1639,7 @@ static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp) } static const struct net_device_ops xennet_netdev_ops = { + .ndo_uninit = xennet_uninit, .ndo_open = xennet_open, .ndo_stop = xennet_close, .ndo_start_xmit = xennet_start_xmit, @@ -1895,7 +1924,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_queue *queue, unsigned int feature_split_evtchn) { struct xen_netif_tx_sring *txs; - 
struct xen_netif_rx_sring *rxs; + struct xen_netif_rx_sring *rxs = NULL; grant_ref_t gref; int err; @@ -1915,21 +1944,21 @@ static int setup_netfront(struct xenbus_device *dev, err = xenbus_grant_ring(dev, txs, 1, &gref); if (err < 0) - goto grant_tx_ring_fail; + goto fail; queue->tx_ring_ref = gref; rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); - goto alloc_rx_ring_fail; + goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); err = xenbus_grant_ring(dev, rxs, 1, &gref); if (err < 0) - goto grant_rx_ring_fail; + goto fail; queue->rx_ring_ref = gref; if (feature_split_evtchn) @@ -1942,22 +1971,28 @@ static int setup_netfront(struct xenbus_device *dev, err = setup_netfront_single(queue); if (err) - goto alloc_evtchn_fail; + goto fail; return 0; /* If we fail to setup netfront, it is safe to just revoke access to * granted pages because backend is not accessing it at this point. */ -alloc_evtchn_fail: - gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0); -grant_rx_ring_fail: - free_page((unsigned long)rxs); -alloc_rx_ring_fail: - gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0); -grant_tx_ring_fail: - free_page((unsigned long)txs); -fail: + fail: + if (queue->rx_ring_ref != GRANT_INVALID_REF) { + gnttab_end_foreign_access(queue->rx_ring_ref, 0, + (unsigned long)rxs); + queue->rx_ring_ref = GRANT_INVALID_REF; + } else { + free_page((unsigned long)rxs); + } + if (queue->tx_ring_ref != GRANT_INVALID_REF) { + gnttab_end_foreign_access(queue->tx_ring_ref, 0, + (unsigned long)txs); + queue->tx_ring_ref = GRANT_INVALID_REF; + } else { + free_page((unsigned long)txs); + } return err; } @@ -2103,22 +2138,6 @@ error: return err; } -static void xennet_destroy_queues(struct netfront_info *info) -{ - unsigned int i; - - for (i = 0; i < info->netdev->real_num_tx_queues; i++) { - struct netfront_queue *queue = &info->queues[i]; - - if (netif_running(info->netdev)) - napi_disable(&queue->napi); - netif_napi_del(&queue->napi); - } - - kfree(info->queues); - info->queues = NULL; -} - static int xennet_create_page_pool(struct netfront_queue *queue) diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c index d7db1a0e6be1..00d8ea6dcb5d 100644 --- a/drivers/nfc/port100.c +++ b/drivers/nfc/port100.c @@ -1612,7 +1612,9 @@ free_nfc_dev: nfc_digital_free_device(dev->nfc_digital_dev); error: + usb_kill_urb(dev->in_urb); usb_free_urb(dev->in_urb); + usb_kill_urb(dev->out_urb); usb_free_urb(dev->out_urb); usb_put_dev(dev->udev); diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c index fede05151f69..4081fc538ff4 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen4.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c @@ -168,6 +168,18 @@ static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) return NTB_TOPO_NONE; } +static enum ntb_topo spr_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) +{ + switch (ppd & SPR_PPD_TOPO_MASK) { + case SPR_PPD_TOPO_B2B_USD: + return NTB_TOPO_B2B_USD; + case SPR_PPD_TOPO_B2B_DSD: + return NTB_TOPO_B2B_DSD; + } + + return NTB_TOPO_NONE; +} + int gen4_init_dev(struct intel_ntb_dev *ndev) { struct pci_dev *pdev = ndev->ntb.pdev; @@ -183,7 +195,10 @@ int gen4_init_dev(struct intel_ntb_dev *ndev) } ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET); - ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1); + if (pdev_is_ICX(pdev)) + ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1); + else if (pdev_is_SPR(pdev)) + ndev->ntb.topo = 
spr_ppd_topo(ndev, ppd1); dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1, ntb_topo_string(ndev->ntb.topo)); if (ndev->ntb.topo == NTB_TOPO_NONE) diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.h b/drivers/ntb/hw/intel/ntb_hw_gen4.h index 3fcd3fdce9ed..f91323eaf5ce 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen4.h +++ b/drivers/ntb/hw/intel/ntb_hw_gen4.h @@ -49,10 +49,14 @@ #define GEN4_PPD_CLEAR_TRN 0x0001 #define GEN4_PPD_LINKTRN 0x0008 #define GEN4_PPD_CONN_MASK 0x0300 +#define SPR_PPD_CONN_MASK 0x0700 #define GEN4_PPD_CONN_B2B 0x0200 #define GEN4_PPD_DEV_MASK 0x1000 #define GEN4_PPD_DEV_DSD 0x1000 #define GEN4_PPD_DEV_USD 0x0000 +#define SPR_PPD_DEV_MASK 0x4000 +#define SPR_PPD_DEV_DSD 0x4000 +#define SPR_PPD_DEV_USD 0x0000 #define GEN4_LINK_CTRL_LINK_DISABLE 0x0010 #define GEN4_SLOTSTS 0xb05a @@ -62,6 +66,10 @@ #define GEN4_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_USD) #define GEN4_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_DSD) +#define SPR_PPD_TOPO_MASK (SPR_PPD_CONN_MASK | SPR_PPD_DEV_MASK) +#define SPR_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_USD) +#define SPR_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_DSD) + #define GEN4_DB_COUNT 32 #define GEN4_DB_LINK 32 #define GEN4_DB_LINK_BIT BIT_ULL(GEN4_DB_LINK) @@ -112,4 +120,12 @@ static inline int pdev_is_ICX(struct pci_dev *pdev) return 0; } +static inline int pdev_is_SPR(struct pci_dev *pdev) +{ + if (pdev_is_gen4(pdev) && + pdev->revision > PCI_DEVICE_REVISION_ICX_MAX) + return 1; + return 0; +} + #endif diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c index dd683cb58d09..6295e55ef85e 100644 --- a/drivers/ntb/msi.c +++ b/drivers/ntb/msi.c @@ -33,7 +33,6 @@ int ntb_msi_init(struct ntb_dev *ntb, { phys_addr_t mw_phys_addr; resource_size_t mw_size; - size_t struct_size; int peer_widx; int peers; int ret; @@ -43,9 +42,8 @@ int ntb_msi_init(struct ntb_dev *ntb, if (peers <= 0) return -EINVAL; - struct_size = sizeof(*ntb->msi) + sizeof(*ntb->msi->peer_mws) * peers; - - ntb->msi = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL); + ntb->msi = devm_kzalloc(&ntb->dev, struct_size(ntb->msi, peer_mws, peers), + GFP_KERNEL); if (!ntb->msi) return -ENOMEM; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 5e0bfda04bd7..fd4720d37cc0 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -368,6 +368,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_rq); void nvme_complete_batch_req(struct request *req) { + trace_nvme_complete_rq(req); nvme_cleanup_cmd(req); nvme_end_req_zoned(req); } @@ -1722,7 +1723,7 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns, return 0; } -static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) +static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) { struct nvme_ctrl *ctrl = ns->ctrl; @@ -1738,7 +1739,8 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) - return 0; + return; + if (ctrl->ops->flags & NVME_F_FABRICS) { /* * The NVMe over Fabrics specification only supports metadata as @@ -1746,7 +1748,7 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) * remap the separate metadata buffer from the block layer. 
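The ntb/msi.c hunk above is a small but worthwhile cleanup: struct_size() from <linux/overflow.h> computes sizeof(*p) plus n trailing flexible-array elements with overflow checking, which the open-coded multiply-and-add did not have. A minimal sketch of the pattern, using an invented struct since the real struct ntb_msi layout is not shown here:

#include <linux/overflow.h>
#include <linux/slab.h>

struct msi_state {				/* hypothetical stand-in */
	int peer_count;
	struct { u64 addr; } peer_mws[];	/* flexible array member */
};

static struct msi_state *alloc_msi_state(int peers)
{
	/* sizeof(*s) + peers * sizeof(s->peer_mws[0]); the helper
	 * saturates to SIZE_MAX on overflow, so a bogus 'peers' makes
	 * the allocation fail instead of being undersized */
	struct msi_state *s = kzalloc(struct_size(s, peer_mws, peers),
				      GFP_KERNEL);
	return s;
}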
*/ if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) - return -EINVAL; + return; ns->features |= NVME_NS_EXT_LBAS; @@ -1773,8 +1775,6 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) else ns->features |= NVME_NS_METADATA_SUPPORTED; } - - return 0; } static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, @@ -1915,9 +1915,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) ns->lba_shift = id->lbaf[lbaf].ds; nvme_set_queue_limits(ns->ctrl, ns->queue); - ret = nvme_configure_metadata(ns, id); - if (ret) - goto out_unfreeze; + nvme_configure_metadata(ns, id); nvme_set_chunk_sectors(ns, id); nvme_update_disk_info(ns->disk, ns, id); @@ -1933,7 +1931,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) if (blk_queue_is_zoned(ns->queue)) { ret = nvme_revalidate_zones(ns); if (ret && !nvme_first_scan(ns->disk)) - goto out; + return ret; } if (nvme_ns_head_multipath(ns->head)) { @@ -1948,16 +1946,16 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) return 0; out_unfreeze: - blk_mq_unfreeze_queue(ns->disk->queue); -out: /* * If probing fails due an unsupported feature, hide the block device, * but still allow other access. */ if (ret == -ENODEV) { ns->disk->flags |= GENHD_FL_HIDDEN; + set_bit(NVME_NS_READY, &ns->flags); ret = 0; } + blk_mq_unfreeze_queue(ns->disk->queue); return ret; } @@ -4253,7 +4251,14 @@ static void nvme_async_event_work(struct work_struct *work) container_of(work, struct nvme_ctrl, async_event_work); nvme_aen_uevent(ctrl); - ctrl->ops->submit_async_event(ctrl); + + /* + * The transport drivers must guarantee AER submission here is safe by + * flushing ctrl async_event_work after changing the controller state + * from LIVE and before freeing the admin queue. 
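The new LIVE check above only closes the race if transports follow the ordering the comment describes. A sketch of what that teardown ordering looks like; teardown_admin_queue() is a hypothetical placeholder for whatever the transport actually does:

	/* 1. Leave LIVE first, so a freshly scheduled
	 *    nvme_async_event_work() sees the state and bails out. */
	nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING);

	/* 2. Wait out an instance that was already running and might
	 *    have sampled the old state. */
	flush_work(&ctrl->async_event_work);

	/* 3. Only now is it safe to free the admin queue the worker
	 *    would have submitted the AER on. */
	teardown_admin_queue(ctrl);		/* hypothetical helper */

The rdma.c and tcp.c hunks further down add exactly this flush_work() call to their error-recovery paths.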
+ */ + if (ctrl->state == NVME_CTRL_LIVE) + ctrl->ops->submit_async_event(ctrl); } static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) @@ -4566,7 +4571,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns) if (test_and_set_bit(NVME_NS_DEAD, &ns->flags)) return; - blk_set_queue_dying(ns->queue); + blk_mark_disk_dead(ns->disk); nvme_start_ns_queue(ns); set_capacity_and_notify(ns->disk, 0); diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index c3203ff1c654..1e3a09cad961 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -170,6 +170,7 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl, struct nvmf_ctrl_options *opts) { if (ctrl->state == NVME_CTRL_DELETING || + ctrl->state == NVME_CTRL_DELETING_NOIO || ctrl->state == NVME_CTRL_DEAD || strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) || strcmp(opts->host->nqn, ctrl->opts->host->nqn) || diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index f8bf6606eb2f..ff775235534c 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -848,7 +848,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) { if (!head->disk) return; - blk_set_queue_dying(head->disk->queue); + blk_mark_disk_dead(head->disk); /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 850f84d204d0..9c55e4be8a39 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1200,6 +1200,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) struct nvme_rdma_ctrl, err_work); nvme_stop_keep_alive(&ctrl->ctrl); + flush_work(&ctrl->ctrl.async_event_work); nvme_rdma_teardown_io_queues(ctrl, false); nvme_start_queues(&ctrl->ctrl); nvme_rdma_teardown_admin_queue(ctrl, false); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 4ceb28675fdf..65e00c64a588 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -44,6 +44,8 @@ struct nvme_tcp_request { u32 data_len; u32 pdu_len; u32 pdu_sent; + u32 h2cdata_left; + u32 h2cdata_offset; u16 ttag; __le16 status; struct list_head entry; @@ -95,6 +97,7 @@ struct nvme_tcp_queue { struct nvme_tcp_request *request; int queue_size; + u32 maxh2cdata; size_t cmnd_capsule_len; struct nvme_tcp_ctrl *ctrl; unsigned long flags; @@ -572,23 +575,26 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, return ret; } -static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, - struct nvme_tcp_r2t_pdu *pdu) +static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req) { struct nvme_tcp_data_pdu *data = req->pdu; struct nvme_tcp_queue *queue = req->queue; struct request *rq = blk_mq_rq_from_pdu(req); + u32 h2cdata_sent = req->pdu_len; u8 hdgst = nvme_tcp_hdgst_len(queue); u8 ddgst = nvme_tcp_ddgst_len(queue); req->state = NVME_TCP_SEND_H2C_PDU; req->offset = 0; - req->pdu_len = le32_to_cpu(pdu->r2t_length); + req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata); req->pdu_sent = 0; + req->h2cdata_left -= req->pdu_len; + req->h2cdata_offset += h2cdata_sent; memset(data, 0, sizeof(*data)); data->hdr.type = nvme_tcp_h2c_data; - data->hdr.flags = NVME_TCP_F_DATA_LAST; + if (!req->h2cdata_left) + data->hdr.flags = NVME_TCP_F_DATA_LAST; if (queue->hdr_digest) data->hdr.flags |= NVME_TCP_F_HDGST; if (queue->data_digest) @@ -597,9 +603,9 @@ static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, data->hdr.pdo = 
data->hdr.hlen + hdgst; data->hdr.plen = cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); - data->ttag = pdu->ttag; + data->ttag = req->ttag; data->command_id = nvme_cid(rq); - data->data_offset = pdu->r2t_offset; + data->data_offset = cpu_to_le32(req->h2cdata_offset); data->data_length = cpu_to_le32(req->pdu_len); } @@ -609,6 +615,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, struct nvme_tcp_request *req; struct request *rq; u32 r2t_length = le32_to_cpu(pdu->r2t_length); + u32 r2t_offset = le32_to_cpu(pdu->r2t_offset); rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); if (!rq) { @@ -633,14 +640,19 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, return -EPROTO; } - if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) { + if (unlikely(r2t_offset < req->data_sent)) { dev_err(queue->ctrl->ctrl.device, "req %d unexpected r2t offset %u (expected %zu)\n", - rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent); + rq->tag, r2t_offset, req->data_sent); return -EPROTO; } - nvme_tcp_setup_h2c_data_pdu(req, pdu); + req->pdu_len = 0; + req->h2cdata_left = r2t_length; + req->h2cdata_offset = r2t_offset; + req->ttag = pdu->ttag; + + nvme_tcp_setup_h2c_data_pdu(req); nvme_tcp_queue_request(req, false, true); return 0; @@ -913,13 +925,22 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) static void nvme_tcp_fail_request(struct nvme_tcp_request *req) { - nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR); + if (nvme_tcp_async_req(req)) { + union nvme_result res = {}; + + nvme_complete_async_event(&req->queue->ctrl->ctrl, + cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res); + } else { + nvme_tcp_end_request(blk_mq_rq_from_pdu(req), + NVME_SC_HOST_PATH_ERROR); + } } static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) { struct nvme_tcp_queue *queue = req->queue; int req_data_len = req->data_len; + u32 h2cdata_left = req->h2cdata_left; while (true) { struct page *page = nvme_tcp_req_cur_page(req); @@ -964,7 +985,10 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) req->state = NVME_TCP_SEND_DDGST; req->offset = 0; } else { - nvme_tcp_done_send_req(queue); + if (h2cdata_left) + nvme_tcp_setup_h2c_data_pdu(req); + else + nvme_tcp_done_send_req(queue); } return 1; } @@ -1022,9 +1046,14 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) if (queue->hdr_digest && !req->offset) nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); - ret = kernel_sendpage(queue->sock, virt_to_page(pdu), - offset_in_page(pdu) + req->offset, len, - MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST); + if (!req->h2cdata_left) + ret = kernel_sendpage(queue->sock, virt_to_page(pdu), + offset_in_page(pdu) + req->offset, len, + MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST); + else + ret = sock_no_sendpage(queue->sock, virt_to_page(pdu), + offset_in_page(pdu) + req->offset, len, + MSG_DONTWAIT | MSG_MORE); if (unlikely(ret <= 0)) return ret; @@ -1044,6 +1073,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req) { struct nvme_tcp_queue *queue = req->queue; size_t offset = req->offset; + u32 h2cdata_left = req->h2cdata_left; int ret; struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; struct kvec iov = { @@ -1061,7 +1091,10 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req) return ret; if (offset + ret == NVME_TCP_DIGEST_LENGTH) { - nvme_tcp_done_send_req(queue); + if (h2cdata_left) + nvme_tcp_setup_h2c_data_pdu(req); + else + nvme_tcp_done_send_req(queue); return 
1; } @@ -1253,6 +1286,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) struct msghdr msg = {}; struct kvec iov; bool ctrl_hdgst, ctrl_ddgst; + u32 maxh2cdata; int ret; icreq = kzalloc(sizeof(*icreq), GFP_KERNEL); @@ -1336,6 +1370,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) goto free_icresp; } + maxh2cdata = le32_to_cpu(icresp->maxdata); + if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) { + pr_err("queue %d: invalid maxh2cdata returned %u\n", + nvme_tcp_queue_id(queue), maxh2cdata); + goto free_icresp; + } + queue->maxh2cdata = maxh2cdata; + ret = 0; free_icresp: kfree(icresp); @@ -2096,6 +2138,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work) struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; nvme_stop_keep_alive(ctrl); + flush_work(&ctrl->async_event_work); nvme_tcp_teardown_io_queues(ctrl, false); /* unquiesce to fail fast pending requests */ nvme_start_queues(ctrl); @@ -2320,6 +2363,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns, req->data_sent = 0; req->pdu_len = 0; req->pdu_sent = 0; + req->h2cdata_left = 0; req->data_len = blk_rq_nr_phys_segments(rq) ? blk_rq_payload_bytes(rq) : 0; req->curr_bio = rq->bio; diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 091a0ca16361..496d775c6770 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -1233,44 +1233,6 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item, } CONFIGFS_ATTR(nvmet_subsys_, attr_model); -static ssize_t nvmet_subsys_attr_discovery_nqn_show(struct config_item *item, - char *page) -{ - return snprintf(page, PAGE_SIZE, "%s\n", - nvmet_disc_subsys->subsysnqn); -} - -static ssize_t nvmet_subsys_attr_discovery_nqn_store(struct config_item *item, - const char *page, size_t count) -{ - struct nvmet_subsys *subsys = to_subsys(item); - char *subsysnqn; - int len; - - len = strcspn(page, "\n"); - if (!len) - return -EINVAL; - - subsysnqn = kmemdup_nul(page, len, GFP_KERNEL); - if (!subsysnqn) - return -ENOMEM; - - /* - * The discovery NQN must be different from subsystem NQN. 
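Stepping back, the tcp.c changes above implement MAXH2CDATA compliance end to end: the ICResp value is validated (a multiple of four, at least NVME_TCP_MIN_MAXH2CDATA) and cached, and a single R2T is then answered with as many H2CData PDUs as needed instead of one oversized PDU. The real driver does this through its send state machine; flattened into a loop purely for illustration (send_h2c_data_pdu() is a stand-in name):

	/* on R2T: record the full window granted by the controller */
	req->h2cdata_left   = le32_to_cpu(pdu->r2t_length);
	req->h2cdata_offset = le32_to_cpu(pdu->r2t_offset);
	req->ttag           = pdu->ttag;

	while (req->h2cdata_left) {
		u32 len = min(req->h2cdata_left, queue->maxh2cdata);

		/* only the final chunk carries NVME_TCP_F_DATA_LAST */
		send_h2c_data_pdu(req, len);
		req->h2cdata_offset += len;
		req->h2cdata_left   -= len;
	}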
- */ - if (!strcmp(subsysnqn, subsys->subsysnqn)) { - kfree(subsysnqn); - return -EBUSY; - } - down_write(&nvmet_config_sem); - kfree(nvmet_disc_subsys->subsysnqn); - nvmet_disc_subsys->subsysnqn = subsysnqn; - up_write(&nvmet_config_sem); - - return count; -} -CONFIGFS_ATTR(nvmet_subsys_, attr_discovery_nqn); - #ifdef CONFIG_BLK_DEV_INTEGRITY static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item, char *page) @@ -1300,7 +1262,6 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = { &nvmet_subsys_attr_attr_cntlid_min, &nvmet_subsys_attr_attr_cntlid_max, &nvmet_subsys_attr_attr_model, - &nvmet_subsys_attr_attr_discovery_nqn, #ifdef CONFIG_BLK_DEV_INTEGRITY &nvmet_subsys_attr_attr_pi_enable, #endif diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 5119c687de68..626caf6f1e4b 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -1493,8 +1493,7 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, if (!port) return NULL; - if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn) || - !strcmp(nvmet_disc_subsys->subsysnqn, subsysnqn)) { + if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) { if (!kref_get_unless_zero(&nvmet_disc_subsys->ref)) return NULL; return nvmet_disc_subsys; diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 23a38dcf0fc4..9fd1602b539d 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -771,7 +771,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) if (config->wp_gpio) nvmem->wp_gpio = config->wp_gpio; - else + else if (!config->ignore_wp) nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp", GPIOD_OUT_HIGH); if (IS_ERR(nvmem->wp_gpio)) { diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index ad85ff6474ff..ec315b060cd5 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -648,8 +648,8 @@ void __init early_init_fdt_scan_reserved_mem(void) } fdt_scan_reserved_mem(); - fdt_init_reserved_mem(); fdt_reserve_elfcorehdr(); + fdt_init_reserved_mem(); } /** diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 70992103c07d..2c2fb161b572 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -513,24 +513,24 @@ static void __init of_unittest_parse_phandle_with_args(void) memset(&args, 0, sizeof(args)); EXPECT_BEGIN(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1"); + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); rc = of_parse_phandle_with_args(np, "phandle-list-bad-args", "#phandle-cells", 1, &args); EXPECT_END(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1"); + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); EXPECT_BEGIN(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1"); + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); rc = of_count_phandle_with_args(np, "phandle-list-bad-args", "#phandle-cells"); EXPECT_END(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1"); + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); } @@ -670,12 +670,12 @@ static void __init of_unittest_parse_phandle_with_args_map(void) memset(&args, 0, sizeof(args)); EXPECT_BEGIN(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1"); + "OF: 
/testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1"); rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-args", "phandle", 1, &args); EXPECT_END(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1"); + "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1"); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); } @@ -1257,12 +1257,12 @@ static void __init of_unittest_platform_populate(void) unittest(pdev, "device 2 creation failed\n"); EXPECT_BEGIN(KERN_INFO, - "platform testcase-data:testcase-device2: IRQ index 0 not found"); + "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found"); irq = platform_get_irq(pdev, 0); EXPECT_END(KERN_INFO, - "platform testcase-data:testcase-device2: IRQ index 0 not found"); + "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found"); unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq); diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 059566f54429..9be007c9420f 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1003,7 +1003,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usg_calls++; #endif - while(sg_dma_len(sglist) && nents--) { + while (nents && sg_dma_len(sglist)) { #ifdef CCIO_COLLECT_STATS ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; @@ -1011,6 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ccio_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction, 0); ++sglist; + nents--; } DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index e60690d38d67..374b9199878d 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, spin_unlock_irqrestore(&ioc->res_lock, flags); #endif - while (sg_dma_len(sglist) && nents--) { + while (nents && sg_dma_len(sglist)) { sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction, 0); @@ -1056,6 +1056,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usingle_calls--; /* kluge since call is unmap_sg() */ #endif ++sglist; + nents--; } DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index 489586a4cdc7..768d33f9ebc8 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -356,8 +356,8 @@ static int j721e_pcie_probe(struct platform_device *pdev) const struct j721e_pcie_data *data; struct cdns_pcie *cdns_pcie; struct j721e_pcie *pcie; - struct cdns_pcie_rc *rc; - struct cdns_pcie_ep *ep; + struct cdns_pcie_rc *rc = NULL; + struct cdns_pcie_ep *ep = NULL; struct gpio_desc *gpiod; void __iomem *base; struct clk *clk; @@ -376,6 +376,46 @@ static int j721e_pcie_probe(struct platform_device *pdev) if (!pcie) return -ENOMEM; + switch (mode) { + case PCI_MODE_RC: + if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) + return -ENODEV; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); + if (!bridge) + return -ENOMEM; + + if (!data->byte_access_allowed) + bridge->ops = &cdns_ti_pcie_host_ops; + rc = pci_host_bridge_priv(bridge); + rc->quirk_retrain_flag = data->quirk_retrain_flag; + rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag; + + cdns_pcie = 
&rc->pcie; + cdns_pcie->dev = dev; + cdns_pcie->ops = &j721e_pcie_ops; + pcie->cdns_pcie = cdns_pcie; + break; + case PCI_MODE_EP: + if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) + return -ENODEV; + + ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); + if (!ep) + return -ENOMEM; + + ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag; + + cdns_pcie = &ep->pcie; + cdns_pcie->dev = dev; + cdns_pcie->ops = &j721e_pcie_ops; + pcie->cdns_pcie = cdns_pcie; + break; + default: + dev_err(dev, "INVALID device type %d\n", mode); + return 0; + } + pcie->mode = mode; pcie->linkdown_irq_regfield = data->linkdown_irq_regfield; @@ -426,28 +466,6 @@ static int j721e_pcie_probe(struct platform_device *pdev) switch (mode) { case PCI_MODE_RC: - if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) { - ret = -ENODEV; - goto err_get_sync; - } - - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); - if (!bridge) { - ret = -ENOMEM; - goto err_get_sync; - } - - if (!data->byte_access_allowed) - bridge->ops = &cdns_ti_pcie_host_ops; - rc = pci_host_bridge_priv(bridge); - rc->quirk_retrain_flag = data->quirk_retrain_flag; - rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag; - - cdns_pcie = &rc->pcie; - cdns_pcie->dev = dev; - cdns_pcie->ops = &j721e_pcie_ops; - pcie->cdns_pcie = cdns_pcie; - gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(gpiod)) { ret = PTR_ERR(gpiod); @@ -497,23 +515,6 @@ static int j721e_pcie_probe(struct platform_device *pdev) break; case PCI_MODE_EP: - if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) { - ret = -ENODEV; - goto err_get_sync; - } - - ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); - if (!ep) { - ret = -ENOMEM; - goto err_get_sync; - } - ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag; - - cdns_pcie = &ep->pcie; - cdns_pcie->dev = dev; - cdns_pcie->ops = &j721e_pcie_ops; - pcie->cdns_pcie = cdns_pcie; - ret = cdns_pcie_init_phy(dev, cdns_pcie); if (ret) { dev_err(dev, "Failed to init phy\n"); @@ -525,8 +526,6 @@ static int j721e_pcie_probe(struct platform_device *pdev) goto err_pcie_setup; break; - default: - dev_err(dev, "INVALID device type %d\n", mode); } return 0; diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index fa6886d66488..c625fc6bb287 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -756,22 +756,28 @@ static int __exit kirin_pcie_remove(struct platform_device *pdev) return 0; } +struct kirin_pcie_data { + enum pcie_kirin_phy_type phy_type; +}; + +static const struct kirin_pcie_data kirin_960_data = { + .phy_type = PCIE_KIRIN_INTERNAL_PHY, +}; + +static const struct kirin_pcie_data kirin_970_data = { + .phy_type = PCIE_KIRIN_EXTERNAL_PHY, +}; + static const struct of_device_id kirin_pcie_match[] = { - { - .compatible = "hisilicon,kirin960-pcie", - .data = (void *)PCIE_KIRIN_INTERNAL_PHY - }, - { - .compatible = "hisilicon,kirin970-pcie", - .data = (void *)PCIE_KIRIN_EXTERNAL_PHY - }, + { .compatible = "hisilicon,kirin960-pcie", .data = &kirin_960_data }, + { .compatible = "hisilicon,kirin970-pcie", .data = &kirin_970_data }, {}, }; static int kirin_pcie_probe(struct platform_device *pdev) { - enum pcie_kirin_phy_type phy_type; struct device *dev = &pdev->dev; + const struct kirin_pcie_data *data; struct kirin_pcie *kirin_pcie; struct dw_pcie *pci; int ret; @@ -781,13 +787,12 @@ static int kirin_pcie_probe(struct platform_device *pdev) return -EINVAL; } - phy_type = (long)of_device_get_match_data(dev); - if (!phy_type) { + data = 
of_device_get_match_data(dev); + if (!data) { dev_err(dev, "OF data missing\n"); return -EINVAL; } - kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL); if (!kirin_pcie) return -ENOMEM; @@ -800,7 +805,7 @@ static int kirin_pcie_probe(struct platform_device *pdev) pci->ops = &kirin_dw_pcie_ops; pci->pp.ops = &kirin_pcie_host_ops; kirin_pcie->pci = pci; - kirin_pcie->type = phy_type; + kirin_pcie->type = data->phy_type; ret = kirin_pcie_get_resource(kirin_pcie, pdev); if (ret) diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 20ea2ee330b8..ae0bc2fee4ca 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -2155,8 +2155,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus) if (!hv_dev) continue; - if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY) - set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node); + if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY && + hv_dev->desc.virtual_numa_node < num_possible_nodes()) + /* + * The kernel may boot with some NUMA nodes offline + * (e.g. in a KDUMP kernel) or with NUMA disabled via + * "numa=off". In those cases, adjust the host provided + * NUMA node to a valid NUMA node used by the kernel. + */ + set_dev_node(&dev->dev, + numa_map_to_online_node( + hv_dev->desc.virtual_numa_node)); put_pcichild(hv_dev); } diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index 71258ea3d35f..f8e82c5e2d87 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -1329,7 +1329,8 @@ static int mvebu_pcie_probe(struct platform_device *pdev) * indirectly via kernel emulated PCI bridge driver. */ mvebu_pcie_setup_hw(port); - mvebu_pcie_set_local_dev_nr(port, 0); + mvebu_pcie_set_local_dev_nr(port, 1); + mvebu_pcie_set_local_bus_nr(port, 0); } pcie->nports = i; diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c index 854d95163112..a2c3c207a04b 100644 --- a/drivers/pci/controller/pcie-apple.c +++ b/drivers/pci/controller/pcie-apple.c @@ -219,7 +219,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, if (hwirq < 0) return -ENOSPC; - fwspec.param[1] += hwirq; + fwspec.param[fwspec.param_count - 2] += hwirq; ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec); if (ret) diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index cc166c683638..eb05cceab964 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -99,11 +99,13 @@ struct vmd_irq { * @srcu: SRCU struct for local synchronization. * @count: number of child IRQs assigned to this vector; used to track * sharing. 
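The pci-hyperv.c change above is the canonical way to consume a NUMA hint from a hypervisor or firmware: range-check it against num_possible_nodes(), then let numa_map_to_online_node() redirect it to a node the kernel actually brought up (kdump and "numa=off" boots are the usual offenders). Condensed, with hv_dev/dev as in the hunk:

	u16 node = hv_dev->desc.virtual_numa_node;

	if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
	    node < num_possible_nodes())
		set_dev_node(&dev->dev, numa_map_to_online_node(node));

numa_map_to_online_node() picks a nearby online node rather than failing, so the device still lands somewhere sensible.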
+ * @virq: The underlying VMD Linux interrupt number */ struct vmd_irq_list { struct list_head irq_list; struct srcu_struct srcu; unsigned int count; + unsigned int virq; }; struct vmd_dev { @@ -253,7 +255,6 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, struct msi_desc *desc = arg->desc; struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus); struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); - unsigned int index, vector; if (!vmdirq) return -ENOMEM; @@ -261,10 +262,8 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, INIT_LIST_HEAD(&vmdirq->node); vmdirq->irq = vmd_next_irq(vmd, desc); vmdirq->virq = virq; - index = index_from_irqs(vmd, vmdirq->irq); - vector = pci_irq_vector(vmd->dev, index); - irq_domain_set_info(domain, virq, vector, info->chip, vmdirq, + irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq, handle_untracked_irq, vmd, NULL); return 0; } @@ -685,7 +684,8 @@ static int vmd_alloc_irqs(struct vmd_dev *vmd) return err; INIT_LIST_HEAD(&vmd->irqs[i].irq_list); - err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), + vmd->irqs[i].virq = pci_irq_vector(dev, i); + err = devm_request_irq(&dev->dev, vmd->irqs[i].virq, vmd_irq, IRQF_NO_THREAD, vmd->name, &vmd->irqs[i]); if (err) @@ -969,7 +969,7 @@ static int vmd_suspend(struct device *dev) int i; for (i = 0; i < vmd->msix_count; i++) - devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]); + devm_free_irq(dev, vmd->irqs[i].virq, &vmd->irqs[i]); return 0; } @@ -981,7 +981,7 @@ static int vmd_resume(struct device *dev) int err, i; for (i = 0; i < vmd->msix_count; i++) { - err = devm_request_irq(dev, pci_irq_vector(pdev, i), + err = devm_request_irq(dev, vmd->irqs[i].virq, vmd_irq, IRQF_NO_THREAD, vmd->name, &vmd->irqs[i]); if (err) diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c index c19c7ca58186..9037a7827eca 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -1111,7 +1111,8 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr) if (!desc) return cpu_possible_mask; - if (WARN_ON_ONCE(!desc->affinity)) + /* MSI[X] interrupts can be allocated without affinity descriptor */ + if (!desc->affinity) return NULL; /* diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 588588cfda48..415f7664b010 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -596,7 +596,7 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state) int error; error = drv->suspend(pci_dev, state); - suspend_report_result(drv->suspend, error); + suspend_report_result(dev, drv->suspend, error); if (error) return error; @@ -775,7 +775,7 @@ static int pci_pm_suspend(struct device *dev) int error; error = pm->suspend(dev); - suspend_report_result(pm->suspend, error); + suspend_report_result(dev, pm->suspend, error); if (error) return error; @@ -821,7 +821,7 @@ static int pci_pm_suspend_noirq(struct device *dev) int error; error = pm->suspend_noirq(dev); - suspend_report_result(pm->suspend_noirq, error); + suspend_report_result(dev, pm->suspend_noirq, error); if (error) return error; @@ -1010,7 +1010,7 @@ static int pci_pm_freeze(struct device *dev) int error; error = pm->freeze(dev); - suspend_report_result(pm->freeze, error); + suspend_report_result(dev, pm->freeze, error); if (error) return error; } @@ -1030,7 +1030,7 @@ static int pci_pm_freeze_noirq(struct device *dev) int error; error = pm->freeze_noirq(dev); - suspend_report_result(pm->freeze_noirq, 
error); + suspend_report_result(dev, pm->freeze_noirq, error); if (error) return error; } @@ -1116,7 +1116,7 @@ static int pci_pm_poweroff(struct device *dev) int error; error = pm->poweroff(dev); - suspend_report_result(pm->poweroff, error); + suspend_report_result(dev, pm->poweroff, error); if (error) return error; } @@ -1154,7 +1154,7 @@ static int pci_pm_poweroff_noirq(struct device *dev) int error; error = pm->poweroff_noirq(dev); - suspend_report_result(pm->poweroff_noirq, error); + suspend_report_result(dev, pm->poweroff_noirq, error); if (error) return error; } diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index bda630889f95..604feeb84ee4 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -166,6 +166,9 @@ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask) { int ret, i; + for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) + irqs[i] = -1; + /* * If we support PME but can't use MSI/MSI-X for it, we have to * fall back to INTx or other interrupts, e.g., a system shared @@ -314,10 +317,8 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq) */ int pcie_port_device_register(struct pci_dev *dev) { - int status, capabilities, irq_services, i, nr_service; - int irqs[PCIE_PORT_DEVICE_MAXSERVICES] = { - [0 ... PCIE_PORT_DEVICE_MAXSERVICES-1] = -1 - }; + int status, capabilities, i, nr_service; + int irqs[PCIE_PORT_DEVICE_MAXSERVICES]; /* Enable PCI Express port device */ status = pci_enable_device(dev); @@ -330,32 +331,18 @@ int pcie_port_device_register(struct pci_dev *dev) return 0; pci_set_master(dev); - - irq_services = 0; - if (IS_ENABLED(CONFIG_PCIE_PME)) - irq_services |= PCIE_PORT_SERVICE_PME; - if (IS_ENABLED(CONFIG_PCIEAER)) - irq_services |= PCIE_PORT_SERVICE_AER; - if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) - irq_services |= PCIE_PORT_SERVICE_HP; - if (IS_ENABLED(CONFIG_PCIE_DPC)) - irq_services |= PCIE_PORT_SERVICE_DPC; - irq_services &= capabilities; - - if (irq_services) { - /* - * Initialize service IRQs. Don't use service devices that - * require interrupts if there is no way to generate them. - * However, some drivers may have a polling mode (e.g. - * pciehp_poll_mode) that can be used in the absence of IRQs. - * Allow them to determine if that is to be used. - */ - status = pcie_init_service_irqs(dev, irqs, irq_services); - if (status) { - irq_services &= PCIE_PORT_SERVICE_HP; - if (!irq_services) - goto error_disable; - } + /* + * Initialize service irqs. Don't use service devices that + * require interrupts if there is no way to generate them. + * However, some drivers may have a polling mode (e.g. pciehp_poll_mode) + * that can be used in the absence of irqs. Allow them to determine + * if that is to be used. 
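All of the pci-driver.c hunks above are the same mechanical conversion: suspend_report_result() grew a struct device argument so the failure report can name the device whose PM callback failed, not just the callback. The resulting call pattern, as used throughout the file:

	error = pm->suspend_noirq(dev);
	suspend_report_result(dev, pm->suspend_noirq, error);
	if (error)
		return error;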
+ */ + status = pcie_init_service_irqs(dev, irqs, capabilities); + if (status) { + capabilities &= PCIE_PORT_SERVICE_HP; + if (!capabilities) + goto error_disable; } /* Allocate child services if any */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index d2dd6a6cda60..65f7f6b0576c 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5344,11 +5344,6 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); */ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) { - if ((pdev->device == 0x7312 && pdev->revision != 0x00) || - (pdev->device == 0x7340 && pdev->revision != 0xc5) || - (pdev->device == 0x7341 && pdev->revision != 0x00)) - return; - if (pdev->device == 0x15d8) { if (pdev->revision == 0xcf && pdev->subsystem_vendor == 0xea50 && @@ -5370,10 +5365,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats); /* AMD Iceland dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats); /* AMD Navi10 dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7310, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7318, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7319, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731a, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731b, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731e, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731f, quirk_amd_harvest_no_ats); /* AMD Navi14 dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats); /* AMD Raven platform iGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats); #endif /* CONFIG_PCI_ATS */ diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index e1a0c44bc686..d05ca6ebbb9d 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -141,11 +141,25 @@ config ARM_DMC620_PMU config MARVELL_CN10K_TAD_PMU tristate "Marvell CN10K LLC-TAD PMU" - depends on ARM64 || (COMPILE_TEST && 64BIT) + depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT) help Provides support for Last-Level cache Tag-and-data Units (LLC-TAD) performance monitors on CN10K family silicons. +config APPLE_M1_CPU_PMU + bool "Apple M1 CPU PMU support" + depends on ARM_PMU && ARCH_APPLE + help + Provides support for the non-architectural CPU PMUs present on + the Apple M1 SoCs and derivatives. + source "drivers/perf/hisilicon/Kconfig" +config MARVELL_CN10K_DDR_PMU + tristate "Enable MARVELL CN10K DRAM Subsystem(DSS) PMU Support" + depends on ARM64 || (COMPILE_TEST && 64BIT) + help + Enable perf support for Marvell DDR Performance monitoring + event on CN10K platform. 
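The AMD quirk rework above trades the revision allow-list inside the handler for explicit per-device DECLARE_PCI_FIXUP_FINAL() entries, which is the normal shape for PCI quirks: one registration line per device ID, one shared handler. Illustrative sketch only — 0xffff is a made-up device ID, and the body shows what the harvest quirk ultimately does upstream:

static void quirk_example_no_ats(struct pci_dev *pdev)
{
	/* hide the ATS capability so the IOMMU layer never enables it */
	pdev->ats_cap = 0;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xffff, quirk_example_no_ats);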
+ endmenu diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index 2db5418d5b0a..4f43080ec54e 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -15,3 +15,5 @@ obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o +obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o +obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c new file mode 100644 index 000000000000..979a7c2b4f56 --- /dev/null +++ b/drivers/perf/apple_m1_cpu_pmu.c @@ -0,0 +1,584 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CPU PMU driver for the Apple M1 and derivatives + * + * Copyright (C) 2021 Google LLC + * + * Author: Marc Zyngier <maz@kernel.org> + * + * Most of the information used in this driver was provided by the + * Asahi Linux project. The rest was experimentally discovered. + */ + +#include <linux/of.h> +#include <linux/perf/arm_pmu.h> +#include <linux/platform_device.h> + +#include <asm/apple_m1_pmu.h> +#include <asm/irq_regs.h> +#include <asm/perf_event.h> + +#define M1_PMU_NR_COUNTERS 10 + +#define M1_PMU_CFG_EVENT GENMASK(7, 0) + +#define ANY_BUT_0_1 GENMASK(9, 2) +#define ONLY_2_TO_7 GENMASK(7, 2) +#define ONLY_2_4_6 (BIT(2) | BIT(4) | BIT(6)) +#define ONLY_5_6_7 (BIT(5) | BIT(6) | BIT(7)) + +/* + * Description of the events we actually know about, as well as those with + * a specific counter affinity. Yes, this is a grand total of two known + * counters, and the rest is anybody's guess. + * + * Not all counters can count all events. Counters #0 and #1 are wired to + * count cycles and instructions respectively, and some events have + * bizarre mappings (every other counter, or even *one* counter). These + * restrictions equally apply to both P and E cores. + * + * It is worth noting that the PMUs attached to P and E cores are likely + * to be different because the underlying uarches are different. At the + * moment, we don't really need to distinguish between the two because we + * know next to nothing about the events themselves, and we already have + * per cpu-type PMU abstractions. + * + * If we eventually find out that the events are different across + * implementations, we'll have to introduce per cpu-type tables. 
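The affinity constants defined above are plain counter bitmaps; working a couple out by hand makes the table that follows easier to read:

/*
 * ANY_BUT_0_1 = GENMASK(9, 2)        = 0x3fc -> counters 2..9
 * ONLY_2_TO_7 = GENMASK(7, 2)        = 0x0fc -> counters 2..7
 * ONLY_2_4_6  = BIT(2)|BIT(4)|BIT(6) = 0x054 -> counters 2, 4, 6
 * ONLY_5_6_7  = BIT(5)|BIT(6)|BIT(7) = 0x0e0 -> counters 5, 6, 7
 *
 * m1_pmu_get_event_idx() further down walks the chosen mask with
 * for_each_set_bit() and takes the first free counter, so these
 * masks are the entire scheduling constraint.
 */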
+ */ +enum m1_pmu_events { + M1_PMU_PERFCTR_UNKNOWN_01 = 0x01, + M1_PMU_PERFCTR_CPU_CYCLES = 0x02, + M1_PMU_PERFCTR_INSTRUCTIONS = 0x8c, + M1_PMU_PERFCTR_UNKNOWN_8d = 0x8d, + M1_PMU_PERFCTR_UNKNOWN_8e = 0x8e, + M1_PMU_PERFCTR_UNKNOWN_8f = 0x8f, + M1_PMU_PERFCTR_UNKNOWN_90 = 0x90, + M1_PMU_PERFCTR_UNKNOWN_93 = 0x93, + M1_PMU_PERFCTR_UNKNOWN_94 = 0x94, + M1_PMU_PERFCTR_UNKNOWN_95 = 0x95, + M1_PMU_PERFCTR_UNKNOWN_96 = 0x96, + M1_PMU_PERFCTR_UNKNOWN_97 = 0x97, + M1_PMU_PERFCTR_UNKNOWN_98 = 0x98, + M1_PMU_PERFCTR_UNKNOWN_99 = 0x99, + M1_PMU_PERFCTR_UNKNOWN_9a = 0x9a, + M1_PMU_PERFCTR_UNKNOWN_9b = 0x9b, + M1_PMU_PERFCTR_UNKNOWN_9c = 0x9c, + M1_PMU_PERFCTR_UNKNOWN_9f = 0x9f, + M1_PMU_PERFCTR_UNKNOWN_bf = 0xbf, + M1_PMU_PERFCTR_UNKNOWN_c0 = 0xc0, + M1_PMU_PERFCTR_UNKNOWN_c1 = 0xc1, + M1_PMU_PERFCTR_UNKNOWN_c4 = 0xc4, + M1_PMU_PERFCTR_UNKNOWN_c5 = 0xc5, + M1_PMU_PERFCTR_UNKNOWN_c6 = 0xc6, + M1_PMU_PERFCTR_UNKNOWN_c8 = 0xc8, + M1_PMU_PERFCTR_UNKNOWN_ca = 0xca, + M1_PMU_PERFCTR_UNKNOWN_cb = 0xcb, + M1_PMU_PERFCTR_UNKNOWN_f5 = 0xf5, + M1_PMU_PERFCTR_UNKNOWN_f6 = 0xf6, + M1_PMU_PERFCTR_UNKNOWN_f7 = 0xf7, + M1_PMU_PERFCTR_UNKNOWN_f8 = 0xf8, + M1_PMU_PERFCTR_UNKNOWN_fd = 0xfd, + M1_PMU_PERFCTR_LAST = M1_PMU_CFG_EVENT, + + /* + * From this point onwards, these are not actual HW events, + * but attributes that get stored in hw->config_base. + */ + M1_PMU_CFG_COUNT_USER = BIT(8), + M1_PMU_CFG_COUNT_KERNEL = BIT(9), +}; + +/* + * Per-event affinity table. Most events can be installed on counter + * 2-9, but there are a number of exceptions. Note that this table + * has been created experimentally, and I wouldn't be surprised if more + * counters had strange affinities. + */ +static const u16 m1_pmu_event_affinity[M1_PMU_PERFCTR_LAST + 1] = { + [0 ... M1_PMU_PERFCTR_LAST] = ANY_BUT_0_1, + [M1_PMU_PERFCTR_UNKNOWN_01] = BIT(7), + [M1_PMU_PERFCTR_CPU_CYCLES] = ANY_BUT_0_1 | BIT(0), + [M1_PMU_PERFCTR_INSTRUCTIONS] = BIT(7) | BIT(1), + [M1_PMU_PERFCTR_UNKNOWN_8d] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_8e] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_8f] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_90] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_93] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_94] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_95] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_96] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_97] = BIT(7), + [M1_PMU_PERFCTR_UNKNOWN_98] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_99] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_9a] = BIT(7), + [M1_PMU_PERFCTR_UNKNOWN_9b] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_9c] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_9f] = BIT(7), + [M1_PMU_PERFCTR_UNKNOWN_bf] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_c0] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_c1] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_c4] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_c5] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_c6] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_c8] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_ca] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_cb] = ONLY_5_6_7, + [M1_PMU_PERFCTR_UNKNOWN_f5] = ONLY_2_4_6, + [M1_PMU_PERFCTR_UNKNOWN_f6] = ONLY_2_4_6, + [M1_PMU_PERFCTR_UNKNOWN_f7] = ONLY_2_4_6, + [M1_PMU_PERFCTR_UNKNOWN_f8] = ONLY_2_TO_7, + [M1_PMU_PERFCTR_UNKNOWN_fd] = ONLY_2_4_6, +}; + +static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INSTRUCTIONS, + /* No idea about the rest yet */ +}; + +/* sysfs definitions */ +static ssize_t m1_pmu_events_sysfs_show(struct device *dev, + 
struct device_attribute *attr, + char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + + return sprintf(page, "event=0x%04llx\n", pmu_attr->id); +} + +#define M1_PMU_EVENT_ATTR(name, config) \ + PMU_EVENT_ATTR_ID(name, m1_pmu_events_sysfs_show, config) + +static struct attribute *m1_pmu_event_attrs[] = { + M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CPU_CYCLES), + M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INSTRUCTIONS), + NULL, +}; + +static const struct attribute_group m1_pmu_events_attr_group = { + .name = "events", + .attrs = m1_pmu_event_attrs, +}; + +PMU_FORMAT_ATTR(event, "config:0-7"); + +static struct attribute *m1_pmu_format_attrs[] = { + &format_attr_event.attr, + NULL, +}; + +static const struct attribute_group m1_pmu_format_attr_group = { + .name = "format", + .attrs = m1_pmu_format_attrs, +}; + +/* Low level accessors. No synchronisation. */ +#define PMU_READ_COUNTER(_idx) \ + case _idx: return read_sysreg_s(SYS_IMP_APL_PMC## _idx ##_EL1) + +#define PMU_WRITE_COUNTER(_val, _idx) \ + case _idx: \ + write_sysreg_s(_val, SYS_IMP_APL_PMC## _idx ##_EL1); \ + return + +static u64 m1_pmu_read_hw_counter(unsigned int index) +{ + switch (index) { + PMU_READ_COUNTER(0); + PMU_READ_COUNTER(1); + PMU_READ_COUNTER(2); + PMU_READ_COUNTER(3); + PMU_READ_COUNTER(4); + PMU_READ_COUNTER(5); + PMU_READ_COUNTER(6); + PMU_READ_COUNTER(7); + PMU_READ_COUNTER(8); + PMU_READ_COUNTER(9); + } + + BUG(); +} + +static void m1_pmu_write_hw_counter(u64 val, unsigned int index) +{ + switch (index) { + PMU_WRITE_COUNTER(val, 0); + PMU_WRITE_COUNTER(val, 1); + PMU_WRITE_COUNTER(val, 2); + PMU_WRITE_COUNTER(val, 3); + PMU_WRITE_COUNTER(val, 4); + PMU_WRITE_COUNTER(val, 5); + PMU_WRITE_COUNTER(val, 6); + PMU_WRITE_COUNTER(val, 7); + PMU_WRITE_COUNTER(val, 8); + PMU_WRITE_COUNTER(val, 9); + } + + BUG(); +} + +#define get_bit_offset(index, mask) (__ffs(mask) + (index)) + +static void __m1_pmu_enable_counter(unsigned int index, bool en) +{ + u64 val, bit; + + switch (index) { + case 0 ... 7: + bit = BIT(get_bit_offset(index, PMCR0_CNT_ENABLE_0_7)); + break; + case 8 ... 9: + bit = BIT(get_bit_offset(index - 8, PMCR0_CNT_ENABLE_8_9)); + break; + default: + BUG(); + } + + val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1); + + if (en) + val |= bit; + else + val &= ~bit; + + write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1); +} + +static void m1_pmu_enable_counter(unsigned int index) +{ + __m1_pmu_enable_counter(index, true); +} + +static void m1_pmu_disable_counter(unsigned int index) +{ + __m1_pmu_enable_counter(index, false); +} + +static void __m1_pmu_enable_counter_interrupt(unsigned int index, bool en) +{ + u64 val, bit; + + switch (index) { + case 0 ... 7: + bit = BIT(get_bit_offset(index, PMCR0_PMI_ENABLE_0_7)); + break; + case 8 ... 9: + bit = BIT(get_bit_offset(index - 8, PMCR0_PMI_ENABLE_8_9)); + break; + default: + BUG(); + } + + val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1); + + if (en) + val |= bit; + else + val &= ~bit; + + write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1); +} + +static void m1_pmu_enable_counter_interrupt(unsigned int index) +{ + __m1_pmu_enable_counter_interrupt(index, true); +} + +static void m1_pmu_disable_counter_interrupt(unsigned int index) +{ + __m1_pmu_enable_counter_interrupt(index, false); +} + +static void m1_pmu_configure_counter(unsigned int index, u8 event, + bool user, bool kernel) +{ + u64 val, user_bit, kernel_bit; + int shift; + + switch (index) { + case 0 ... 
7: + user_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL0_0_7)); + kernel_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL1_0_7)); + break; + case 8 ... 9: + user_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL0_8_9)); + kernel_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL1_8_9)); + break; + default: + BUG(); + } + + val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1); + + if (user) + val |= user_bit; + else + val &= ~user_bit; + + if (kernel) + val |= kernel_bit; + else + val &= ~kernel_bit; + + write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1); + + /* + * Counters 0 and 1 have fixed events. For anything else, + * place the event at the expected location in the relevant + * register (PMESR0 holds the event configuration for counters + * 2-5, resp. PMESR1 for counters 6-9). + */ + switch (index) { + case 0 ... 1: + break; + case 2 ... 5: + shift = (index - 2) * 8; + val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1); + val &= ~((u64)0xff << shift); + val |= (u64)event << shift; + write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1); + break; + case 6 ... 9: + shift = (index - 6) * 8; + val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1); + val &= ~((u64)0xff << shift); + val |= (u64)event << shift; + write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1); + break; + } +} + +/* arm_pmu backend */ +static void m1_pmu_enable_event(struct perf_event *event) +{ + bool user, kernel; + u8 evt; + + evt = event->hw.config_base & M1_PMU_CFG_EVENT; + user = event->hw.config_base & M1_PMU_CFG_COUNT_USER; + kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL; + + m1_pmu_disable_counter_interrupt(event->hw.idx); + m1_pmu_disable_counter(event->hw.idx); + isb(); + + m1_pmu_configure_counter(event->hw.idx, evt, user, kernel); + m1_pmu_enable_counter(event->hw.idx); + m1_pmu_enable_counter_interrupt(event->hw.idx); + isb(); +} + +static void m1_pmu_disable_event(struct perf_event *event) +{ + m1_pmu_disable_counter_interrupt(event->hw.idx); + m1_pmu_disable_counter(event->hw.idx); + isb(); +} + +static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu) +{ + struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); + struct pt_regs *regs; + u64 overflow, state; + int idx; + + overflow = read_sysreg_s(SYS_IMP_APL_PMSR_EL1); + if (!overflow) { + /* Spurious interrupt? */ + state = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1); + state &= ~PMCR0_IACT; + write_sysreg_s(state, SYS_IMP_APL_PMCR0_EL1); + isb(); + return IRQ_NONE; + } + + cpu_pmu->stop(cpu_pmu); + + regs = get_irq_regs(); + + for (idx = 0; idx < cpu_pmu->num_events; idx++) { + struct perf_event *event = cpuc->events[idx]; + struct perf_sample_data data; + + if (!event) + continue; + + armpmu_event_update(event); + perf_sample_data_init(&data, 0, event->hw.last_period); + if (!armpmu_event_set_period(event)) + continue; + + if (perf_event_overflow(event, &data, regs)) + m1_pmu_disable_event(event); + } + + cpu_pmu->start(cpu_pmu); + + return IRQ_HANDLED; +} + +static u64 m1_pmu_read_counter(struct perf_event *event) +{ + return m1_pmu_read_hw_counter(event->hw.idx); +} + +static void m1_pmu_write_counter(struct perf_event *event, u64 value) +{ + m1_pmu_write_hw_counter(value, event->hw.idx); + isb(); +} + +static int m1_pmu_get_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + unsigned long evtype = event->hw.config_base & M1_PMU_CFG_EVENT; + unsigned long affinity = m1_pmu_event_affinity[evtype]; + int idx; + + /* + * Place the event on the first free counter that can count + * this event. 
+ * + * We could do a better job if we had a view of all the events + * counting on the PMU at any given time, and by placing the + * most constraining events first. + */ + for_each_set_bit(idx, &affinity, M1_PMU_NR_COUNTERS) { + if (!test_and_set_bit(idx, cpuc->used_mask)) + return idx; + } + + return -EAGAIN; +} + +static void m1_pmu_clear_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + clear_bit(event->hw.idx, cpuc->used_mask); +} + +static void __m1_pmu_set_mode(u8 mode) +{ + u64 val; + + val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1); + val &= ~(PMCR0_IMODE | PMCR0_IACT); + val |= FIELD_PREP(PMCR0_IMODE, mode); + write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1); + isb(); +} + +static void m1_pmu_start(struct arm_pmu *cpu_pmu) +{ + __m1_pmu_set_mode(PMCR0_IMODE_FIQ); +} + +static void m1_pmu_stop(struct arm_pmu *cpu_pmu) +{ + __m1_pmu_set_mode(PMCR0_IMODE_OFF); +} + +static int m1_pmu_map_event(struct perf_event *event) +{ + /* + * Although the counters are 48bit wide, bit 47 is what + * triggers the overflow interrupt. Advertise the counters + * being 47bit wide to mimick the behaviour of the ARM PMU. + */ + event->hw.flags |= ARMPMU_EVT_47BIT; + return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT); +} + +static void m1_pmu_reset(void *info) +{ + int i; + + __m1_pmu_set_mode(PMCR0_IMODE_OFF); + + for (i = 0; i < M1_PMU_NR_COUNTERS; i++) { + m1_pmu_disable_counter(i); + m1_pmu_disable_counter_interrupt(i); + m1_pmu_write_hw_counter(0, i); + } + + isb(); +} + +static int m1_pmu_set_event_filter(struct hw_perf_event *event, + struct perf_event_attr *attr) +{ + unsigned long config_base = 0; + + if (!attr->exclude_guest) + return -EINVAL; + if (!attr->exclude_kernel) + config_base |= M1_PMU_CFG_COUNT_KERNEL; + if (!attr->exclude_user) + config_base |= M1_PMU_CFG_COUNT_USER; + + event->config_base = config_base; + + return 0; +} + +static int m1_pmu_init(struct arm_pmu *cpu_pmu) +{ + cpu_pmu->handle_irq = m1_pmu_handle_irq; + cpu_pmu->enable = m1_pmu_enable_event; + cpu_pmu->disable = m1_pmu_disable_event; + cpu_pmu->read_counter = m1_pmu_read_counter; + cpu_pmu->write_counter = m1_pmu_write_counter; + cpu_pmu->get_event_idx = m1_pmu_get_event_idx; + cpu_pmu->clear_event_idx = m1_pmu_clear_event_idx; + cpu_pmu->start = m1_pmu_start; + cpu_pmu->stop = m1_pmu_stop; + cpu_pmu->map_event = m1_pmu_map_event; + cpu_pmu->reset = m1_pmu_reset; + cpu_pmu->set_event_filter = m1_pmu_set_event_filter; + + cpu_pmu->num_events = M1_PMU_NR_COUNTERS; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group; + return 0; +} + +/* Device driver gunk */ +static int m1_pmu_ice_init(struct arm_pmu *cpu_pmu) +{ + cpu_pmu->name = "apple_icestorm_pmu"; + return m1_pmu_init(cpu_pmu); +} + +static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu) +{ + cpu_pmu->name = "apple_firestorm_pmu"; + return m1_pmu_init(cpu_pmu); +} + +static const struct of_device_id m1_pmu_of_device_ids[] = { + { .compatible = "apple,icestorm-pmu", .data = m1_pmu_ice_init, }, + { .compatible = "apple,firestorm-pmu", .data = m1_pmu_fire_init, }, + { }, +}; +MODULE_DEVICE_TABLE(of, m1_pmu_of_device_ids); + +static int m1_pmu_device_probe(struct platform_device *pdev) +{ + return arm_pmu_device_probe(pdev, m1_pmu_of_device_ids, NULL); +} + +static struct platform_driver m1_pmu_driver = { + .driver = { + .name = "apple-m1-cpu-pmu", + .of_match_table = m1_pmu_of_device_ids, + .suppress_bind_attrs = true, + }, + .probe = 
m1_pmu_device_probe, +}; + +module_platform_driver(m1_pmu_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 54aca3a62814..96e09fa40909 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -1096,7 +1096,7 @@ static void cci_pmu_enable(struct pmu *pmu) { struct cci_pmu *cci_pmu = to_cci_pmu(pmu); struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; - int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs); + bool enabled = !bitmap_empty(hw_events->used_mask, cci_pmu->num_cntrs); unsigned long flags; if (!enabled) diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index a96c31604545..40b352e8aa7f 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c @@ -1460,8 +1460,7 @@ static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id) static int arm_ccn_probe(struct platform_device *pdev) { struct arm_ccn *ccn; - struct resource *res; - unsigned int irq; + int irq; int err; ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL); @@ -1474,10 +1473,9 @@ static int arm_ccn_probe(struct platform_device *pdev) if (IS_ERR(ccn->base)) return PTR_ERR(ccn->base); - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) - return -EINVAL; - irq = res->start; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; /* Check if we can use the interrupt */ writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE, diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 0e48adce57ef..9c1d82be7a2f 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -71,9 +71,11 @@ #define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18) #define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00) #define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18,17) -#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(6) -#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(5) -#define CMN_DTM_WPn_CONFIG_WP_GRP BIT(4) +#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(9) +#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(8) +#define CMN600_WPn_CONFIG_WP_COMBINE BIT(6) +#define CMN600_WPn_CONFIG_WP_EXCLUSIVE BIT(5) +#define CMN_DTM_WPn_CONFIG_WP_GRP GENMASK_ULL(5, 4) #define CMN_DTM_WPn_CONFIG_WP_CHN_SEL GENMASK_ULL(3, 1) #define CMN_DTM_WPn_CONFIG_WP_DEV_SEL BIT(0) #define CMN_DTM_WPn_VAL(n) (CMN_DTM_WPn(n) + 0x08) @@ -155,6 +157,7 @@ #define CMN_CONFIG_WP_COMBINE GENMASK_ULL(27, 24) #define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48) #define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51) +/* Note that we don't yet support the tertiary match group on newer IPs */ #define CMN_CONFIG_WP_GRP BIT_ULL(56) #define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(57) #define CMN_CONFIG1_WP_VAL GENMASK_ULL(63, 0) @@ -353,7 +356,7 @@ static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn, return NULL; } -struct dentry *arm_cmn_debugfs; +static struct dentry *arm_cmn_debugfs; #ifdef CONFIG_DEBUG_FS static const char *arm_cmn_device_type(u8 type) @@ -595,6 +598,9 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3))) return 0; + if (chan == 4 && cmn->model == CMN600) + return 0; + if ((chan == 5 && cmn->rsp_vc_num < 2) || (chan == 6 && cmn->dat_vc_num < 2)) return 0; @@ -905,15 +911,18 @@ static u32 arm_cmn_wp_config(struct perf_event *event) u32 grp = CMN_EVENT_WP_GRP(event); u32 exc = CMN_EVENT_WP_EXCLUSIVE(event); u32 combine = CMN_EVENT_WP_COMBINE(event); + bool is_cmn600 = to_cmn(event->pmu)->model == CMN600; config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) | 
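/*
 * Layout summary for the watchpoint definitions above: on CMN-600 the
 * COMBINE and EXCLUSIVE controls sit at bits 6 and 5 and the match
 * group select is a single bit, while newer IPs widen WP_GRP to the
 * two-bit field [5:4] (leaving room for the tertiary match group that
 * CMN_CONFIG_WP_GRP does not expose yet) and move COMBINE/EXCLUSIVE up
 * to bits 9 and 8; hence the is_cmn600 selection between the CMN600_*
 * and CMN_DTM_* definitions below.
 */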
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) | FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) | - FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE, exc) | FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL2, dev >> 1); + if (exc) + config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_EXCLUSIVE : + CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE; if (combine && !grp) - config |= CMN_DTM_WPn_CONFIG_WP_COMBINE; - + config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_COMBINE : + CMN_DTM_WPn_CONFIG_WP_COMBINE; return config; } diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 295cc7952d0e..9694370651fa 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -109,6 +109,8 @@ static inline u64 arm_pmu_event_max_period(struct perf_event *event) { if (event->hw.flags & ARMPMU_EVT_64BIT) return GENMASK_ULL(63, 0); + else if (event->hw.flags & ARMPMU_EVT_47BIT) + return GENMASK_ULL(46, 0); else return GENMASK_ULL(31, 0); } @@ -524,7 +526,7 @@ static void armpmu_enable(struct pmu *pmu) { struct arm_pmu *armpmu = to_arm_pmu(pmu); struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); - int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); + bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events); /* For task-bound events we may be called on other CPUs */ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) @@ -785,7 +787,7 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd, { struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb); struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); - int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); + bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events); if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) return NOTIFY_DONE; diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index a738aeab5c04..358e4e284a62 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -393,7 +393,7 @@ EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read); void hisi_uncore_pmu_enable(struct pmu *pmu) { struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu); - int enabled = bitmap_weight(hisi_pmu->pmu_events.used_mask, + bool enabled = !bitmap_empty(hisi_pmu->pmu_events.used_mask, hisi_pmu->num_counters); if (!enabled) diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c new file mode 100644 index 000000000000..665b382a0ee3 --- /dev/null +++ b/drivers/perf/marvell_cn10k_ddr_pmu.c @@ -0,0 +1,758 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver + * + * Copyright (C) 2021 Marvell. 
+ */ + +#include <linux/init.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/perf_event.h> +#include <linux/hrtimer.h> + +/* Performance Counters Operating Mode Control Registers */ +#define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020 +#define OP_MODE_CTRL_VAL_MANNUAL 0x1 + +/* Performance Counters Start Operation Control Registers */ +#define DDRC_PERF_CNT_START_OP_CTRL 0x8028 +#define START_OP_CTRL_VAL_START 0x1ULL +#define START_OP_CTRL_VAL_ACTIVE 0x2 + +/* Performance Counters End Operation Control Registers */ +#define DDRC_PERF_CNT_END_OP_CTRL 0x8030 +#define END_OP_CTRL_VAL_END 0x1ULL + +/* Performance Counters End Status Registers */ +#define DDRC_PERF_CNT_END_STATUS 0x8038 +#define END_STATUS_VAL_END_TIMER_MODE_END 0x1 + +/* Performance Counters Configuration Registers */ +#define DDRC_PERF_CFG_BASE 0x8040 + +/* 8 generic event counters + 2 fixed event counters */ +#define DDRC_PERF_NUM_GEN_COUNTERS 8 +#define DDRC_PERF_NUM_FIX_COUNTERS 2 +#define DDRC_PERF_READ_COUNTER_IDX DDRC_PERF_NUM_GEN_COUNTERS +#define DDRC_PERF_WRITE_COUNTER_IDX (DDRC_PERF_NUM_GEN_COUNTERS + 1) +#define DDRC_PERF_NUM_COUNTERS (DDRC_PERF_NUM_GEN_COUNTERS + \ + DDRC_PERF_NUM_FIX_COUNTERS) + +/* Generic event counter registers */ +#define DDRC_PERF_CFG(n) (DDRC_PERF_CFG_BASE + 8 * (n)) +#define EVENT_ENABLE BIT_ULL(63) + +/* Two dedicated event counters for DDR reads and writes */ +#define EVENT_DDR_READS 101 +#define EVENT_DDR_WRITES 100 + +/* + * Programmable event IDs for the programmable event counters. + * DO NOT change these event-id numbers; they are used to + * program the event bitmap in h/w. + */ +#define EVENT_OP_IS_ZQLATCH 55 +#define EVENT_OP_IS_ZQSTART 54 +#define EVENT_OP_IS_TCR_MRR 53 +#define EVENT_OP_IS_DQSOSC_MRR 52 +#define EVENT_OP_IS_DQSOSC_MPC 51 +#define EVENT_VISIBLE_WIN_LIMIT_REACHED_WR 50 +#define EVENT_VISIBLE_WIN_LIMIT_REACHED_RD 49 +#define EVENT_BSM_STARVATION 48 +#define EVENT_BSM_ALLOC 47 +#define EVENT_LPR_REQ_WITH_NOCREDIT 46 +#define EVENT_HPR_REQ_WITH_NOCREDIT 45 +#define EVENT_OP_IS_ZQCS 44 +#define EVENT_OP_IS_ZQCL 43 +#define EVENT_OP_IS_LOAD_MODE 42 +#define EVENT_OP_IS_SPEC_REF 41 +#define EVENT_OP_IS_CRIT_REF 40 +#define EVENT_OP_IS_REFRESH 39 +#define EVENT_OP_IS_ENTER_MPSM 35 +#define EVENT_OP_IS_ENTER_POWERDOWN 31 +#define EVENT_OP_IS_ENTER_SELFREF 27 +#define EVENT_WAW_HAZARD 26 +#define EVENT_RAW_HAZARD 25 +#define EVENT_WAR_HAZARD 24 +#define EVENT_WRITE_COMBINE 23 +#define EVENT_RDWR_TRANSITIONS 22 +#define EVENT_PRECHARGE_FOR_OTHER 21 +#define EVENT_PRECHARGE_FOR_RDWR 20 +#define EVENT_OP_IS_PRECHARGE 19 +#define EVENT_OP_IS_MWR 18 +#define EVENT_OP_IS_WR 17 +#define EVENT_OP_IS_RD 16 +#define EVENT_OP_IS_RD_ACTIVATE 15 +#define EVENT_OP_IS_RD_OR_WR 14 +#define EVENT_OP_IS_ACTIVATE 13 +#define EVENT_WR_XACT_WHEN_CRITICAL 12 +#define EVENT_LPR_XACT_WHEN_CRITICAL 11 +#define EVENT_HPR_XACT_WHEN_CRITICAL 10 +#define EVENT_DFI_RD_DATA_CYCLES 9 +#define EVENT_DFI_WR_DATA_CYCLES 8 +#define EVENT_ACT_BYPASS 7 +#define EVENT_READ_BYPASS 6 +#define EVENT_HIF_HI_PRI_RD 5 +#define EVENT_HIF_RMW 4 +#define EVENT_HIF_RD 3 +#define EVENT_HIF_WR 2 +#define EVENT_HIF_RD_OR_WR 1 + +/* Event counter value registers */ +#define DDRC_PERF_CNT_VALUE_BASE 0x8080 +#define DDRC_PERF_CNT_VALUE(n) (DDRC_PERF_CNT_VALUE_BASE + 8 * (n)) + +/* Fixed event counter enable/disable register */ +#define DDRC_PERF_CNT_FREERUN_EN 0x80C0 +#define DDRC_PERF_FREERUN_WRITE_EN 0x1 +#define DDRC_PERF_FREERUN_READ_EN
0x2 + +/* Fixed event counter control register */ +#define DDRC_PERF_CNT_FREERUN_CTRL 0x80C8 +#define DDRC_FREERUN_WRITE_CNT_CLR 0x1 +#define DDRC_FREERUN_READ_CNT_CLR 0x2 + +/* Fixed event counter value register */ +#define DDRC_PERF_CNT_VALUE_WR_OP 0x80D0 +#define DDRC_PERF_CNT_VALUE_RD_OP 0x80D8 +#define DDRC_PERF_CNT_VALUE_OVERFLOW BIT_ULL(48) +#define DDRC_PERF_CNT_MAX_VALUE GENMASK_ULL(48, 0) + +struct cn10k_ddr_pmu { + struct pmu pmu; + void __iomem *base; + unsigned int cpu; + struct device *dev; + int active_events; + struct perf_event *events[DDRC_PERF_NUM_COUNTERS]; + struct hrtimer hrtimer; + struct hlist_node node; +}; + +#define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu) + +static ssize_t cn10k_ddr_pmu_event_show(struct device *dev, + struct device_attribute *attr, + char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id); + +} + +#define CN10K_DDR_PMU_EVENT_ATTR(_name, _id) \ + PMU_EVENT_ATTR_ID(_name, cn10k_ddr_pmu_event_show, _id) + +static struct attribute *cn10k_ddr_perf_events_attrs[] = { + CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_or_wr_access, EVENT_HIF_RD_OR_WR), + CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_wr_access, EVENT_HIF_WR), + CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_access, EVENT_HIF_RD), + CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rmw_access, EVENT_HIF_RMW), + CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_pri_rdaccess, EVENT_HIF_HI_PRI_RD), + CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_bypass_access, EVENT_READ_BYPASS), + CN10K_DDR_PMU_EVENT_ATTR(ddr_act_bypass_access, EVENT_ACT_BYPASS), + CN10K_DDR_PMU_EVENT_ATTR(ddr_dif_wr_data_access, EVENT_DFI_WR_DATA_CYCLES), + CN10K_DDR_PMU_EVENT_ATTR(ddr_dif_rd_data_access, EVENT_DFI_RD_DATA_CYCLES), + CN10K_DDR_PMU_EVENT_ATTR(ddr_hpri_sched_rd_crit_access, + EVENT_HPR_XACT_WHEN_CRITICAL), + CN10K_DDR_PMU_EVENT_ATTR(ddr_lpri_sched_rd_crit_access, + EVENT_LPR_XACT_WHEN_CRITICAL), + CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_trxn_crit_access, + EVENT_WR_XACT_WHEN_CRITICAL), + CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_active_access, EVENT_OP_IS_ACTIVATE), + CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_or_wr_access, EVENT_OP_IS_RD_OR_WR), + CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_active_access, EVENT_OP_IS_RD_ACTIVATE), + CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_read, EVENT_OP_IS_RD), + CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_write, EVENT_OP_IS_WR), + CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_mwr, EVENT_OP_IS_MWR), + CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge, EVENT_OP_IS_PRECHARGE), + CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_rdwr, EVENT_PRECHARGE_FOR_RDWR), + CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_other, + EVENT_PRECHARGE_FOR_OTHER), + CN10K_DDR_PMU_EVENT_ATTR(ddr_rdwr_transitions, EVENT_RDWR_TRANSITIONS), + CN10K_DDR_PMU_EVENT_ATTR(ddr_write_combine, EVENT_WRITE_COMBINE), + CN10K_DDR_PMU_EVENT_ATTR(ddr_war_hazard, EVENT_WAR_HAZARD), + CN10K_DDR_PMU_EVENT_ATTR(ddr_raw_hazard, EVENT_RAW_HAZARD), + CN10K_DDR_PMU_EVENT_ATTR(ddr_waw_hazard, EVENT_WAW_HAZARD), + CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_selfref, EVENT_OP_IS_ENTER_SELFREF), + CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_powerdown, EVENT_OP_IS_ENTER_POWERDOWN), + CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_mpsm, EVENT_OP_IS_ENTER_MPSM), + CN10K_DDR_PMU_EVENT_ATTR(ddr_refresh, EVENT_OP_IS_REFRESH), + CN10K_DDR_PMU_EVENT_ATTR(ddr_crit_ref, EVENT_OP_IS_CRIT_REF), + CN10K_DDR_PMU_EVENT_ATTR(ddr_spec_ref, EVENT_OP_IS_SPEC_REF), + CN10K_DDR_PMU_EVENT_ATTR(ddr_load_mode, EVENT_OP_IS_LOAD_MODE), + CN10K_DDR_PMU_EVENT_ATTR(ddr_zqcl, EVENT_OP_IS_ZQCL), + 
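/*
 * Each CN10K_DDR_PMU_EVENT_ATTR() entry in this table surfaces as a
 * named event under /sys/bus/event_source/devices/<pmu>/events/ once
 * the PMU is registered, so a count can be taken with, e.g. (the
 * instance suffix is illustrative; it is derived from the device
 * address at probe time):
 *
 *	perf stat -e mrvl_ddr_pmu_87e1c0000000/ddr_cam_read/ -a sleep 1
 */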
CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_wr_access, EVENT_OP_IS_ZQCS), + CN10K_DDR_PMU_EVENT_ATTR(ddr_hpr_req_with_nocredit, + EVENT_HPR_REQ_WITH_NOCREDIT), + CN10K_DDR_PMU_EVENT_ATTR(ddr_lpr_req_with_nocredit, + EVENT_LPR_REQ_WITH_NOCREDIT), + CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_alloc, EVENT_BSM_ALLOC), + CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_starvation, EVENT_BSM_STARVATION), + CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_rd, + EVENT_VISIBLE_WIN_LIMIT_REACHED_RD), + CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_wr, + EVENT_VISIBLE_WIN_LIMIT_REACHED_WR), + CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mpc, EVENT_OP_IS_DQSOSC_MPC), + CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mrr, EVENT_OP_IS_DQSOSC_MRR), + CN10K_DDR_PMU_EVENT_ATTR(ddr_tcr_mrr, EVENT_OP_IS_TCR_MRR), + CN10K_DDR_PMU_EVENT_ATTR(ddr_zqstart, EVENT_OP_IS_ZQSTART), + CN10K_DDR_PMU_EVENT_ATTR(ddr_zqlatch, EVENT_OP_IS_ZQLATCH), + /* Free run event counters */ + CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_reads, EVENT_DDR_READS), + CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_writes, EVENT_DDR_WRITES), + NULL +}; + +static struct attribute_group cn10k_ddr_perf_events_attr_group = { + .name = "events", + .attrs = cn10k_ddr_perf_events_attrs, +}; + +PMU_FORMAT_ATTR(event, "config:0-8"); + +static struct attribute *cn10k_ddr_perf_format_attrs[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group cn10k_ddr_perf_format_attr_group = { + .name = "format", + .attrs = cn10k_ddr_perf_format_attrs, +}; + +static ssize_t cn10k_ddr_perf_cpumask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu)); +} + +static struct device_attribute cn10k_ddr_perf_cpumask_attr = + __ATTR(cpumask, 0444, cn10k_ddr_perf_cpumask_show, NULL); + +static struct attribute *cn10k_ddr_perf_cpumask_attrs[] = { + &cn10k_ddr_perf_cpumask_attr.attr, + NULL, +}; + +static struct attribute_group cn10k_ddr_perf_cpumask_attr_group = { + .attrs = cn10k_ddr_perf_cpumask_attrs, +}; + +static const struct attribute_group *cn10k_attr_groups[] = { + &cn10k_ddr_perf_events_attr_group, + &cn10k_ddr_perf_format_attr_group, + &cn10k_ddr_perf_cpumask_attr_group, + NULL, +}; + +/* Default poll timeout is 100 sec, which is more than sufficient for + * a 48 bit counter incremented at a maximum of 5.6 GT/s, which may take many + * hours to overflow. + */ +static unsigned long cn10k_ddr_pmu_poll_period_sec = 100; +module_param_named(poll_period_sec, cn10k_ddr_pmu_poll_period_sec, ulong, 0644); + +static ktime_t cn10k_ddr_pmu_timer_period(void) +{ + return ms_to_ktime((u64)cn10k_ddr_pmu_poll_period_sec * USEC_PER_SEC); +} + +static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap) +{ + switch (eventid) { + case EVENT_HIF_RD_OR_WR ... EVENT_WAW_HAZARD: + case EVENT_OP_IS_REFRESH ...
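/*
 * Worked examples of the encoding handled here: most IDs select a
 * single bit, so EVENT_OP_IS_RD (16) programs 1ULL << 15; the three
 * ENTER_* low-power events each own a four-bit field, so
 * EVENT_OP_IS_ENTER_SELFREF (27) programs 0xFULL << 26, i.e. bits
 * [29:26].
 */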
EVENT_OP_IS_ZQLATCH: + *event_bitmap = (1ULL << (eventid - 1)); + break; + case EVENT_OP_IS_ENTER_SELFREF: + case EVENT_OP_IS_ENTER_POWERDOWN: + case EVENT_OP_IS_ENTER_MPSM: + *event_bitmap = (0xFULL << (eventid - 1)); + break; + default: + pr_err("%s Invalid eventid %d\n", __func__, eventid); + return -EINVAL; + } + + return 0; +} + +static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu, + struct perf_event *event) +{ + u8 config = event->attr.config; + int i; + + /* DDR read free-run counter index */ + if (config == EVENT_DDR_READS) { + pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event; + return DDRC_PERF_READ_COUNTER_IDX; + } + + /* DDR write free-run counter index */ + if (config == EVENT_DDR_WRITES) { + pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event; + return DDRC_PERF_WRITE_COUNTER_IDX; + } + + /* Allocate DDR generic counters */ + for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) { + if (pmu->events[i] == NULL) { + pmu->events[i] = event; + return i; + } + } + + return -ENOENT; +} + +static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter) +{ + pmu->events[counter] = NULL; +} + +static int cn10k_ddr_perf_event_init(struct perf_event *event) +{ + struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (is_sampling_event(event)) { + dev_info(pmu->dev, "Sampling not supported!\n"); + return -EOPNOTSUPP; + } + + if (event->cpu < 0) { + dev_warn(pmu->dev, "Can't provide per-task data!\n"); + return -EOPNOTSUPP; + } + + /* We must NOT create groups containing mixed PMUs */ + if (event->group_leader->pmu != event->pmu && + !is_software_event(event->group_leader)) + return -EINVAL; + + /* Set ownership of the event to one CPU; the same event cannot be observed + on multiple CPUs at the same time.
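 * This is the usual uncore-PMU arrangement: the counters measure
 * global DDR traffic with no task context, so one CPU (advertised
 * via the cpumask attribute) owns all events, and they are
 * migrated wholesale if that CPU goes offline.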
+ */ + event->cpu = pmu->cpu; + hwc->idx = -1; + return 0; +} + +static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu, + int counter, bool enable) +{ + u32 reg; + u64 val; + + if (counter > DDRC_PERF_NUM_COUNTERS) { + pr_err("Error: unsupported counter %d\n", counter); + return; + } + + if (counter < DDRC_PERF_NUM_GEN_COUNTERS) { + reg = DDRC_PERF_CFG(counter); + val = readq_relaxed(pmu->base + reg); + + if (enable) + val |= EVENT_ENABLE; + else + val &= ~EVENT_ENABLE; + + writeq_relaxed(val, pmu->base + reg); + } else { + val = readq_relaxed(pmu->base + DDRC_PERF_CNT_FREERUN_EN); + if (enable) { + if (counter == DDRC_PERF_READ_COUNTER_IDX) + val |= DDRC_PERF_FREERUN_READ_EN; + else + val |= DDRC_PERF_FREERUN_WRITE_EN; + } else { + if (counter == DDRC_PERF_READ_COUNTER_IDX) + val &= ~DDRC_PERF_FREERUN_READ_EN; + else + val &= ~DDRC_PERF_FREERUN_WRITE_EN; + } + writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_EN); + } +} + +static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter) +{ + u64 val; + + if (counter == DDRC_PERF_READ_COUNTER_IDX) + return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_RD_OP); + + if (counter == DDRC_PERF_WRITE_COUNTER_IDX) + return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_WR_OP); + + val = readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE(counter)); + return val; +} + +static void cn10k_ddr_perf_event_update(struct perf_event *event) +{ + struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 prev_count, new_count, mask; + + do { + prev_count = local64_read(&hwc->prev_count); + new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx); + } while (local64_xchg(&hwc->prev_count, new_count) != prev_count); + + mask = DDRC_PERF_CNT_MAX_VALUE; + + local64_add((new_count - prev_count) & mask, &event->count); +} + +static void cn10k_ddr_perf_event_start(struct perf_event *event, int flags) +{ + struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + + local64_set(&hwc->prev_count, 0); + + cn10k_ddr_perf_counter_enable(pmu, counter, true); + + hwc->state = 0; +} + +static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags) +{ + struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u8 config = event->attr.config; + int counter, ret; + u32 reg_offset; + u64 val; + + counter = cn10k_ddr_perf_alloc_counter(pmu, event); + if (counter < 0) + return -EAGAIN; + + pmu->active_events++; + hwc->idx = counter; + + if (pmu->active_events == 1) + hrtimer_start(&pmu->hrtimer, cn10k_ddr_pmu_timer_period(), + HRTIMER_MODE_REL_PINNED); + + if (counter < DDRC_PERF_NUM_GEN_COUNTERS) { + /* Generic counters, configure event id */ + reg_offset = DDRC_PERF_CFG(counter); + ret = ddr_perf_get_event_bitmap(config, &val); + if (ret) + return ret; + + writeq_relaxed(val, pmu->base + reg_offset); + } else { + /* fixed event counter, clear counter value */ + if (counter == DDRC_PERF_READ_COUNTER_IDX) + val = DDRC_FREERUN_READ_CNT_CLR; + else + val = DDRC_FREERUN_WRITE_CNT_CLR; + + writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_CTRL); + } + + hwc->state |= PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + cn10k_ddr_perf_event_start(event, flags); + + return 0; +} + +static void cn10k_ddr_perf_event_stop(struct perf_event *event, int flags) +{ + struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + + 
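/*
 * The PERF_EF_UPDATE read below goes through
 * cn10k_ddr_perf_event_update(), whose masked delta is safe across a
 * single counter wrap. Illustrative numbers: with
 * prev = GENMASK_ULL(48, 0) - 1 and new = 1 after one wrap,
 * (new - prev) & GENMASK_ULL(48, 0) = 3, matching the three
 * increments that actually happened.
 */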
cn10k_ddr_perf_counter_enable(pmu, counter, false); + + if (flags & PERF_EF_UPDATE) + cn10k_ddr_perf_event_update(event); + + hwc->state |= PERF_HES_STOPPED; +} + +static void cn10k_ddr_perf_event_del(struct perf_event *event, int flags) +{ + struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + + cn10k_ddr_perf_event_stop(event, PERF_EF_UPDATE); + + cn10k_ddr_perf_free_counter(pmu, counter); + pmu->active_events--; + hwc->idx = -1; + + /* Cancel timer when no events to capture */ + if (pmu->active_events == 0) + hrtimer_cancel(&pmu->hrtimer); +} + +static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu) +{ + struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu); + + writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base + + DDRC_PERF_CNT_START_OP_CTRL); +} + +static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu) +{ + struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu); + + writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base + + DDRC_PERF_CNT_END_OP_CTRL); +} + +static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu) +{ + struct hw_perf_event *hwc; + int i; + + for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) { + if (pmu->events[i] == NULL) + continue; + + cn10k_ddr_perf_event_update(pmu->events[i]); + } + + /* Reset previous count as h/w counters are reset */ + for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) { + if (pmu->events[i] == NULL) + continue; + + hwc = &pmu->events[i]->hw; + local64_set(&hwc->prev_count, 0); + } +} + +static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu) +{ + struct perf_event *event; + struct hw_perf_event *hwc; + u64 prev_count, new_count; + u64 value; + int i; + + event = pmu->events[DDRC_PERF_READ_COUNTER_IDX]; + if (event) { + hwc = &event->hw; + prev_count = local64_read(&hwc->prev_count); + new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx); + + /* Overflow condition is when the new count is less than + * the previous count + */ + if (new_count < prev_count) + cn10k_ddr_perf_event_update(event); + } + + event = pmu->events[DDRC_PERF_WRITE_COUNTER_IDX]; + if (event) { + hwc = &event->hw; + prev_count = local64_read(&hwc->prev_count); + new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx); + + /* Overflow condition is when the new count is less than + * the previous count + */ + if (new_count < prev_count) + cn10k_ddr_perf_event_update(event); + } + + for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) { + if (pmu->events[i] == NULL) + continue; + + value = cn10k_ddr_perf_read_counter(pmu, i); + if (value == DDRC_PERF_CNT_MAX_VALUE) { + pr_info("Counter-(%d) reached max value\n", i); + cn10k_ddr_perf_event_update_all(pmu); + cn10k_ddr_perf_pmu_disable(&pmu->pmu); + cn10k_ddr_perf_pmu_enable(&pmu->pmu); + } + } + + return IRQ_HANDLED; +} + +static enum hrtimer_restart cn10k_ddr_pmu_timer_handler(struct hrtimer *hrtimer) +{ + struct cn10k_ddr_pmu *pmu = container_of(hrtimer, struct cn10k_ddr_pmu, + hrtimer); + unsigned long flags; + + local_irq_save(flags); + cn10k_ddr_pmu_overflow_handler(pmu); + local_irq_restore(flags); + + hrtimer_forward_now(hrtimer, cn10k_ddr_pmu_timer_period()); + return HRTIMER_RESTART; +} + +static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu, + node); + unsigned int target; + + if (cpu != pmu->cpu) + return 0; + + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&pmu->pmu,
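/*
 * (All events owned by the outgoing CPU are handed to the chosen
 * target here, so counting continues without any userspace action;
 * updating pmu->cpu below keeps the cpumask attribute in step.)
 */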
cpu, target); + pmu->cpu = target; + return 0; +} + +static int cn10k_ddr_perf_probe(struct platform_device *pdev) +{ + struct cn10k_ddr_pmu *ddr_pmu; + struct resource *res; + void __iomem *base; + char *name; + int ret; + + ddr_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddr_pmu), GFP_KERNEL); + if (!ddr_pmu) + return -ENOMEM; + + ddr_pmu->dev = &pdev->dev; + platform_set_drvdata(pdev, ddr_pmu); + + base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(base)) + return PTR_ERR(base); + + ddr_pmu->base = base; + + /* Setup the PMU counter to work in manual mode */ + writeq_relaxed(OP_MODE_CTRL_VAL_MANNUAL, ddr_pmu->base + + DDRC_PERF_CNT_OP_MODE_CTRL); + + ddr_pmu->pmu = (struct pmu) { + .module = THIS_MODULE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + .task_ctx_nr = perf_invalid_context, + .attr_groups = cn10k_attr_groups, + .event_init = cn10k_ddr_perf_event_init, + .add = cn10k_ddr_perf_event_add, + .del = cn10k_ddr_perf_event_del, + .start = cn10k_ddr_perf_event_start, + .stop = cn10k_ddr_perf_event_stop, + .read = cn10k_ddr_perf_event_update, + .pmu_enable = cn10k_ddr_perf_pmu_enable, + .pmu_disable = cn10k_ddr_perf_pmu_disable, + }; + + /* Choose this cpu to collect perf data */ + ddr_pmu->cpu = raw_smp_processor_id(); + + name = devm_kasprintf(ddr_pmu->dev, GFP_KERNEL, "mrvl_ddr_pmu_%llx", + res->start); + if (!name) + return -ENOMEM; + + hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler; + + cpuhp_state_add_instance_nocalls( + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE, + &ddr_pmu->node); + + ret = perf_pmu_register(&ddr_pmu->pmu, name, -1); + if (ret) + goto error; + + pr_info("CN10K DDR PMU Driver for ddrc@%llx\n", res->start); + return 0; +error: + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE, + &ddr_pmu->node); + return ret; +} + +static int cn10k_ddr_perf_remove(struct platform_device *pdev) +{ + struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev); + + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE, + &ddr_pmu->node); + + perf_pmu_unregister(&ddr_pmu->pmu); + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id cn10k_ddr_pmu_of_match[] = { + { .compatible = "marvell,cn10k-ddr-pmu", }, + { }, +}; +MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match); +#endif + +static struct platform_driver cn10k_ddr_pmu_driver = { + .driver = { + .name = "cn10k-ddr-pmu", + .of_match_table = of_match_ptr(cn10k_ddr_pmu_of_match), + .suppress_bind_attrs = true, + }, + .probe = cn10k_ddr_perf_probe, + .remove = cn10k_ddr_perf_remove, +}; + +static int __init cn10k_ddr_pmu_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi( + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE, + "perf/marvell/cn10k/ddr:online", NULL, + cn10k_ddr_pmu_offline_cpu); + if (ret) + return ret; + + ret = platform_driver_register(&cn10k_ddr_pmu_driver); + if (ret) + cpuhp_remove_multi_state( + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE); + return ret; +} + +static void __exit cn10k_ddr_pmu_exit(void) +{ + platform_driver_unregister(&cn10k_ddr_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE); +} + +module_init(cn10k_ddr_pmu_init); +module_exit(cn10k_ddr_pmu_exit); + +MODULE_AUTHOR("Bharat Bhushan <bbhushan2@marvell.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c index 7f4d292658e3..ee67305f822d 100644 --- 
a/drivers/perf/marvell_cn10k_tad_pmu.c +++ b/drivers/perf/marvell_cn10k_tad_pmu.c @@ -368,10 +368,12 @@ static int tad_pmu_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_OF static const struct of_device_id tad_pmu_of_match[] = { { .compatible = "marvell,cn10k-tad-pmu", }, {}, }; +#endif static struct platform_driver tad_pmu_driver = { .driver = { diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c index 05378c0fd8f3..1edb9c03704f 100644 --- a/drivers/perf/thunderx2_pmu.c +++ b/drivers/perf/thunderx2_pmu.c @@ -887,13 +887,11 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev, static acpi_status tx2_uncore_pmu_add(acpi_handle handle, u32 level, void *data, void **return_value) { + struct acpi_device *adev = acpi_fetch_acpi_dev(handle); struct tx2_uncore_pmu *tx2_pmu; - struct acpi_device *adev; enum tx2_uncore_type type; - if (acpi_bus_get_device(handle, &adev)) - return AE_OK; - if (acpi_bus_get_status(adev) || !adev->status.present) + if (!adev || acpi_bus_get_status(adev) || !adev->status.present) return AE_OK; type = get_tx2_pmu_type(adev); diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index 2b6d476bd213..0c32dffc7ede 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c @@ -867,7 +867,7 @@ static void xgene_perf_pmu_enable(struct pmu *pmu) { struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu); struct xgene_pmu *xgene_pmu = pmu_dev->parent; - int enabled = bitmap_weight(pmu_dev->cntr_assign_mask, + bool enabled = !bitmap_empty(pmu_dev->cntr_assign_mask, pmu_dev->max_counters); if (!enabled) @@ -1549,14 +1549,12 @@ static const struct acpi_device_id *xgene_pmu_acpi_match_type( static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level, void *data, void **return_value) { + struct acpi_device *adev = acpi_fetch_acpi_dev(handle); const struct acpi_device_id *acpi_id; struct xgene_pmu *xgene_pmu = data; struct xgene_pmu_dev_ctx *ctx; - struct acpi_device *adev; - if (acpi_bus_get_device(handle, &adev)) - return AE_OK; - if (acpi_bus_get_status(adev) || !adev->status.present) + if (!adev || acpi_bus_get_status(adev) || !adev->status.present) return AE_OK; acpi_id = xgene_pmu_acpi_match_type(xgene_pmu_acpi_type_match, adev); diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c index cd2332bf0e31..fdbd64c03e12 100644 --- a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c +++ b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c @@ -9,6 +9,7 @@ #include <linux/bitfield.h> #include <linux/bitops.h> +#include <linux/bits.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> @@ -250,7 +251,7 @@ static int phy_meson_axg_mipi_dphy_power_on(struct phy *phy) (DIV_ROUND_UP(priv->config.clk_zero, temp) << 16) | (DIV_ROUND_UP(priv->config.clk_prepare, temp) << 24)); regmap_write(priv->regmap, MIPI_DSI_CLK_TIM1, - DIV_ROUND_UP(priv->config.clk_pre, temp)); + DIV_ROUND_UP(priv->config.clk_pre, BITS_PER_BYTE)); regmap_write(priv->regmap, MIPI_DSI_HS_TIM, DIV_ROUND_UP(priv->config.hs_exit, temp) | diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig index f81e23742079..849c4204f550 100644 --- a/drivers/phy/broadcom/Kconfig +++ b/drivers/phy/broadcom/Kconfig @@ -97,8 +97,7 @@ config PHY_BRCM_USB depends on OF select GENERIC_PHY select SOC_BRCMSTB if ARCH_BRCMSTB - default ARCH_BCM4908 - default ARCH_BRCMSTB + default ARCH_BCM4908 || ARCH_BRCMSTB help Enable this to support the Broadcom STB USB PHY. 
This driver is required by the USB XHCI, EHCI and OHCI diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c index 116fb23aebd9..0f1deb6e0eab 100644 --- a/drivers/phy/broadcom/phy-brcm-usb.c +++ b/drivers/phy/broadcom/phy-brcm-usb.c @@ -18,6 +18,7 @@ #include <linux/soc/brcmstb/brcmstb.h> #include <dt-bindings/phy/phy.h> #include <linux/mfd/syscon.h> +#include <linux/suspend.h> #include "phy-brcm-usb-init.h" @@ -70,12 +71,35 @@ struct brcm_usb_phy_data { int init_count; int wake_irq; struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX]; + struct notifier_block pm_notifier; + bool pm_active; }; static s8 *node_reg_names[BRCM_REGS_MAX] = { "crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec" }; +static int brcm_pm_notifier(struct notifier_block *notifier, + unsigned long pm_event, + void *unused) +{ + struct brcm_usb_phy_data *priv = + container_of(notifier, struct brcm_usb_phy_data, pm_notifier); + + switch (pm_event) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + priv->pm_active = true; + break; + case PM_POST_RESTORE: + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + priv->pm_active = false; + break; + } + return NOTIFY_DONE; +} + static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id) { struct phy *gphy = dev_id; @@ -91,6 +115,9 @@ static int brcm_usb_phy_init(struct phy *gphy) struct brcm_usb_phy_data *priv = container_of(phy, struct brcm_usb_phy_data, phys[phy->id]); + if (priv->pm_active) + return 0; + /* * Use a lock to make sure a second caller waits until * the base phy is inited before using it. @@ -120,6 +147,9 @@ static int brcm_usb_phy_exit(struct phy *gphy) struct brcm_usb_phy_data *priv = container_of(phy, struct brcm_usb_phy_data, phys[phy->id]); + if (priv->pm_active) + return 0; + dev_dbg(&gphy->dev, "EXIT\n"); if (phy->id == BRCM_USB_PHY_2_0) brcm_usb_uninit_eohci(&priv->ini); @@ -488,6 +518,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) if (err) return err; + priv->pm_notifier.notifier_call = brcm_pm_notifier; + register_pm_notifier(&priv->pm_notifier); + mutex_init(&priv->mutex); /* make sure invert settings are correct */ @@ -528,7 +561,10 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) static int brcm_usb_phy_remove(struct platform_device *pdev) { + struct brcm_usb_phy_data *priv = dev_get_drvdata(&pdev->dev); + sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group); + unregister_pm_notifier(&priv->pm_notifier); return 0; } @@ -539,6 +575,7 @@ static int brcm_usb_phy_suspend(struct device *dev) struct brcm_usb_phy_data *priv = dev_get_drvdata(dev); if (priv->init_count) { + dev_dbg(dev, "SUSPEND\n"); priv->ini.wake_enabled = device_may_wakeup(dev); if (priv->phys[BRCM_USB_PHY_3_0].inited) brcm_usb_uninit_xhci(&priv->ini); @@ -578,6 +615,7 @@ static int brcm_usb_phy_resume(struct device *dev) * Uninitialize anything that wasn't previously initialized. 
*/ if (priv->init_count) { + dev_dbg(dev, "RESUME\n"); if (priv->wake_irq >= 0) disable_irq_wake(priv->wake_irq); brcm_usb_init_common(&priv->ini); diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c index da24acd26666..e265647e29a2 100644 --- a/drivers/phy/cadence/phy-cadence-sierra.c +++ b/drivers/phy/cadence/phy-cadence-sierra.c @@ -1338,7 +1338,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; const struct cdns_sierra_data *data; unsigned int id_value; - int i, ret, node = 0; + int ret, node = 0; void __iomem *base; struct device_node *dn = dev->of_node, *child; @@ -1416,7 +1416,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) dev_err(dev, "failed to get reset %s\n", child->full_name); ret = PTR_ERR(sp->phys[node].lnk_rst); - goto put_child2; + of_node_put(child); + goto put_control; } if (!sp->autoconf) { @@ -1424,7 +1425,9 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) if (ret) { dev_err(dev, "missing property in node %s\n", child->name); - goto put_child; + of_node_put(child); + reset_control_put(sp->phys[node].lnk_rst); + goto put_control; } } @@ -1434,7 +1437,9 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) if (IS_ERR(gphy)) { ret = PTR_ERR(gphy); - goto put_child; + of_node_put(child); + reset_control_put(sp->phys[node].lnk_rst); + goto put_control; } sp->phys[node].phy = gphy; phy_set_drvdata(gphy, &sp->phys[node]); @@ -1446,26 +1451,28 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) if (sp->num_lanes > SIERRA_MAX_LANES) { ret = -EINVAL; dev_err(dev, "Invalid lane configuration\n"); - goto put_child2; + goto put_control; } /* If more than one subnode, configure the PHY as multilink */ if (!sp->autoconf && sp->nsubnodes > 1) { ret = cdns_sierra_phy_configure_multilink(sp); if (ret) - goto put_child2; + goto put_control; } pm_runtime_enable(dev); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - return PTR_ERR_OR_ZERO(phy_provider); - -put_child: - node++; -put_child2: - for (i = 0; i < node; i++) - reset_control_put(sp->phys[i].lnk_rst); - of_node_put(child); + if (IS_ERR(phy_provider)) { + ret = PTR_ERR(phy_provider); + goto put_control; + } + + return 0; + +put_control: + while (--node >= 0) + reset_control_put(sp->phys[node].lnk_rst); clk_disable: cdns_sierra_phy_disable_clocks(sp); reset_control_assert(sp->apb_rst); diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index 6d307102f4f6..8ee7682b8e93 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c @@ -992,7 +992,7 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc /* no efuse, ignore it */ if (!instance->efuse_intr && !instance->efuse_rx_imp && - !instance->efuse_rx_imp) { + !instance->efuse_tx_imp) { dev_warn(dev, "no u3 intr efuse, but dts enable it\n"); instance->efuse_sw_en = 0; break; diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c index 288c9c67aa74..ccb4045685cd 100644 --- a/drivers/phy/phy-core-mipi-dphy.c +++ b/drivers/phy/phy-core-mipi-dphy.c @@ -36,7 +36,7 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock, cfg->clk_miss = 0; cfg->clk_post = 60000 + 52 * ui; - cfg->clk_pre = 8000; + cfg->clk_pre = 8; cfg->clk_prepare = 38000; cfg->clk_settle = 95000; cfg->clk_term_en = 0; @@ -97,7 +97,7 @@ int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg) if 
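/*
 * Unit note for the clk_pre bound here and the default set above:
 * clk_pre is expressed in UI (unit intervals) rather than in
 * picoseconds like most other fields, so the D-PHY minimum is 8, not
 * 8000. One HS byte clock spans 8 UI, which is why the rockchip and
 * meson consumers in this series convert with
 * DIV_ROUND_UP(cfg->clk_pre, BITS_PER_BYTE).
 */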
(cfg->clk_post < (60000 + 52 * ui)) return -EINVAL; - if (cfg->clk_pre < 8000) + if (cfg->clk_pre < 8) return -EINVAL; if (cfg->clk_prepare < 38000 || cfg->clk_prepare > 95000) diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c index 347dc79a18c1..630e01b5c19b 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c @@ -5,6 +5,7 @@ * Author: Wyon Bi <bivvy.bi@rock-chips.com> */ +#include <linux/bits.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/iopoll.h> @@ -364,7 +365,7 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno) * The value of counter for HS Tclk-pre * Tclk-pre = Tpin_txbyteclkhs * value */ - clk_pre = DIV_ROUND_UP(cfg->clk_pre, t_txbyteclkhs); + clk_pre = DIV_ROUND_UP(cfg->clk_pre, BITS_PER_BYTE); /* * The value of counter for HS Tlpx Time diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c index 2ce9bfd783d4..007a23c78d56 100644 --- a/drivers/phy/st/phy-stm32-usbphyc.c +++ b/drivers/phy/st/phy-stm32-usbphyc.c @@ -304,7 +304,7 @@ static int stm32_usbphyc_pll_enable(struct stm32_usbphyc *usbphyc) ret = __stm32_usbphyc_pll_disable(usbphyc); if (ret) - return ret; + goto dec_n_pll_cons; } ret = stm32_usbphyc_regulators_enable(usbphyc); diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index b3384c31637a..da546c35d1d5 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -233,6 +233,7 @@ static const struct clk_div_table clk_div_table[] = { { .val = 1, .div = 2, }, { .val = 2, .div = 4, }, { .val = 3, .div = 8, }, + { /* sentinel */ }, }; static const struct wiz_clk_div_sel clk_div_sel[] = { diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c index f478d8a17115..9be9535ad7ab 100644 --- a/drivers/phy/xilinx/phy-zynqmp.c +++ b/drivers/phy/xilinx/phy-zynqmp.c @@ -134,7 +134,8 @@ #define PROT_BUS_WIDTH_10 0x0 #define PROT_BUS_WIDTH_20 0x1 #define PROT_BUS_WIDTH_40 0x2 -#define PROT_BUS_WIDTH_SHIFT 2 +#define PROT_BUS_WIDTH_SHIFT(n) ((n) * 2) +#define PROT_BUS_WIDTH_MASK(n) GENMASK((n) * 2 + 1, (n) * 2) /* Number of GT lanes */ #define NUM_LANES 4 @@ -445,12 +446,12 @@ static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy) static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy) { struct xpsgtr_dev *gtr_dev = gtr_phy->dev; + u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane); + u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane); /* Set SGMII protocol TX and RX bus width to 10 bits. 
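 * Lane n owns the two-bit field at bits [2n+1:2n], e.g.
 * PROT_BUS_WIDTH_MASK(3) = GENMASK(7, 6). The switch to
 * xpsgtr_clr_set() makes this a read-modify-write, so programming one
 * lane no longer wipes the width settings of the other three.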
*/ - xpsgtr_write(gtr_dev, TX_PROT_BUS_WIDTH, - PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT)); - xpsgtr_write(gtr_dev, RX_PROT_BUS_WIDTH, - PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT)); + xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val); + xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val); xpsgtr_bypass_scrambler_8b10b(gtr_phy); } diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index 08c364d611f5..f64d29f614ec 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -42,9 +42,9 @@ obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o +obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o obj-$(CONFIG_PINCTRL_STARFIVE) += pinctrl-starfive.o obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o -obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o obj-$(CONFIG_PINCTRL_THUNDERBAY) += pinctrl-thunderbay.o diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig index 5123f4c33854..ac1e400bbbac 100644 --- a/drivers/pinctrl/bcm/Kconfig +++ b/drivers/pinctrl/bcm/Kconfig @@ -35,6 +35,7 @@ config PINCTRL_BCM63XX select PINCONF select GENERIC_PINCONF select GPIOLIB + select REGMAP select GPIO_REGMAP config PINCTRL_BCM6318 diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index c4ebfa852b42..47e433e09c5c 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -1269,16 +1269,18 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) { - pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range); - return -ENOMEM; + err = -ENOMEM; + goto out_remove; } if (is_7211) { pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS, sizeof(*pc->wake_irq), GFP_KERNEL); - if (!pc->wake_irq) - return -ENOMEM; + if (!pc->wake_irq) { + err = -ENOMEM; + goto out_remove; + } } /* @@ -1306,8 +1308,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) len = strlen(dev_name(pc->dev)) + 16; name = devm_kzalloc(pc->dev, len, GFP_KERNEL); - if (!name) - return -ENOMEM; + if (!name) { + err = -ENOMEM; + goto out_remove; + } snprintf(name, len, "%s:bank%d", dev_name(pc->dev), i); @@ -1326,11 +1330,14 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) err = gpiochip_add_data(&pc->gpio_chip, pc); if (err) { dev_err(dev, "could not add GPIO chip\n"); - pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range); - return err; + goto out_remove; } return 0; + +out_remove: + pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range); + return err; } static struct platform_driver bcm2835_pinctrl_driver = { diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index abffda1fd51e..1d5818269076 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1471,8 +1471,9 @@ static void chv_gpio_irq_handler(struct irq_desc *desc) offset = cctx->intr_lines[intr_line]; if (offset == CHV_INVALID_HWIRQ) { - dev_err(dev, "interrupt on unused interrupt line %u\n", intr_line); - continue; + dev_warn_once(dev, "interrupt on unmapped interrupt line %u\n", intr_line); + /* Some boards expect hwirq 0 to trigger in this case */ + offset = 0; } generic_handle_domain_irq(gc->irq.domain, offset); diff 
--git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 85750974d182..826d494f3cc6 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -451,8 +451,8 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) value &= ~PADCFG0_PMODE_MASK; value |= PADCFG0_PMODE_GPIO; - /* Disable input and output buffers */ - value |= PADCFG0_GPIORXDIS; + /* Disable TX buffer and enable RX (this will be input) */ + value &= ~PADCFG0_GPIORXDIS; value |= PADCFG0_GPIOTXDIS; /* Disable SCI/SMI/NMI generation */ @@ -497,9 +497,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, intel_gpio_set_gpio_mode(padcfg0); - /* Disable TX buffer and enable RX (this will be input) */ - __intel_gpio_set_direction(padcfg0, true); - raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; @@ -1115,9 +1112,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned int type) intel_gpio_set_gpio_mode(reg); - /* Disable TX buffer and enable RX (this will be input) */ - __intel_gpio_set_direction(reg, true); - value = readl(reg); value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV); @@ -1216,6 +1210,39 @@ static irqreturn_t intel_gpio_irq(int irq, void *data) return IRQ_RETVAL(ret); } +static void intel_gpio_irq_init(struct intel_pinctrl *pctrl) +{ + int i; + + for (i = 0; i < pctrl->ncommunities; i++) { + const struct intel_community *community; + void __iomem *base; + unsigned int gpp; + + community = &pctrl->communities[i]; + base = community->regs; + + for (gpp = 0; gpp < community->ngpps; gpp++) { + /* Mask and clear all interrupts */ + writel(0, base + community->ie_offset + gpp * 4); + writel(0xffff, base + community->is_offset + gpp * 4); + } + } +} + +static int intel_gpio_irq_init_hw(struct gpio_chip *gc) +{ + struct intel_pinctrl *pctrl = gpiochip_get_data(gc); + + /* + * Make sure the interrupt lines are in a proper state before + * further configuration. 
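 * The ->init_hw() hook runs while the gpiochip is being registered,
 * before any of these interrupts can be requested, so pending bits
 * left behind by firmware are masked and cleared rather than firing
 * as spurious IRQs later.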
+ */ + intel_gpio_irq_init(pctrl); + + return 0; +} + static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl, const struct intel_community *community) { @@ -1320,6 +1347,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq) girq->num_parents = 0; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; + girq->init_hw = intel_gpio_irq_init_hw; ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl); if (ret) { @@ -1695,26 +1723,6 @@ int intel_pinctrl_suspend_noirq(struct device *dev) } EXPORT_SYMBOL_GPL(intel_pinctrl_suspend_noirq); -static void intel_gpio_irq_init(struct intel_pinctrl *pctrl) -{ - size_t i; - - for (i = 0; i < pctrl->ncommunities; i++) { - const struct intel_community *community; - void __iomem *base; - unsigned int gpp; - - community = &pctrl->communities[i]; - base = community->regs; - - for (gpp = 0; gpp < community->ngpps; gpp++) { - /* Mask and clear all interrupts */ - writel(0, base + community->ie_offset + gpp * 4); - writel(0xffff, base + community->is_offset + gpp * 4); - } - } -} - static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value) { u32 curr, updated; diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c index 0bcd19597e4a..3ddaeffc0415 100644 --- a/drivers/pinctrl/intel/pinctrl-tigerlake.c +++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c @@ -749,7 +749,6 @@ static const struct acpi_device_id tgl_pinctrl_acpi_match[] = { { "INT34C5", (kernel_ulong_t)&tgllp_soc_data }, { "INT34C6", (kernel_ulong_t)&tglh_soc_data }, { "INTC1055", (kernel_ulong_t)&tgllp_soc_data }, - { "INTC1057", (kernel_ulong_t)&tgllp_soc_data }, { } }; MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match); diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c index 4d81908d6725..ba536fd4d674 100644 --- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c +++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c @@ -78,7 +78,6 @@ struct npcm7xx_gpio { struct gpio_chip gc; int irqbase; int irq; - void *priv; struct irq_chip irq_chip; u32 pinctrl_id; int (*direction_input)(struct gpio_chip *chip, unsigned offset); @@ -226,7 +225,7 @@ static void npcmgpio_irq_handler(struct irq_desc *desc) chained_irq_enter(chip, desc); sts = ioread32(bank->base + NPCM7XX_GP_N_EVST); en = ioread32(bank->base + NPCM7XX_GP_N_EVEN); - dev_dbg(chip->parent_device, "==> got irq sts %.8x %.8x\n", sts, + dev_dbg(bank->gc.parent, "==> got irq sts %.8x %.8x\n", sts, en); sts &= en; @@ -241,33 +240,33 @@ static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type) gpiochip_get_data(irq_data_get_irq_chip_data(d)); unsigned int gpio = BIT(d->hwirq); - dev_dbg(d->chip->parent_device, "setirqtype: %u.%u = %u\n", gpio, + dev_dbg(bank->gc.parent, "setirqtype: %u.%u = %u\n", gpio, d->irq, type); switch (type) { case IRQ_TYPE_EDGE_RISING: - dev_dbg(d->chip->parent_device, "edge.rising\n"); + dev_dbg(bank->gc.parent, "edge.rising\n"); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; case IRQ_TYPE_EDGE_FALLING: - dev_dbg(d->chip->parent_device, "edge.falling\n"); + dev_dbg(bank->gc.parent, "edge.falling\n"); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio); npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; case IRQ_TYPE_EDGE_BOTH: - dev_dbg(d->chip->parent_device, "edge.both\n"); + dev_dbg(bank->gc.parent, "edge.both\n"); npcm_gpio_set(&bank->gc, bank->base + 
NPCM7XX_GP_N_EVBE, gpio); break; case IRQ_TYPE_LEVEL_LOW: - dev_dbg(d->chip->parent_device, "level.low\n"); + dev_dbg(bank->gc.parent, "level.low\n"); npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; case IRQ_TYPE_LEVEL_HIGH: - dev_dbg(d->chip->parent_device, "level.high\n"); + dev_dbg(bank->gc.parent, "level.high\n"); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; default: - dev_dbg(d->chip->parent_device, "invalid irq type\n"); + dev_dbg(bank->gc.parent, "invalid irq type\n"); return -EINVAL; } @@ -289,7 +288,7 @@ static void npcmgpio_irq_ack(struct irq_data *d) gpiochip_get_data(irq_data_get_irq_chip_data(d)); unsigned int gpio = d->hwirq; - dev_dbg(d->chip->parent_device, "irq_ack: %u.%u\n", gpio, d->irq); + dev_dbg(bank->gc.parent, "irq_ack: %u.%u\n", gpio, d->irq); iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVST); } @@ -301,7 +300,7 @@ static void npcmgpio_irq_mask(struct irq_data *d) unsigned int gpio = d->hwirq; /* Clear events */ - dev_dbg(d->chip->parent_device, "irq_mask: %u.%u\n", gpio, d->irq); + dev_dbg(bank->gc.parent, "irq_mask: %u.%u\n", gpio, d->irq); iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENC); } @@ -313,7 +312,7 @@ static void npcmgpio_irq_unmask(struct irq_data *d) unsigned int gpio = d->hwirq; /* Enable events */ - dev_dbg(d->chip->parent_device, "irq_unmask: %u.%u\n", gpio, d->irq); + dev_dbg(bank->gc.parent, "irq_unmask: %u.%u\n", gpio, d->irq); iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENS); } @@ -323,7 +322,7 @@ static unsigned int npcmgpio_irq_startup(struct irq_data *d) unsigned int gpio = d->hwirq; /* active-high, input, clear interrupt, enable interrupt */ - dev_dbg(d->chip->parent_device, "startup: %u.%u\n", gpio, d->irq); + dev_dbg(gc->parent, "startup: %u.%u\n", gpio, d->irq); npcmgpio_direction_input(gc, gpio); npcmgpio_irq_ack(d); npcmgpio_irq_unmask(d); diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c index 49e32684dbb2..ecab6bf63dc6 100644 --- a/drivers/pinctrl/pinctrl-k210.c +++ b/drivers/pinctrl/pinctrl-k210.c @@ -482,7 +482,7 @@ static int k210_pinconf_get_drive(unsigned int max_strength_ua) { int i; - for (i = K210_PC_DRIVE_MAX; i; i--) { + for (i = K210_PC_DRIVE_MAX; i >= 0; i--) { if (k210_pinconf_drive_strength[i] <= max_strength_ua) return i; } @@ -527,7 +527,7 @@ static int k210_pinconf_set_param(struct pinctrl_dev *pctldev, case PIN_CONFIG_BIAS_PULL_UP: if (!arg) return -EINVAL; - val |= K210_PC_PD; + val |= K210_PC_PU; break; case PIN_CONFIG_DRIVE_STRENGTH: arg *= 1000; diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c index 8e081c90bdb2..639f1130e989 100644 --- a/drivers/pinctrl/pinctrl-microchip-sgpio.c +++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c @@ -137,7 +137,8 @@ static inline int sgpio_addr_to_pin(struct sgpio_priv *priv, int port, int bit) static inline u32 sgpio_get_addr(struct sgpio_priv *priv, u32 rno, u32 off) { - return priv->properties->regoff[rno] + off; + return (priv->properties->regoff[rno] + off) * + regmap_get_reg_stride(priv->regs); } static u32 sgpio_readl(struct sgpio_priv *priv, u32 rno, u32 off) diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/pinctrl-starfive.c index 0b912152a405..ab4b2ee9f217 100644 --- a/drivers/pinctrl/pinctrl-starfive.c +++ b/drivers/pinctrl/pinctrl-starfive.c @@ -1164,6 +1164,7 @@ static int starfive_irq_set_type(struct irq_data *d, unsigned int trigger) } static struct irq_chip starfive_irq_chip = { + .name = "StarFive GPIO", .irq_ack 
= starfive_irq_ack, .irq_mask = starfive_irq_mask, .irq_mask_ack = starfive_irq_mask_ack, @@ -1307,9 +1308,6 @@ static int starfive_probe(struct platform_device *pdev) sfp->gc.base = -1; sfp->gc.ngpio = NR_GPIOS; - starfive_irq_chip.parent_device = dev; - starfive_irq_chip.name = sfp->gc.label; - sfp->gc.irq.chip = &starfive_irq_chip; sfp->gc.irq.parent_handler = starfive_gpio_irq_handler; sfp->gc.irq.num_parents = 1; @@ -1330,6 +1328,8 @@ static int starfive_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "could not register gpiochip\n"); + irq_domain_set_pm_device(sfp->gc.irq.domain, dev); + out_pinctrl_enable: return pinctrl_enable(sfp->pctl); } diff --git a/drivers/pinctrl/pinctrl-thunderbay.c b/drivers/pinctrl/pinctrl-thunderbay.c index b5b47f4dd774..79d44bca039e 100644 --- a/drivers/pinctrl/pinctrl-thunderbay.c +++ b/drivers/pinctrl/pinctrl-thunderbay.c @@ -773,63 +773,42 @@ static int thunderbay_build_groups(struct thunderbay_pinctrl *tpc) static int thunderbay_add_functions(struct thunderbay_pinctrl *tpc, struct function_desc *funcs) { - struct function_desc *function = funcs; int i; /* Assign the groups for each function */ - for (i = 0; i < tpc->soc->npins; i++) { - const struct pinctrl_pin_desc *pin_info = thunderbay_pins + i; - struct thunderbay_mux_desc *pin_mux = pin_info->drv_data; - - while (pin_mux->name) { - const char **grp; - int j, grp_num, match = 0; - size_t grp_size; - struct function_desc *func; - - for (j = 0; j < tpc->nfuncs; j++) { - if (!strcmp(pin_mux->name, function[j].name)) { - match = 1; - break; - } - } - - if (!match) - return -EINVAL; - - func = function + j; - grp_num = func->num_group_names; - grp_size = sizeof(*func->group_names); - - if (!func->group_names) { - func->group_names = devm_kcalloc(tpc->dev, - grp_num, - grp_size, - GFP_KERNEL); - if (!func->group_names) { - kfree(func); - return -ENOMEM; - } + for (i = 0; i < tpc->nfuncs; i++) { + struct function_desc *func = &funcs[i]; + const char **group_names; + unsigned int grp_idx = 0; + int j; + + group_names = devm_kcalloc(tpc->dev, func->num_group_names, + sizeof(*group_names), GFP_KERNEL); + if (!group_names) + return -ENOMEM; + + for (j = 0; j < tpc->soc->npins; j++) { + const struct pinctrl_pin_desc *pin_info = &thunderbay_pins[j]; + struct thunderbay_mux_desc *pin_mux; + + for (pin_mux = pin_info->drv_data; pin_mux->name; pin_mux++) { + if (!strcmp(pin_mux->name, func->name)) + group_names[grp_idx++] = pin_info->name; } - - grp = func->group_names; - while (*grp) - grp++; - - *grp = pin_info->name; - pin_mux++; } + + func->group_names = group_names; } /* Add all functions */ for (i = 0; i < tpc->nfuncs; i++) { pinmux_generic_add_function(tpc->pctrl, - function[i].name, - function[i].group_names, - function[i].num_group_names, - function[i].data); + funcs[i].name, + funcs[i].group_names, + funcs[i].num_group_names, + funcs[i].data); } - kfree(function); + kfree(funcs); return 0; } @@ -839,27 +818,30 @@ static int thunderbay_build_functions(struct thunderbay_pinctrl *tpc) void *ptr; int pin; - /* Total number of functions is unknown at this point. Allocate first. */ + /* + * Allocate maximum possible number of functions. Assume every pin + * being part of 8 (hw maximum) globally unique muxes. 
+ */ tpc->nfuncs = 0; thunderbay_funcs = kcalloc(tpc->soc->npins * 8, sizeof(*thunderbay_funcs), GFP_KERNEL); if (!thunderbay_funcs) return -ENOMEM; - /* Find total number of functions and each's properties */ + /* Setup 1 function for each unique mux */ for (pin = 0; pin < tpc->soc->npins; pin++) { const struct pinctrl_pin_desc *pin_info = thunderbay_pins + pin; - struct thunderbay_mux_desc *pin_mux = pin_info->drv_data; + struct thunderbay_mux_desc *pin_mux; - while (pin_mux->name) { - struct function_desc *func = thunderbay_funcs; + for (pin_mux = pin_info->drv_data; pin_mux->name; pin_mux++) { + struct function_desc *func; - while (func->name) { + /* Check if we already have function for this mux */ + for (func = thunderbay_funcs; func->name; func++) { if (!strcmp(pin_mux->name, func->name)) { func->num_group_names++; break; } - func++; } if (!func->name) { @@ -868,8 +850,6 @@ static int thunderbay_build_functions(struct thunderbay_pinctrl *tpc) func->data = (int *)&pin_mux->mode; tpc->nfuncs++; } - - pin_mux++; } } diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c index 42da6bd399ee..e14012209992 100644 --- a/drivers/pinctrl/pinctrl-zynqmp.c +++ b/drivers/pinctrl/pinctrl-zynqmp.c @@ -809,7 +809,6 @@ static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev, unsigned int *npins) { struct pinctrl_pin_desc *pins, *pin; - char **pin_names; int ret; int i; @@ -821,14 +820,13 @@ static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev, if (!pins) return -ENOMEM; - pin_names = devm_kasprintf_strarray(dev, ZYNQMP_PIN_PREFIX, *npins); - if (IS_ERR(pin_names)) - return PTR_ERR(pin_names); - for (i = 0; i < *npins; i++) { pin = &pins[i]; pin->number = i; - pin->name = pin_names[i]; + pin->name = devm_kasprintf(dev, GFP_KERNEL, "%s%d", + ZYNQMP_PIN_PREFIX, i); + if (!pin->name) + return -ENOMEM; } *zynqmp_pins = pins; diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c index ce1917e230f4..152b71226a80 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c @@ -363,16 +363,16 @@ static const struct sunxi_desc_pin h616_pins[] = { SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "uart2"), /* CTS */ - SUNXI_FUNCTION(0x3, "i2s3"), /* DO0 */ + SUNXI_FUNCTION(0x3, "i2s3_dout0"), /* DO0 */ SUNXI_FUNCTION(0x4, "spi1"), /* MISO */ - SUNXI_FUNCTION(0x5, "i2s3"), /* DI1 */ + SUNXI_FUNCTION(0x5, "i2s3_din1"), /* DI1 */ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 8)), /* PH_EINT8 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x3, "i2s3"), /* DI0 */ + SUNXI_FUNCTION(0x3, "i2s3_din0"), /* DI0 */ SUNXI_FUNCTION(0x4, "spi1"), /* CS1 */ - SUNXI_FUNCTION(0x3, "i2s3"), /* DO1 */ + SUNXI_FUNCTION(0x5, "i2s3_dout1"), /* DO1 */ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 9)), /* PH_EINT9 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10), SUNXI_FUNCTION(0x0, "gpio_in"), diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 80d6750c74a6..1f401377ff60 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -36,6 +36,13 @@ #include "../core.h" #include "pinctrl-sunxi.h" +/* + * These lock classes tell lockdep that GPIO IRQs are in a different + * category than their parents, so it won't report false recursion. 
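In practice the two keys cover the two locks lockdep tracks per interrupt, the irq_desc spinlock and the request mutex, and they are attached while each per-bank interrupt is mapped, as the sunxi hunk further down shows. The generic pairing, sketched with placeholder names:

	static struct lock_class_key demo_irq_lock_class;
	static struct lock_class_key demo_irq_request_class;

	/* while creating mappings for the demultiplexed child IRQs */
	int virq = irq_create_mapping(domain, hwirq);

	irq_set_lockdep_class(virq, &demo_irq_lock_class,
			      &demo_irq_request_class);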
+ */ +static struct lock_class_key sunxi_pinctrl_irq_lock_class; +static struct lock_class_key sunxi_pinctrl_irq_request_class; + static struct irq_chip sunxi_pinctrl_edge_irq_chip; static struct irq_chip sunxi_pinctrl_level_irq_chip; @@ -837,7 +844,8 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip, { struct sunxi_pinctrl *pctl = gpiochip_get_data(chip); - return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, true); + return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, + chip->base + offset, true); } static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset) @@ -890,7 +898,8 @@ static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip, struct sunxi_pinctrl *pctl = gpiochip_get_data(chip); sunxi_pinctrl_gpio_set(chip, offset, value); - return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, false); + return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, + chip->base + offset, false); } static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, @@ -1555,6 +1564,8 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev, for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) { int irqno = irq_create_mapping(pctl->domain, i); + irq_set_lockdep_class(irqno, &sunxi_pinctrl_irq_lock_class, + &sunxi_pinctrl_irq_request_class); irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip, handle_edge_irq); irq_set_chip_data(irqno, pctl); diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig index 5f0578e25f71..463f1ec5c14e 100644 --- a/drivers/platform/surface/Kconfig +++ b/drivers/platform/surface/Kconfig @@ -5,6 +5,7 @@ menuconfig SURFACE_PLATFORMS bool "Microsoft Surface Platform-Specific Device Drivers" + depends on ARM64 || X86 || COMPILE_TEST default y help Say Y here to get to see options for platform-specific device drivers diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c index abac3eec565e..444ec81ba02d 100644 --- a/drivers/platform/surface/surface3_power.c +++ b/drivers/platform/surface/surface3_power.c @@ -232,14 +232,21 @@ static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix) } bix->last_full_charg_capacity = ret; - /* get serial number */ + /* + * Get the serial number. On some devices (with an unofficial replacement + * battery?) reading any of the serial number range addresses gets + * nacked; in that case just leave the serial number empty.
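The hunk that follows implements exactly that policy. Pulled out of context, the tolerant-read pattern looks roughly like this (a sketch using the driver's names; per the kernel's %pE escaped-buffer format the field width gives the byte count, so the serial is rebuilt from 3 bytes at buf + 7 followed by 6 bytes at buf):

	u8 buf[10];
	int ret;

	ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO,
					    sizeof(buf), buf);
	if (ret == -EREMOTEIO) {
		bix->serial[0] = '\0';	/* device nacked: serial simply absent */
	} else if (ret != sizeof(buf)) {
		return ret;		/* short read or other I/O error */
	} else {
		snprintf(bix->serial, ARRAY_SIZE(bix->serial),
			 "%3pE%6pE", buf + 7, buf);
	}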
+ */ ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO, sizeof(buf), buf); - if (ret != sizeof(buf)) { + if (ret == -EREMOTEIO) { + /* no serial number available */ + } else if (ret != sizeof(buf)) { dev_err(&client->dev, "Error reading serial no: %d\n", ret); return ret; + } else { + snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf); } - snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf); /* get cycle count */ ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT); diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c index f794343d6aaa..b1103f85a85a 100644 --- a/drivers/platform/x86/amd-pmc.c +++ b/drivers/platform/x86/amd-pmc.c @@ -21,6 +21,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/platform_device.h> +#include <linux/pm_qos.h> #include <linux/rtc.h> #include <linux/suspend.h> #include <linux/seq_file.h> @@ -85,6 +86,9 @@ #define PMC_MSG_DELAY_MIN_US 50 #define RESPONSE_REGISTER_LOOP_MAX 20000 +/* QoS request to let CPUs enter idle states, but not the deepest ones */ +#define AMD_PMC_MAX_IDLE_STATE_LATENCY 3 + #define SOC_SUBSYSTEM_IP_MAX 12 #define DELAY_MIN_US 2000 #define DELAY_MAX_US 3000 @@ -124,12 +128,14 @@ struct amd_pmc_dev { u32 cpu_id; u32 active_ips; /* SMU version information */ - u16 major; - u16 minor; - u16 rev; + u8 smu_program; + u8 major; + u8 minor; + u8 rev; struct device *dev; struct pci_dev *rdev; struct mutex lock; /* generic mutex lock */ + struct pm_qos_request amd_pmc_pm_qos_req; #if IS_ENABLED(CONFIG_DEBUG_FS) struct dentry *dbgfs_dir; #endif /* CONFIG_DEBUG_FS */ @@ -180,11 +186,13 @@ static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev) if (rc) return rc; - dev->major = (val >> 16) & GENMASK(15, 0); + dev->smu_program = (val >> 24) & GENMASK(7, 0); + dev->major = (val >> 16) & GENMASK(7, 0); dev->minor = (val >> 8) & GENMASK(7, 0); dev->rev = (val >> 0) & GENMASK(7, 0); - dev_dbg(dev->dev, "SMU version is %u.%u.%u\n", dev->major, dev->minor, dev->rev); + dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n", + dev->smu_program, dev->major, dev->minor, dev->rev); return 0; } @@ -226,7 +234,7 @@ static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp) return 0; } -const struct file_operations amd_pmc_stb_debugfs_fops = { +static const struct file_operations amd_pmc_stb_debugfs_fops = { .owner = THIS_MODULE, .open = amd_pmc_stb_debugfs_open, .read = amd_pmc_stb_debugfs_read, @@ -518,6 +526,14 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg) rc = rtc_alarm_irq_enable(rtc_device, 0); dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration); + /* + * Prevent CPUs from entering deep idle states while sending OS_HINT, + * which is only safe to send when at least one of the CPUs + * is not in a deep idle state.
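The request enforcing this is created at probe time and toggled around the suspend path in the hunks below. Reduced to its three calls, the PM QoS lifecycle looks like this (a sketch using the driver's field and constant names; the bound is in microseconds):

	/* probe(): create the request with no constraint attached */
	cpu_latency_qos_add_request(&dev->amd_pmc_pm_qos_req, PM_QOS_DEFAULT_VALUE);

	/* suspend path: cap exit latency so no CPU enters the deepest C-state */
	cpu_latency_qos_update_request(&dev->amd_pmc_pm_qos_req,
				       AMD_PMC_MAX_IDLE_STATE_LATENCY);
	wake_up_all_idle_cpus();	/* kick CPUs that are already deep idle */

	/* resume or error path: drop the constraint again */
	cpu_latency_qos_update_request(&dev->amd_pmc_pm_qos_req, PM_QOS_DEFAULT_VALUE);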
+ */ + cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req, AMD_PMC_MAX_IDLE_STATE_LATENCY); + wake_up_all_idle_cpus(); + return rc; } @@ -535,24 +551,31 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev) /* Activate CZN specific RTC functionality */ if (pdev->cpu_id == AMD_CPU_ID_CZN) { rc = amd_pmc_verify_czn_rtc(pdev, &arg); - if (rc < 0) - return rc; + if (rc) + goto fail; } /* Dump the IdleMask before we send hint to SMU */ amd_pmc_idlemask_read(pdev, dev, NULL); msg = amd_pmc_get_os_hint(pdev); rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0); - if (rc) + if (rc) { dev_err(pdev->dev, "suspend failed\n"); + goto fail; + } if (enable_stb) rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF); - if (rc) { + if (rc) { dev_err(pdev->dev, "error writing to STB\n"); - return rc; + goto fail; } + return 0; +fail: + if (pdev->cpu_id == AMD_CPU_ID_CZN) + cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req, + PM_QOS_DEFAULT_VALUE); return rc; } @@ -576,12 +599,15 @@ static int __maybe_unused amd_pmc_resume(struct device *dev) /* Write data incremented by 1 to distinguish in stb_read */ if (enable_stb) rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1); - if (rc) { + if (rc) dev_err(pdev->dev, "error writing to STB\n"); - return rc; - } - return 0; + /* Restore the QoS request back to defaults if it was set */ + if (pdev->cpu_id == AMD_CPU_ID_CZN) + cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req, + PM_QOS_DEFAULT_VALUE); + + return rc; } static const struct dev_pm_ops amd_pmc_pm_ops = { @@ -719,6 +745,7 @@ static int amd_pmc_probe(struct platform_device *pdev) amd_pmc_get_smu_version(dev); platform_set_drvdata(pdev, dev); amd_pmc_dbgfs_register(dev); + cpu_latency_qos_add_request(&dev->amd_pmc_pm_qos_req, PM_QOS_DEFAULT_VALUE); return 0; err_pci_dev_put: diff --git a/drivers/platform/x86/asus-tf103c-dock.c b/drivers/platform/x86/asus-tf103c-dock.c index d4ef8f362ee6..6fd0c9fea82d 100644 --- a/drivers/platform/x86/asus-tf103c-dock.c +++ b/drivers/platform/x86/asus-tf103c-dock.c @@ -250,7 +250,7 @@ static int tf103c_dock_hid_raw_request(struct hid_device *hid, u8 reportnum, return 0; } -struct hid_ll_driver tf103c_dock_hid_ll_driver = { +static struct hid_ll_driver tf103c_dock_hid_ll_driver = { .parse = tf103c_dock_hid_parse, .start = tf103c_dock_hid_start, .stop = tf103c_dock_hid_stop, @@ -921,7 +921,7 @@ static int __maybe_unused tf103c_dock_resume(struct device *dev) return 0; } -SIMPLE_DEV_PM_OPS(tf103c_dock_pm_ops, tf103c_dock_suspend, tf103c_dock_resume); +static SIMPLE_DEV_PM_OPS(tf103c_dock_pm_ops, tf103c_dock_suspend, tf103c_dock_resume); static const struct acpi_device_id tf103c_dock_acpi_match[] = { {"NPCE69A"}, diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index a3b83b22a3b1..2104a2621e50 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -2223,7 +2223,7 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available, err = fan_curve_get_factory_default(asus, fan_dev); if (err) { - if (err == -ENODEV) + if (err == -ENODEV || err == -ENODATA) return 0; return err; } diff --git a/drivers/platform/x86/intel/crystal_cove_charger.c b/drivers/platform/x86/intel/crystal_cove_charger.c index 0374bc742513..e4299cfa2205 100644 --- a/drivers/platform/x86/intel/crystal_cove_charger.c +++ b/drivers/platform/x86/intel/crystal_cove_charger.c @@ -17,6 +17,7 @@ #include <linux/regmap.h> #define CHGRIRQ_REG 0x0a +#define MCHGRIRQ_REG 0x17 struct crystal_cove_charger_data { struct mutex buslock; 
/* irq_bus_lock */ @@ -25,8 +26,8 @@ struct crystal_cove_charger_data { struct irq_domain *irq_domain; int irq; int charger_irq; - bool irq_enabled; - bool irq_is_enabled; + u8 mask; + u8 new_mask; }; static irqreturn_t crystal_cove_charger_irq(int irq, void *data) @@ -53,13 +54,9 @@ static void crystal_cove_charger_irq_bus_sync_unlock(struct irq_data *data) { struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data); - if (charger->irq_is_enabled != charger->irq_enabled) { - if (charger->irq_enabled) - enable_irq(charger->irq); - else - disable_irq(charger->irq); - - charger->irq_is_enabled = charger->irq_enabled; + if (charger->mask != charger->new_mask) { + regmap_write(charger->regmap, MCHGRIRQ_REG, charger->new_mask); + charger->mask = charger->new_mask; } mutex_unlock(&charger->buslock); @@ -69,14 +66,14 @@ static void crystal_cove_charger_irq_unmask(struct irq_data *data) { struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data); - charger->irq_enabled = true; + charger->new_mask &= ~BIT(data->hwirq); } static void crystal_cove_charger_irq_mask(struct irq_data *data) { struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data); - charger->irq_enabled = false; + charger->new_mask |= BIT(data->hwirq); } static void crystal_cove_charger_rm_irq_domain(void *data) @@ -130,10 +127,13 @@ static int crystal_cove_charger_probe(struct platform_device *pdev) irq_set_nested_thread(charger->charger_irq, true); irq_set_noprobe(charger->charger_irq); + /* Mask the single 2nd level IRQ before enabling the 1st level IRQ */ + charger->mask = charger->new_mask = BIT(0); + regmap_write(charger->regmap, MCHGRIRQ_REG, charger->mask); + ret = devm_request_threaded_irq(&pdev->dev, charger->irq, NULL, crystal_cove_charger_irq, - IRQF_ONESHOT | IRQF_NO_AUTOEN, - KBUILD_MODNAME, charger); + IRQF_ONESHOT, KBUILD_MODNAME, charger); if (ret) return dev_err_probe(&pdev->dev, ret, "requesting irq\n"); diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c index f93d437fd192..525f09a3b5ff 100644 --- a/drivers/platform/x86/intel/int3472/tps68470_board_data.c +++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c @@ -100,7 +100,8 @@ static struct gpiod_lookup_table surface_go_tps68470_gpios = { .dev_id = "i2c-INT347A:00", .table = { GPIO_LOOKUP("tps68470-gpio", 9, "reset", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW) + GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW), + { } } }; diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index c9a85eb2e860..e8424e70d81d 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -596,7 +596,10 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, return ret; } -static DEFINE_MUTEX(punit_misc_dev_lock); +/* Lock to prevent module registration when already opened by user space */ +static DEFINE_MUTEX(punit_misc_dev_open_lock); +/* Lock to allow sharing one misc device for all ISST interfaces */ +static DEFINE_MUTEX(punit_misc_dev_reg_lock); static int misc_usage_count; static int misc_device_ret; static int misc_device_open; @@ -606,7 +609,7 @@ static int isst_if_open(struct inode *inode, struct file *file) int i, ret = 0; /* Fail open, if a module is going away */ - mutex_lock(&punit_misc_dev_lock); +
mutex_lock(&punit_misc_dev_open_lock); for (i = 0; i < ISST_IF_DEV_MAX; ++i) { struct isst_if_cmd_cb *cb = &punit_callbacks[i]; @@ -628,7 +631,7 @@ static int isst_if_open(struct inode *inode, struct file *file) } else { misc_device_open++; } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return ret; } @@ -637,7 +640,7 @@ static int isst_if_relase(struct inode *inode, struct file *f) { int i; - mutex_lock(&punit_misc_dev_lock); + mutex_lock(&punit_misc_dev_open_lock); misc_device_open--; for (i = 0; i < ISST_IF_DEV_MAX; ++i) { struct isst_if_cmd_cb *cb = &punit_callbacks[i]; @@ -645,7 +648,7 @@ static int isst_if_relase(struct inode *inode, struct file *f) if (cb->registered) module_put(cb->owner); } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return 0; } @@ -662,6 +665,43 @@ static struct miscdevice isst_if_char_driver = { .fops = &isst_if_char_driver_ops, }; +static int isst_misc_reg(void) +{ + mutex_lock(&punit_misc_dev_reg_lock); + if (misc_device_ret) + goto unlock_exit; + + if (!misc_usage_count) { + misc_device_ret = isst_if_cpu_info_init(); + if (misc_device_ret) + goto unlock_exit; + + misc_device_ret = misc_register(&isst_if_char_driver); + if (misc_device_ret) { + isst_if_cpu_info_exit(); + goto unlock_exit; + } + } + misc_usage_count++; + +unlock_exit: + mutex_unlock(&punit_misc_dev_reg_lock); + + return misc_device_ret; +} + +static void isst_misc_unreg(void) +{ + mutex_lock(&punit_misc_dev_reg_lock); + if (misc_usage_count) + misc_usage_count--; + if (!misc_usage_count && !misc_device_ret) { + misc_deregister(&isst_if_char_driver); + isst_if_cpu_info_exit(); + } + mutex_unlock(&punit_misc_dev_reg_lock); +} + /** * isst_if_cdev_register() - Register callback for IOCTL * @device_type: The device type this callback handling. @@ -679,38 +719,31 @@ static struct miscdevice isst_if_char_driver = { */ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb) { - if (misc_device_ret) - return misc_device_ret; + int ret; if (device_type >= ISST_IF_DEV_MAX) return -EINVAL; - mutex_lock(&punit_misc_dev_lock); + mutex_lock(&punit_misc_dev_open_lock); + /* Device is already open, we don't want to add new callbacks */ if (misc_device_open) { - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return -EAGAIN; } - if (!misc_usage_count) { - int ret; - - misc_device_ret = misc_register(&isst_if_char_driver); - if (misc_device_ret) - goto unlock_exit; - - ret = isst_if_cpu_info_init(); - if (ret) { - misc_deregister(&isst_if_char_driver); - misc_device_ret = ret; - goto unlock_exit; - } - } memcpy(&punit_callbacks[device_type], cb, sizeof(*cb)); punit_callbacks[device_type].registered = 1; - misc_usage_count++; -unlock_exit: - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); - return misc_device_ret; + ret = isst_misc_reg(); + if (ret) { + /* + * No need of mutex as the misc device register failed + * as no one can open device yet. Hence no contention. 
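Stripped of the ISST specifics, isst_misc_reg() and isst_misc_unreg() above are the classic refcounted register-once pattern, worth seeing in bare form (all demo_* names and the do_register()/do_unregister() hooks are generic placeholders):

	static DEFINE_MUTEX(demo_reg_lock);
	static int demo_usage_count;

	static int demo_reg(void)
	{
		int ret = 0;

		mutex_lock(&demo_reg_lock);
		if (!demo_usage_count) {
			ret = do_register();	/* first user sets everything up */
			if (ret)
				goto unlock;
		}
		demo_usage_count++;
	unlock:
		mutex_unlock(&demo_reg_lock);
		return ret;
	}

	static void demo_unreg(void)
	{
		mutex_lock(&demo_reg_lock);
		if (demo_usage_count && !--demo_usage_count)
			do_unregister();	/* last user tears everything down */
		mutex_unlock(&demo_reg_lock);
	}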
+ */ + punit_callbacks[device_type].registered = 0; + return ret; + } + return 0; } EXPORT_SYMBOL_GPL(isst_if_cdev_register); @@ -725,16 +758,12 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register); */ void isst_if_cdev_unregister(int device_type) { - mutex_lock(&punit_misc_dev_lock); - misc_usage_count--; + isst_misc_unreg(); + mutex_lock(&punit_misc_dev_open_lock); punit_callbacks[device_type].registered = 0; if (device_type == ISST_IF_DEV_MBOX) isst_delete_hash(); - if (!misc_usage_count && !misc_device_ret) { - misc_deregister(&isst_if_char_driver); - isst_if_cpu_info_exit(); - } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); } EXPORT_SYMBOL_GPL(isst_if_cdev_unregister); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 098180fb1cfc..3424b080db77 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -8679,9 +8679,10 @@ static const struct attribute_group fan_driver_attr_group = { .attrs = fan_driver_attributes, }; -#define TPACPI_FAN_Q1 0x0001 /* Unitialized HFSP */ -#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */ -#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */ +#define TPACPI_FAN_Q1 0x0001 /* Uninitialized HFSP */ +#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */ +#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */ +#define TPACPI_FAN_NOFAN 0x0008 /* no fan available */ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1), @@ -8702,6 +8703,8 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (4nd gen) */ TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */ TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */ + TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL), /* T15g (2nd gen) */ + TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */ }; static int __init fan_init(struct ibm_init_struct *iibm) @@ -8730,6 +8733,11 @@ static int __init fan_init(struct ibm_init_struct *iibm) quirks = tpacpi_check_quirks(fan_quirk_table, ARRAY_SIZE(fan_quirk_table)); + if (quirks & TPACPI_FAN_NOFAN) { + pr_info("No integrated ThinkPad fan available\n"); + return -ENODEV; + } + if (gfan_handle) { /* 570, 600e/x, 770e, 770x */ fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN; @@ -10112,6 +10120,9 @@ static struct ibm_struct proxsensor_driver_data = { #define DYTC_CMD_MMC_GET 8 /* To get current MMC function and mode */ #define DYTC_CMD_RESET 0x1ff /* To reset back to default */ +#define DYTC_CMD_FUNC_CAP 3 /* To get DYTC capabilities */ +#define DYTC_FC_MMC 27 /* MMC Mode supported */ + #define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */ #define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */ @@ -10324,6 +10335,15 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm) if (dytc_version < 5) return -ENODEV; + /* Check what capabilities are supported. 
Currently MMC is needed */ + err = dytc_command(DYTC_CMD_FUNC_CAP, &output); + if (err) + return err; + if (!(output & BIT(DYTC_FC_MMC))) { + dbg_printk(TPACPI_DBG_INIT, " DYTC MMC mode not supported\n"); + return -ENODEV; + } + dbg_printk(TPACPI_DBG_INIT, "DYTC version %d: thermal mode available\n", dytc_version); /* diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 494f23052678..bc97bfa8e8a6 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -770,6 +770,21 @@ static const struct ts_dmi_data predia_basic_data = { .properties = predia_basic_props, }; +static const struct property_entry rwc_nanote_p8_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-y", 46), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1728), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct ts_dmi_data rwc_nanote_p8_data = { + .acpi_name = "MSSL1680:00", + .properties = rwc_nanote_p8_props, +}; + static const struct property_entry schneider_sct101ctm_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1715), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), @@ -1395,6 +1410,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* RWC NANOTE P8 */ + .driver_data = (void *)&rwc_nanote_p8_data, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Default string"), + DMI_MATCH(DMI_PRODUCT_NAME, "AY07J"), + DMI_MATCH(DMI_PRODUCT_SKU, "0001") + }, + }, + { /* Schneider SCT101CTM */ .driver_data = (void *)&schneider_sct101ctm_data, .matches = { diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c index 3ba63ad91b28..9360a8a92486 100644 --- a/drivers/platform/x86/x86-android-tablets.c +++ b/drivers/platform/x86/x86-android-tablets.c @@ -26,6 +26,7 @@ #include <linux/string.h> /* For gpio_get_desc() which is EXPORT_SYMBOL_GPL() */ #include "../../gpio/gpiolib.h" +#include "../../gpio/gpiolib-acpi.h" /* * Helper code to get Linux IRQ numbers given a description of the IRQ source @@ -47,7 +48,7 @@ struct x86_acpi_irq_data { int polarity; /* ACPI_ACTIVE_HIGH / ACPI_ACTIVE_LOW / ACPI_ACTIVE_BOTH */ }; -static int x86_acpi_irq_helper_gpiochip_find(struct gpio_chip *gc, void *data) +static int gpiochip_find_match_label(struct gpio_chip *gc, void *data) { return gc->label && !strcmp(gc->label, data); } @@ -73,7 +74,7 @@ static int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data) return irq; case X86_ACPI_IRQ_TYPE_GPIOINT: /* Like acpi_dev_gpio_irq_get(), but without parsing ACPI resources */ - chip = gpiochip_find(data->chip, x86_acpi_irq_helper_gpiochip_find); + chip = gpiochip_find(data->chip, gpiochip_find_match_label); if (!chip) { pr_err("error cannot find GPIO chip %s\n", data->chip); return -ENODEV; @@ -143,14 +144,17 @@ struct x86_serdev_info { }; struct x86_dev_info { + char *invalid_aei_gpiochip; const char * const *modules; - struct gpiod_lookup_table **gpiod_lookup_tables; + struct gpiod_lookup_table * const *gpiod_lookup_tables; const struct x86_i2c_client_info *i2c_client_info; const struct platform_device_info *pdev_info; const struct x86_serdev_info *serdev_info; int i2c_client_count; int pdev_count; int serdev_count; + int (*init)(void); + void (*exit)(void); }; /* Generic / shared bq24190 settings */ @@ -187,8 +191,8 @@ static struct bq24190_platform_data 
bq24190_pdata = { }; static const char * const bq24190_modules[] __initconst = { - "crystal_cove_charger", /* For the bq24190 IRQ */ - "bq24190_charger", /* For the Vbus regulator for intel-int3496 */ + "intel_crystal_cove_charger", /* For the bq24190 IRQ */ + "bq24190_charger", /* For the Vbus regulator for intel-int3496 */ NULL }; @@ -302,7 +306,7 @@ static struct gpiod_lookup_table asus_me176c_goodix_gpios = { }, }; -static struct gpiod_lookup_table *asus_me176c_gpios[] = { +static struct gpiod_lookup_table * const asus_me176c_gpios[] = { &int3496_gpo2_pin22_gpios, &asus_me176c_goodix_gpios, NULL @@ -317,6 +321,7 @@ static const struct x86_dev_info asus_me176c_info __initconst = { .serdev_count = ARRAY_SIZE(asus_me176c_serdevs), .gpiod_lookup_tables = asus_me176c_gpios, .modules = bq24190_modules, + .invalid_aei_gpiochip = "INT33FC:02", }; /* Asus TF103C tablets have an Android factory img with everything hardcoded */ @@ -405,7 +410,7 @@ static const struct x86_i2c_client_info asus_tf103c_i2c_clients[] __initconst = }, }; -static struct gpiod_lookup_table *asus_tf103c_gpios[] = { +static struct gpiod_lookup_table * const asus_tf103c_gpios[] = { &int3496_gpo2_pin22_gpios, NULL }; @@ -417,6 +422,7 @@ static const struct x86_dev_info asus_tf103c_info __initconst = { .pdev_count = ARRAY_SIZE(int3496_pdevs), .gpiod_lookup_tables = asus_tf103c_gpios, .modules = bq24190_modules, + .invalid_aei_gpiochip = "INT33FC:02", }; /* @@ -490,6 +496,39 @@ static const struct x86_dev_info chuwi_hi8_info __initconst = { .i2c_client_count = ARRAY_SIZE(chuwi_hi8_i2c_clients), }; +#define CZC_EC_EXTRA_PORT 0x68 +#define CZC_EC_ANDROID_KEYS 0x63 + +static int __init czc_p10t_init(void) +{ + /* + * The device boots up in "Windows 7" mode, where the home button sends a + * Windows-specific key sequence (Left Meta + D) and the second button + * sends an unknown one while also toggling the Radio Kill Switch. + * This is surprising behavior when the second button is labeled "Back". + * + * The vendor-supplied Android-x86 build switches the device to an "Android" + * mode by writing value 0x63 to the I/O port 0x68. This seems to just + * set bit 6 on address 0x96 in the EC region; switching the bit directly + * seems to achieve the same result. It uses a "p10t_switcher" to do the + * job. It doesn't seem to be able to do anything else, and no other use + * of the port 0x68 is known. + * + * In the Android mode, the home button sends just a single scancode, + * which can be handled in Linux userspace more reasonably, and the back + * button only sends a scancode without toggling the kill switch. + * The scancode can then be mapped either to Back or RF Kill functionality + * in userspace, depending on how the button is labeled on that particular + * model. + */ + outb(CZC_EC_ANDROID_KEYS, CZC_EC_EXTRA_PORT); + return 0; +} + +static const struct x86_dev_info czc_p10t __initconst = { + .init = czc_p10t_init, +}; + /* * Whitelabel (sold as various brands) TM800A550L tablets.
* These tablet's DSDT contains a whole bunch of bogus ACPI I2C devices @@ -559,7 +598,7 @@ static struct gpiod_lookup_table whitelabel_tm800a550l_goodix_gpios = { }, }; -static struct gpiod_lookup_table *whitelabel_tm800a550l_gpios[] = { +static struct gpiod_lookup_table * const whitelabel_tm800a550l_gpios[] = { &whitelabel_tm800a550l_goodix_gpios, NULL }; @@ -642,6 +681,24 @@ static const struct dmi_system_id x86_android_tablet_ids[] __initconst = { .driver_data = (void *)&chuwi_hi8_info, }, { + /* CZC P10T */ + .ident = "CZC ODEON TPC-10 (\"P10T\")", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "CZC"), + DMI_MATCH(DMI_PRODUCT_NAME, "ODEON*TPC-10"), + }, + .driver_data = (void *)&czc_p10t, + }, + { + /* A variant of CZC P10T */ + .ident = "ViewSonic ViewPad 10", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ViewSonic"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPAD10"), + }, + .driver_data = (void *)&czc_p10t, + }, + { /* Whitelabel (sold as various brands) TM800A550L */ .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), @@ -669,7 +726,8 @@ static int serdev_count; static struct i2c_client **i2c_clients; static struct platform_device **pdevs; static struct serdev_device **serdevs; -static struct gpiod_lookup_table **gpiod_lookup_tables; +static struct gpiod_lookup_table * const *gpiod_lookup_tables; +static void (*exit_handler)(void); static __init int x86_instantiate_i2c_client(const struct x86_dev_info *dev_info, int idx) @@ -787,6 +845,9 @@ static void x86_android_tablet_cleanup(void) kfree(i2c_clients); + if (exit_handler) + exit_handler(); + for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++) gpiod_remove_lookup_table(gpiod_lookup_tables[i]); } @@ -795,6 +856,7 @@ static __init int x86_android_tablet_init(void) { const struct x86_dev_info *dev_info; const struct dmi_system_id *id; + struct gpio_chip *chip; int i, ret = 0; id = dmi_first_match(x86_android_tablet_ids); @@ -804,6 +866,20 @@ static __init int x86_android_tablet_init(void) dev_info = id->driver_data; /* + * The broken DSDTs on these devices often also include broken + * _AEI (ACPI Event Interrupt) handlers, disable these. + */ + if (dev_info->invalid_aei_gpiochip) { + chip = gpiochip_find(dev_info->invalid_aei_gpiochip, + gpiochip_find_match_label); + if (!chip) { + pr_err("error cannot find GPIO chip %s\n", dev_info->invalid_aei_gpiochip); + return -ENODEV; + } + acpi_gpiochip_free_interrupts(chip); + } + + /* * Since this runs from module_init() it cannot use -EPROBE_DEFER, * instead pre-load any modules which are listed as requirements. 
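All of these board fixups hang off a single DMI table, and the init function above resolves the running board to its x86_dev_info in two calls. The dispatch skeleton, reduced to its essentials (vendor and product strings are placeholders, not a real board):

	static const struct dmi_system_id demo_ids[] __initconst = {
		{
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Some Tablet"),
			},
			.driver_data = (void *)&some_board_info,
		},
		{ }	/* terminator */
	};

	const struct dmi_system_id *id = dmi_first_match(demo_ids);
	if (!id)
		return -ENODEV;		/* not a board this driver knows */
	dev_info = id->driver_data;	/* board-specific fixup description */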
*/ @@ -814,6 +890,15 @@ static __init int x86_android_tablet_init(void) for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++) gpiod_add_lookup_table(gpiod_lookup_tables[i]); + if (dev_info->init) { + ret = dev_info->init(); + if (ret < 0) { + x86_android_tablet_cleanup(); + return ret; + } + exit_handler = dev_info->exit; + } + i2c_clients = kcalloc(dev_info->i2c_client_count, sizeof(*i2c_clients), GFP_KERNEL); if (!i2c_clients) { x86_android_tablet_cleanup(); @@ -865,6 +950,6 @@ static __init int x86_android_tablet_init(void) module_init(x86_android_tablet_init); module_exit(x86_android_tablet_cleanup); -MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com"); +MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("X86 Android tablets DSDT fixups driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c index cc6757dfa3f1..c02e7bf643a6 100644 --- a/drivers/pnp/driver.c +++ b/drivers/pnp/driver.c @@ -171,7 +171,7 @@ static int __pnp_bus_suspend(struct device *dev, pm_message_t state) if (pnp_drv->driver.pm && pnp_drv->driver.pm->suspend) { error = pnp_drv->driver.pm->suspend(dev); - suspend_report_result(pnp_drv->driver.pm->suspend, error); + suspend_report_result(dev, pnp_drv->driver.pm->suspend, error); if (error) return error; } diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index afaf30a3622c..38928ff7472b 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -287,9 +287,9 @@ static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle, u32 lvl, void *context, void **rv) { - struct acpi_device *device; + struct acpi_device *device = acpi_fetch_acpi_dev(handle); - if (acpi_bus_get_device(handle, &device)) + if (!device) return AE_CTRL_DEPTH; if (acpi_is_pnp_device(device)) pnpacpi_add_device(device); diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c index b274942dc46a..01ad84fd147c 100644 --- a/drivers/power/supply/bq256xx_charger.c +++ b/drivers/power/supply/bq256xx_charger.c @@ -1523,6 +1523,9 @@ static int bq256xx_hw_init(struct bq256xx_device *bq) BQ256XX_WDT_BIT_SHIFT); ret = power_supply_get_battery_info(bq->charger, &bat_info); + if (ret == -ENOMEM) + return ret; + if (ret) { dev_warn(bq->dev, "battery info missing, default values will be applied\n"); diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c index 0c87ad0dbf71..728e2a6cc9c3 100644 --- a/drivers/power/supply/cw2015_battery.c +++ b/drivers/power/supply/cw2015_battery.c @@ -689,7 +689,7 @@ static int cw_bat_probe(struct i2c_client *client) if (ret) { /* Allocate an empty battery */ cw_bat->battery = devm_kzalloc(&client->dev, - sizeof(cw_bat->battery), + sizeof(*cw_bat->battery), GFP_KERNEL); if (!cw_bat->battery) return -ENOMEM; diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig index 8242e8c5ed77..515e3ceb3393 100644 --- a/drivers/powercap/Kconfig +++ b/drivers/powercap/Kconfig @@ -46,6 +46,7 @@ config IDLE_INJECT config DTPM bool "Power capping for Dynamic Thermal Power Management (EXPERIMENTAL)" + depends on OF help This enables support for the power capping for the dynamic thermal power management userspace engine. @@ -56,4 +57,11 @@ config DTPM_CPU help This enables support for CPU power limitation based on energy model. 
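The cw2015 hunk a few screens up is worth pausing on: allocating with sizeof(pointer) instead of sizeof(*pointer) compiles cleanly but reserves only the pointer's own size (8 bytes on 64-bit), not the structure's. The idiom in isolation (structure and device names are illustrative):

	struct power_supply_battery_info *info;

	/* Buggy: sizeof(info) is the size of the pointer itself. */
	info = devm_kzalloc(dev, sizeof(info), GFP_KERNEL);

	/* Correct: size the allocation by the pointed-to object. */
	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;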
+ +config DTPM_DEVFREQ + bool "Add device power capping based on the energy model" + depends on DTPM && ENERGY_MODEL + help + This enables support for device power limitation based on + energy model. endif diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile index fabcf388a8d3..494617cdad88 100644 --- a/drivers/powercap/Makefile +++ b/drivers/powercap/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_DTPM) += dtpm.o obj-$(CONFIG_DTPM_CPU) += dtpm_cpu.o +obj-$(CONFIG_DTPM_DEVFREQ) += dtpm_devfreq.o obj-$(CONFIG_POWERCAP) += powercap_sys.o obj-$(CONFIG_INTEL_RAPL_CORE) += intel_rapl_common.o obj-$(CONFIG_INTEL_RAPL) += intel_rapl_msr.o diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c index 8cb45f2d3d78..ce920f17f45f 100644 --- a/drivers/powercap/dtpm.c +++ b/drivers/powercap/dtpm.c @@ -23,6 +23,9 @@ #include <linux/powercap.h> #include <linux/slab.h> #include <linux/mutex.h> +#include <linux/of.h> + +#include "dtpm_subsys.h" #define DTPM_POWER_LIMIT_FLAG 0 @@ -48,9 +51,7 @@ static int get_max_power_range_uw(struct powercap_zone *pcz, u64 *max_power_uw) { struct dtpm *dtpm = to_dtpm(pcz); - mutex_lock(&dtpm_lock); *max_power_uw = dtpm->power_max - dtpm->power_min; - mutex_unlock(&dtpm_lock); return 0; } @@ -80,14 +81,7 @@ static int __get_power_uw(struct dtpm *dtpm, u64 *power_uw) static int get_power_uw(struct powercap_zone *pcz, u64 *power_uw) { - struct dtpm *dtpm = to_dtpm(pcz); - int ret; - - mutex_lock(&dtpm_lock); - ret = __get_power_uw(dtpm, power_uw); - mutex_unlock(&dtpm_lock); - - return ret; + return __get_power_uw(to_dtpm(pcz), power_uw); } static void __dtpm_rebalance_weight(struct dtpm *dtpm) @@ -130,7 +124,16 @@ static void __dtpm_add_power(struct dtpm *dtpm) } } -static int __dtpm_update_power(struct dtpm *dtpm) +/** + * dtpm_update_power - Update the power on the dtpm + * @dtpm: a pointer to a dtpm structure to update + * + * Function to update the power values of the dtpm node specified in + * parameter. These new values will be propagated to the tree. + * + * Return: zero on success, -EINVAL if the values are inconsistent + */ +int dtpm_update_power(struct dtpm *dtpm) { int ret; @@ -153,26 +156,6 @@ static int __dtpm_update_power(struct dtpm *dtpm) } /** - * dtpm_update_power - Update the power on the dtpm - * @dtpm: a pointer to a dtpm structure to update - * - * Function to update the power values of the dtpm node specified in - * parameter. These new values will be propagated to the tree. 
- * - * Return: zero on success, -EINVAL if the values are inconsistent - */ -int dtpm_update_power(struct dtpm *dtpm) -{ - int ret; - - mutex_lock(&dtpm_lock); - ret = __dtpm_update_power(dtpm); - mutex_unlock(&dtpm_lock); - - return ret; -} - -/** * dtpm_release_zone - Cleanup when the node is released * @pcz: a pointer to a powercap_zone structure * @@ -188,48 +171,28 @@ int dtpm_release_zone(struct powercap_zone *pcz) struct dtpm *dtpm = to_dtpm(pcz); struct dtpm *parent = dtpm->parent; - mutex_lock(&dtpm_lock); - - if (!list_empty(&dtpm->children)) { - mutex_unlock(&dtpm_lock); + if (!list_empty(&dtpm->children)) return -EBUSY; - } if (parent) list_del(&dtpm->sibling); __dtpm_sub_power(dtpm); - mutex_unlock(&dtpm_lock); - if (dtpm->ops) dtpm->ops->release(dtpm); + else + kfree(dtpm); - if (root == dtpm) - root = NULL; - - kfree(dtpm); - - return 0; -} - -static int __get_power_limit_uw(struct dtpm *dtpm, int cid, u64 *power_limit) -{ - *power_limit = dtpm->power_limit; return 0; } static int get_power_limit_uw(struct powercap_zone *pcz, int cid, u64 *power_limit) { - struct dtpm *dtpm = to_dtpm(pcz); - int ret; - - mutex_lock(&dtpm_lock); - ret = __get_power_limit_uw(dtpm, cid, power_limit); - mutex_unlock(&dtpm_lock); - - return ret; + *power_limit = to_dtpm(pcz)->power_limit; + + return 0; } /* @@ -289,7 +252,7 @@ static int __set_power_limit_uw(struct dtpm *dtpm, int cid, u64 power_limit) ret = __set_power_limit_uw(child, cid, power); if (!ret) - ret = __get_power_limit_uw(child, cid, &power); + ret = get_power_limit_uw(&child->zone, cid, &power); if (ret) break; @@ -307,8 +270,6 @@ static int set_power_limit_uw(struct powercap_zone *pcz, struct dtpm *dtpm = to_dtpm(pcz); int ret; - mutex_lock(&dtpm_lock); - /* * Don't allow values outside of the power range previously * set when initializing the power numbers. 
@@ -320,8 +281,6 @@ static int set_power_limit_uw(struct powercap_zone *pcz, pr_debug("%s: power limit: %llu uW, power max: %llu uW\n", dtpm->zone.name, dtpm->power_limit, dtpm->power_max); - mutex_unlock(&dtpm_lock); - return ret; } @@ -332,11 +291,7 @@ static const char *get_constraint_name(struct powercap_zone *pcz, int cid) static int get_max_power_uw(struct powercap_zone *pcz, int id, u64 *max_power) { - struct dtpm *dtpm = to_dtpm(pcz); - - mutex_lock(&dtpm_lock); - *max_power = dtpm->power_max; - mutex_unlock(&dtpm_lock); + *max_power = to_dtpm(pcz)->power_max; return 0; } @@ -439,8 +394,6 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent) if (IS_ERR(pcz)) return PTR_ERR(pcz); - mutex_lock(&dtpm_lock); - if (parent) { list_add_tail(&dtpm->sibling, &parent->children); dtpm->parent = parent; @@ -456,19 +409,253 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent) pr_debug("Registered dtpm node '%s' / %llu-%llu uW, \n", dtpm->zone.name, dtpm->power_min, dtpm->power_max); - mutex_unlock(&dtpm_lock); + return 0; +} + +static struct dtpm *dtpm_setup_virtual(const struct dtpm_node *hierarchy, + struct dtpm *parent) +{ + struct dtpm *dtpm; + int ret; + + dtpm = kzalloc(sizeof(*dtpm), GFP_KERNEL); + if (!dtpm) + return ERR_PTR(-ENOMEM); + dtpm_init(dtpm, NULL); + + ret = dtpm_register(hierarchy->name, dtpm, parent); + if (ret) { + pr_err("Failed to register dtpm node '%s': %d\n", + hierarchy->name, ret); + kfree(dtpm); + return ERR_PTR(ret); + } + + return dtpm; +} + +static struct dtpm *dtpm_setup_dt(const struct dtpm_node *hierarchy, + struct dtpm *parent) +{ + struct device_node *np; + int i, ret; + + np = of_find_node_by_path(hierarchy->name); + if (!np) { + pr_err("Failed to find '%s'\n", hierarchy->name); + return ERR_PTR(-ENXIO); + } + + for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) { + + if (!dtpm_subsys[i]->setup) + continue; + + ret = dtpm_subsys[i]->setup(parent, np); + if (ret) { + pr_err("Failed to setup '%s': %d\n", dtpm_subsys[i]->name, ret); + of_node_put(np); + return ERR_PTR(ret); + } + } + + of_node_put(np); + + /* + * By returning a NULL pointer, we let the caller know that + * there is no child for us, as we are a leaf of the tree + */ + return NULL; +} + +typedef struct dtpm * (*dtpm_node_callback_t)(const struct dtpm_node *, struct dtpm *); + +static dtpm_node_callback_t dtpm_node_callback[] = { + [DTPM_NODE_VIRTUAL] = dtpm_setup_virtual, + [DTPM_NODE_DT] = dtpm_setup_dt, +}; + +static int dtpm_for_each_child(const struct dtpm_node *hierarchy, + const struct dtpm_node *it, struct dtpm *parent) +{ + struct dtpm *dtpm; + int i, ret; + + for (i = 0; hierarchy[i].name; i++) { + + if (hierarchy[i].parent != it) + continue; + + dtpm = dtpm_node_callback[hierarchy[i].type](&hierarchy[i], parent); + + /* + * A NULL pointer means there are no children, so we + * continue without recursing any deeper. + */ + if (!dtpm) + continue; + + /* + * There are multiple reasons why the callback could + * fail. The generic glue abstracts the backend, so it + * is not possible to report back or decide based on + * the error. In any case, a failure here is not + * critical to building the hierarchy: we assume the + * underlying service is simply not present, continue + * without this branch in the tree, and log a warning + * that the node was not created.
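dtpm_for_each_child() encodes the whole tree as a flat array in which every entry points at its parent entry, so depth-first construction is just a repeated scan for entries whose parent is the node currently being visited. A tiny self-contained model of that traversal (plain userspace C, purely illustrative):

	#include <stdio.h>

	struct node { const char *name; const struct node *parent; };

	static const struct node demo[] = {
		{ "topmost", NULL },
		{ "package", &demo[0] },
		{ "/cpus/cpu0", &demo[1] },
		{ "/cpus/cpu1", &demo[1] },
	};

	static void walk(const struct node *tab, int n,
			 const struct node *it, int depth)
	{
		for (int i = 0; i < n; i++) {
			if (tab[i].parent != it)
				continue;
			printf("%*s%s\n", depth * 2, "", tab[i].name);
			walk(tab, n, &tab[i], depth + 1);	/* children */
		}
	}

	/* walk(demo, 4, NULL, 0) prints the two-level hierarchy. */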
+ */ + if (IS_ERR(dtpm)) { + pr_warn("Failed to create '%s' in the hierarchy\n", + hierarchy[i].name); + continue; + } + + ret = dtpm_for_each_child(hierarchy, &hierarchy[i], dtpm); + if (ret) + return ret; + } return 0; } -static int __init init_dtpm(void) +/** + * dtpm_create_hierarchy - Create the dtpm hierarchy + * @hierarchy: An array of struct dtpm_node describing the hierarchy + * + * The function is called by the platform specific code with the + * description of the different node in the hierarchy. It creates the + * tree in the sysfs filesystem under the powercap dtpm entry. + * + * The expected tree has the format: + * + * struct dtpm_node hierarchy[] = { + * [0] { .name = "topmost", type = DTPM_NODE_VIRTUAL }, + * [1] { .name = "package", .type = DTPM_NODE_VIRTUAL, .parent = &hierarchy[0] }, + * [2] { .name = "/cpus/cpu0", .type = DTPM_NODE_DT, .parent = &hierarchy[1] }, + * [3] { .name = "/cpus/cpu1", .type = DTPM_NODE_DT, .parent = &hierarchy[1] }, + * [4] { .name = "/cpus/cpu2", .type = DTPM_NODE_DT, .parent = &hierarchy[1] }, + * [5] { .name = "/cpus/cpu3", .type = DTPM_NODE_DT, .parent = &hierarchy[1] }, + * [6] { } + * }; + * + * The last element is always an empty one and marks the end of the + * array. + * + * Return: zero on success, a negative value in case of error. Errors + * are reported back from the underlying functions. + */ +int dtpm_create_hierarchy(struct of_device_id *dtpm_match_table) { + const struct of_device_id *match; + const struct dtpm_node *hierarchy; + struct device_node *np; + int i, ret; + + mutex_lock(&dtpm_lock); + + if (pct) { + ret = -EBUSY; + goto out_unlock; + } + pct = powercap_register_control_type(NULL, "dtpm", NULL); if (IS_ERR(pct)) { pr_err("Failed to register control type\n"); - return PTR_ERR(pct); + ret = PTR_ERR(pct); + goto out_pct; } + ret = -ENODEV; + np = of_find_node_by_path("/"); + if (!np) + goto out_err; + + match = of_match_node(dtpm_match_table, np); + + of_node_put(np); + + if (!match) + goto out_err; + + hierarchy = match->data; + if (!hierarchy) { + ret = -EFAULT; + goto out_err; + } + + ret = dtpm_for_each_child(hierarchy, NULL, NULL); + if (ret) + goto out_err; + + for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) { + + if (!dtpm_subsys[i]->init) + continue; + + ret = dtpm_subsys[i]->init(); + if (ret) + pr_info("Failed to initialize '%s': %d", + dtpm_subsys[i]->name, ret); + } + + mutex_unlock(&dtpm_lock); + return 0; + +out_err: + powercap_unregister_control_type(pct); +out_pct: + pct = NULL; +out_unlock: + mutex_unlock(&dtpm_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(dtpm_create_hierarchy); + +static void __dtpm_destroy_hierarchy(struct dtpm *dtpm) +{ + struct dtpm *child, *aux; + + list_for_each_entry_safe(child, aux, &dtpm->children, sibling) + __dtpm_destroy_hierarchy(child); + + /* + * At this point, we know all children were removed from the + * recursive call before + */ + dtpm_unregister(dtpm); +} + +void dtpm_destroy_hierarchy(void) +{ + int i; + + mutex_lock(&dtpm_lock); + + if (!pct) + goto out_unlock; + + __dtpm_destroy_hierarchy(root); + + + for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) { + + if (!dtpm_subsys[i]->exit) + continue; + + dtpm_subsys[i]->exit(); + } + + powercap_unregister_control_type(pct); + + pct = NULL; + + root = NULL; + +out_unlock: + mutex_unlock(&dtpm_lock); } -late_initcall(init_dtpm); +EXPORT_SYMBOL_GPL(dtpm_destroy_hierarchy); diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c index b740866b228d..bca2f912d349 100644 --- a/drivers/powercap/dtpm_cpu.c +++ 
b/drivers/powercap/dtpm_cpu.c @@ -21,6 +21,7 @@ #include <linux/cpuhotplug.h> #include <linux/dtpm.h> #include <linux/energy_model.h> +#include <linux/of.h> #include <linux/pm_qos.h> #include <linux/slab.h> #include <linux/units.h> @@ -150,10 +151,17 @@ static int update_pd_power_uw(struct dtpm *dtpm) static void pd_release(struct dtpm *dtpm) { struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm); + struct cpufreq_policy *policy; if (freq_qos_request_active(&dtpm_cpu->qos_req)) freq_qos_remove_request(&dtpm_cpu->qos_req); + policy = cpufreq_cpu_get(dtpm_cpu->cpu); + if (policy) { + for_each_cpu(dtpm_cpu->cpu, policy->related_cpus) + per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL; + } + kfree(dtpm_cpu); } @@ -178,11 +186,26 @@ static int cpuhp_dtpm_cpu_offline(unsigned int cpu) static int cpuhp_dtpm_cpu_online(unsigned int cpu) { struct dtpm_cpu *dtpm_cpu; + + dtpm_cpu = per_cpu(dtpm_per_cpu, cpu); + if (dtpm_cpu) + return dtpm_update_power(&dtpm_cpu->dtpm); + + return 0; +} + +static int __dtpm_cpu_setup(int cpu, struct dtpm *parent) +{ + struct dtpm_cpu *dtpm_cpu; struct cpufreq_policy *policy; struct em_perf_domain *pd; char name[CPUFREQ_NAME_LEN]; int ret = -ENOMEM; + dtpm_cpu = per_cpu(dtpm_per_cpu, cpu); + if (dtpm_cpu) + return 0; + policy = cpufreq_cpu_get(cpu); if (!policy) return 0; @@ -191,10 +214,6 @@ static int cpuhp_dtpm_cpu_online(unsigned int cpu) if (!pd) return -EINVAL; - dtpm_cpu = per_cpu(dtpm_per_cpu, cpu); - if (dtpm_cpu) - return dtpm_update_power(&dtpm_cpu->dtpm); - dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL); if (!dtpm_cpu) return -ENOMEM; @@ -207,7 +226,7 @@ static int cpuhp_dtpm_cpu_online(unsigned int cpu) snprintf(name, sizeof(name), "cpu%d-cpufreq", dtpm_cpu->cpu); - ret = dtpm_register(name, &dtpm_cpu->dtpm, NULL); + ret = dtpm_register(name, &dtpm_cpu->dtpm, parent); if (ret) goto out_kfree_dtpm_cpu; @@ -231,7 +250,18 @@ out_kfree_dtpm_cpu: return ret; } -static int __init dtpm_cpu_init(void) +static int dtpm_cpu_setup(struct dtpm *dtpm, struct device_node *np) +{ + int cpu; + + cpu = of_cpu_node_to_id(np); + if (cpu < 0) + return 0; + + return __dtpm_cpu_setup(cpu, dtpm); +} + +static int dtpm_cpu_init(void) { int ret; @@ -269,4 +299,15 @@ static int __init dtpm_cpu_init(void) return 0; } -DTPM_DECLARE(dtpm_cpu, dtpm_cpu_init); +static void dtpm_cpu_exit(void) +{ + cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN); + cpuhp_remove_state_nocalls(CPUHP_AP_DTPM_CPU_DEAD); +} + +struct dtpm_subsys_ops dtpm_cpu_ops = { + .name = KBUILD_MODNAME, + .init = dtpm_cpu_init, + .exit = dtpm_cpu_exit, + .setup = dtpm_cpu_setup, +}; diff --git a/drivers/powercap/dtpm_devfreq.c b/drivers/powercap/dtpm_devfreq.c new file mode 100644 index 000000000000..91276761a31d --- /dev/null +++ b/drivers/powercap/dtpm_devfreq.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2021 Linaro Limited + * + * Author: Daniel Lezcano <daniel.lezcano@linaro.org> + * + * The devfreq device combined with the energy model and the load can + * give an estimation of the power consumption as well as limiting the + * power. 
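The estimator in get_pd_power_uw() below multiplies the energy-model power of the active performance state by the normalized load, where _normalize_load() scales busy_time into a 0..1024 fixed-point fraction of total_time. Worked through with hypothetical numbers:

	/* Hypothetical: OPP power 250 mW, device 50% busy (512/1024). */
	u64 power_uw;

	power_uw = 250 * MICROWATT_PER_MILLIWATT;	/* 250000 uW */
	power_uw *= 512;				/* scale by busy_time */
	power_uw >>= 10;				/* divide by 1024 */
	/* power_uw == 125000, i.e. 125 mW for a half-loaded device */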
+ * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/cpumask.h> +#include <linux/devfreq.h> +#include <linux/dtpm.h> +#include <linux/energy_model.h> +#include <linux/of.h> +#include <linux/pm_qos.h> +#include <linux/slab.h> +#include <linux/units.h> + +struct dtpm_devfreq { + struct dtpm dtpm; + struct dev_pm_qos_request qos_req; + struct devfreq *devfreq; +}; + +static struct dtpm_devfreq *to_dtpm_devfreq(struct dtpm *dtpm) +{ + return container_of(dtpm, struct dtpm_devfreq, dtpm); +} + +static int update_pd_power_uw(struct dtpm *dtpm) +{ + struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm); + struct devfreq *devfreq = dtpm_devfreq->devfreq; + struct device *dev = devfreq->dev.parent; + struct em_perf_domain *pd = em_pd_get(dev); + + dtpm->power_min = pd->table[0].power; + dtpm->power_min *= MICROWATT_PER_MILLIWATT; + + dtpm->power_max = pd->table[pd->nr_perf_states - 1].power; + dtpm->power_max *= MICROWATT_PER_MILLIWATT; + + return 0; +} + +static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit) +{ + struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm); + struct devfreq *devfreq = dtpm_devfreq->devfreq; + struct device *dev = devfreq->dev.parent; + struct em_perf_domain *pd = em_pd_get(dev); + unsigned long freq; + u64 power; + int i; + + for (i = 0; i < pd->nr_perf_states; i++) { + + power = pd->table[i].power * MICROWATT_PER_MILLIWATT; + if (power > power_limit) + break; + } + + freq = pd->table[i - 1].frequency; + + dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq); + + power_limit = pd->table[i - 1].power * MICROWATT_PER_MILLIWATT; + + return power_limit; +} + +static void _normalize_load(struct devfreq_dev_status *status) +{ + if (status->total_time > 0xfffff) { + status->total_time >>= 10; + status->busy_time >>= 10; + } + + status->busy_time <<= 10; + status->busy_time /= status->total_time ? : 1; + + status->busy_time = status->busy_time ? 
: 1; + status->total_time = 1024; +} + +static u64 get_pd_power_uw(struct dtpm *dtpm) +{ + struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm); + struct devfreq *devfreq = dtpm_devfreq->devfreq; + struct device *dev = devfreq->dev.parent; + struct em_perf_domain *pd = em_pd_get(dev); + struct devfreq_dev_status status; + unsigned long freq; + u64 power; + int i; + + mutex_lock(&devfreq->lock); + status = devfreq->last_status; + mutex_unlock(&devfreq->lock); + + freq = DIV_ROUND_UP(status.current_frequency, HZ_PER_KHZ); + _normalize_load(&status); + + for (i = 0; i < pd->nr_perf_states; i++) { + + if (pd->table[i].frequency < freq) + continue; + + power = pd->table[i].power * MICROWATT_PER_MILLIWATT; + power *= status.busy_time; + power >>= 10; + + return power; + } + + return 0; +} + +static void pd_release(struct dtpm *dtpm) +{ + struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm); + + if (dev_pm_qos_request_active(&dtpm_devfreq->qos_req)) + dev_pm_qos_remove_request(&dtpm_devfreq->qos_req); + + kfree(dtpm_devfreq); +} + +static struct dtpm_ops dtpm_ops = { + .set_power_uw = set_pd_power_limit, + .get_power_uw = get_pd_power_uw, + .update_power_uw = update_pd_power_uw, + .release = pd_release, +}; + +static int __dtpm_devfreq_setup(struct devfreq *devfreq, struct dtpm *parent) +{ + struct device *dev = devfreq->dev.parent; + struct dtpm_devfreq *dtpm_devfreq; + struct em_perf_domain *pd; + int ret = -ENOMEM; + + pd = em_pd_get(dev); + if (!pd) { + ret = dev_pm_opp_of_register_em(dev, NULL); + if (ret) { + pr_err("No energy model available for '%s'\n", dev_name(dev)); + return -EINVAL; + } + } + + dtpm_devfreq = kzalloc(sizeof(*dtpm_devfreq), GFP_KERNEL); + if (!dtpm_devfreq) + return -ENOMEM; + + dtpm_init(&dtpm_devfreq->dtpm, &dtpm_ops); + + dtpm_devfreq->devfreq = devfreq; + + ret = dtpm_register(dev_name(dev), &dtpm_devfreq->dtpm, parent); + if (ret) { + pr_err("Failed to register '%s': %d\n", dev_name(dev), ret); + kfree(dtpm_devfreq); + return ret; + } + + ret = dev_pm_qos_add_request(dev, &dtpm_devfreq->qos_req, + DEV_PM_QOS_MAX_FREQUENCY, + PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE); + if (ret) { + pr_err("Failed to add QoS request: %d\n", ret); + goto out_dtpm_unregister; + } + + dtpm_update_power(&dtpm_devfreq->dtpm); + + return 0; + +out_dtpm_unregister: + dtpm_unregister(&dtpm_devfreq->dtpm); + + return ret; +} + +static int dtpm_devfreq_setup(struct dtpm *dtpm, struct device_node *np) +{ + struct devfreq *devfreq; + + devfreq = devfreq_get_devfreq_by_node(np); + if (IS_ERR(devfreq)) + return 0; + + return __dtpm_devfreq_setup(devfreq, dtpm); +} + +struct dtpm_subsys_ops dtpm_devfreq_ops = { + .name = KBUILD_MODNAME, + .setup = dtpm_devfreq_setup, +}; diff --git a/drivers/powercap/dtpm_subsys.h b/drivers/powercap/dtpm_subsys.h new file mode 100644 index 000000000000..db1712938a96 --- /dev/null +++ b/drivers/powercap/dtpm_subsys.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2022 Linaro Ltd + * + * Author: Daniel Lezcano <daniel.lezcano@linaro.org> + */ +#ifndef ___DTPM_SUBSYS_H__ +#define ___DTPM_SUBSYS_H__ + +extern struct dtpm_subsys_ops dtpm_cpu_ops; +extern struct dtpm_subsys_ops dtpm_devfreq_ops; + +struct dtpm_subsys_ops *dtpm_subsys[] = { +#ifdef CONFIG_DTPM_CPU + &dtpm_cpu_ops, +#endif +#ifdef CONFIG_DTPM_DEVFREQ + &dtpm_devfreq_ops, +#endif +}; + +#endif diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 0f1b5a7d2a89..17ad5f0d13b2 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -607,7 +607,7 @@ 
ptp_ocp_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts) } static void -__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val) +__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u32 adj_val) { u32 select, ctrl; @@ -615,7 +615,7 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val) iowrite32(OCP_SELECT_CLK_REG, &bp->reg->select); iowrite32(adj_val, &bp->reg->offset_ns); - iowrite32(adj_val & 0x7f, &bp->reg->offset_window_ns); + iowrite32(NSEC_PER_SEC, &bp->reg->offset_window_ns); ctrl = OCP_CTRL_ADJUST_OFFSET | OCP_CTRL_ENABLE; iowrite32(ctrl, &bp->reg->ctrl); @@ -624,6 +624,22 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val) iowrite32(select >> 16, &bp->reg->select); } +static void +ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns) +{ + struct timespec64 ts; + unsigned long flags; + int err; + + spin_lock_irqsave(&bp->lock, flags); + err = __ptp_ocp_gettime_locked(bp, &ts, NULL); + if (likely(!err)) { + timespec64_add_ns(&ts, delta_ns); + __ptp_ocp_settime_locked(bp, &ts); + } + spin_unlock_irqrestore(&bp->lock, flags); +} + static int ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns) { @@ -631,6 +647,11 @@ ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns) unsigned long flags; u32 adj_ns, sign; + if (delta_ns > NSEC_PER_SEC || -delta_ns > NSEC_PER_SEC) { + ptp_ocp_adjtime_coarse(bp, delta_ns); + return 0; + } + sign = delta_ns < 0 ? BIT(31) : 0; adj_ns = sign ? -delta_ns : delta_ns; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 86aa4141efa9..d2553970a67b 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -6014,9 +6014,8 @@ core_initcall(regulator_init); static int regulator_late_cleanup(struct device *dev, void *data) { struct regulator_dev *rdev = dev_to_rdev(dev); - const struct regulator_ops *ops = rdev->desc->ops; struct regulation_constraints *c = rdev->constraints; - int enabled, ret; + int ret; if (c && c->always_on) return 0; @@ -6029,14 +6028,8 @@ static int regulator_late_cleanup(struct device *dev, void *data) if (rdev->use_count) goto unlock; - /* If we can't read the status assume it's always on. */ - if (ops->is_enabled) - enabled = ops->is_enabled(rdev); - else - enabled = 1; - - /* But if reading the status failed, assume that it's off. */ - if (enabled <= 0) + /* If reading the status failed, assume that it's off. 
*/ + if (_regulator_is_enabled(rdev) <= 0) goto unlock; if (have_full_constraints()) { diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c index 6f21223a488e..eb9df485bd8a 100644 --- a/drivers/regulator/da9121-regulator.c +++ b/drivers/regulator/da9121-regulator.c @@ -87,16 +87,16 @@ static struct da9121_range da9121_3A_1phase_current = { }; static struct da9121_range da914x_40A_4phase_current = { - .val_min = 14000000, - .val_max = 80000000, - .val_stp = 2000000, + .val_min = 26000000, + .val_max = 78000000, + .val_stp = 4000000, .reg_min = 1, .reg_max = 14, }; static struct da9121_range da914x_20A_2phase_current = { - .val_min = 7000000, - .val_max = 40000000, + .val_min = 13000000, + .val_max = 39000000, .val_stp = 2000000, .reg_min = 1, .reg_max = 14, @@ -561,7 +561,7 @@ static const struct regulator_desc da9217_reg = { }; #define DA914X_MIN_MV 500 -#define DA914X_MAX_MV 1000 +#define DA914X_MAX_MV 1300 #define DA914X_STEP_MV 10 #define DA914X_MIN_SEL (DA914X_MIN_MV / DA914X_STEP_MV) #define DA914X_N_VOLTAGES (((DA914X_MAX_MV - DA914X_MIN_MV) / DA914X_STEP_MV) \ @@ -585,10 +585,6 @@ static const struct regulator_desc da9141_reg = { .vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT, .enable_reg = DA9121_REG_BUCK_BUCK1_0, .enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN, - /* Default value of BUCK_BUCK1_0.CH1_SRC_DVC_UP */ - .ramp_delay = 20000, - /* tBUCK_EN */ - .enable_time = 20, }; static const struct regulator_desc da9142_reg = { diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c index fbc56b043071..b8bf76c170fe 100644 --- a/drivers/regulator/max20086-regulator.c +++ b/drivers/regulator/max20086-regulator.c @@ -7,6 +7,7 @@ #include <linux/err.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/regmap.h> @@ -140,7 +141,7 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on) node = of_get_child_by_name(chip->dev->of_node, "regulators"); if (!node) { dev_err(chip->dev, "regulators node not found\n"); - return PTR_ERR(node); + return -ENODEV; } for (i = 0; i < chip->info->num_outputs; ++i) diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index cd938a26b76c..3b1cd0c96a74 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1180,7 +1180,7 @@ static int io_subchannel_chp_event(struct subchannel *sch, else path_event[chpid] = PE_NONE; } - if (cdev) + if (cdev && cdev->drv && cdev->drv->path_event) cdev->drv->path_event(cdev, path_event); break; } diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 9be273c320e2..a826456c6075 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -508,7 +508,8 @@ static int bnx2fc_l2_rcv_thread(void *arg) static void bnx2fc_recv_frame(struct sk_buff *skb) { - u32 fr_len; + u64 crc_err; + u32 fr_len, fr_crc; struct fc_lport *lport; struct fcoe_rcv_info *fr; struct fc_stats *stats; @@ -542,6 +543,11 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) skb_pull(skb, sizeof(struct fcoe_hdr)); fr_len = skb->len - sizeof(struct fcoe_crc_eof); + stats = per_cpu_ptr(lport->stats, get_cpu()); + stats->RxFrames++; + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + put_cpu(); + fp = (struct fc_frame *)skb; fc_frame_init(fp); fr_dev(fp) = lport; @@ -624,16 +630,15 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) return; } - stats = per_cpu_ptr(lport->stats, smp_processor_id()); - 
stats->RxFrames++; - stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + fr_crc = le32_to_cpu(fr_crc(fp)); - if (le32_to_cpu(fr_crc(fp)) != - ~crc32(~0, skb->data, fr_len)) { - if (stats->InvalidCRCCount < 5) + if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) { + stats = per_cpu_ptr(lport->stats, get_cpu()); + crc_err = (stats->InvalidCRCCount++); + put_cpu(); + if (crc_err < 5) printk(KERN_WARNING PFX "dropping frame with " "CRC error\n"); - stats->InvalidCRCCount++; kfree_skb(skb); return; } diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 88c549f257db..40a52feb315d 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -986,8 +986,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, CMD_SP(sc) = NULL; CMD_FLAGS(sc) |= FNIC_IO_DONE; - spin_unlock_irqrestore(io_lock, flags); - if (hdr_status != FCPIO_SUCCESS) { atomic64_inc(&fnic_stats->io_stats.io_failures); shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", @@ -996,8 +994,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, fnic_release_ioreq_buf(fnic, io_req, sc); - mempool_free(io_req, fnic->io_req_pool); - cmd_trace = ((u64)hdr_status << 56) | (u64)icmnd_cmpl->scsi_status << 48 | (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 | @@ -1021,6 +1017,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, } else fnic->lport->host_stats.fcp_control_requests++; + /* Call SCSI completion function to complete the IO */ + scsi_done(sc); + spin_unlock_irqrestore(io_lock, flags); + + mempool_free(io_req, fnic->io_req_pool); + atomic64_dec(&fnic_stats->io_stats.active_ios); if (atomic64_read(&fnic->io_cmpl_skip)) atomic64_dec(&fnic->io_cmpl_skip); @@ -1049,9 +1051,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, if(io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time)) atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time); } - - /* Call SCSI completion function to complete the IO */ - scsi_done(sc); } /* fnic_fcpio_itmf_cmpl_handler diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 2f53a2ee024a..ebf5ec38891b 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -400,8 +400,7 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, struct hisi_sas_dq *dq, struct hisi_sas_device *sas_dev, - struct hisi_sas_internal_abort *abort, - struct hisi_sas_tmf_task *tmf) + struct hisi_sas_internal_abort *abort) { struct hisi_sas_cmd_hdr *cmd_hdr_base; int dlvry_queue_slot, dlvry_queue; @@ -427,8 +426,6 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba, cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; - slot->tmf = tmf; - slot->is_internal = tmf; task->lldd_task = slot; memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); @@ -587,7 +584,7 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, slot->is_internal = tmf; /* protect task_prep and start_delivery sequence */ - hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL, tmf); + hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL); return 0; @@ -1380,12 +1377,13 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device) struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; int s = sizeof(struct host_to_dev_fis); + struct hisi_sas_tmf_task tmf = {}; ata_for_each_link(link, ap, EDGE) { int 
pmp = sata_srst_pmp(link); hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); - rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL); + rc = hisi_sas_exec_internal_tmf_task(device, fis, s, &tmf); if (rc != TMF_RESP_FUNC_COMPLETE) break; } @@ -1396,7 +1394,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device) hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); rc = hisi_sas_exec_internal_tmf_task(device, fis, - s, NULL); + s, &tmf); if (rc != TMF_RESP_FUNC_COMPLETE) dev_err(dev, "ata disk %016llx de-reset failed\n", SAS_ADDR(device->sas_addr)); @@ -2067,7 +2065,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, slot->port = port; slot->is_internal = true; - hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort, NULL); + hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort); return 0; diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 4878c94761f9..98cabe09c040 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -592,6 +592,7 @@ struct lpfc_vport { #define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */ #define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */ #define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/ +#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */ #define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ #define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ @@ -1161,6 +1162,16 @@ struct lpfc_hba { uint32_t cfg_hostmem_hgp; uint32_t cfg_log_verbose; uint32_t cfg_enable_fc4_type; +#define LPFC_ENABLE_FCP 1 +#define LPFC_ENABLE_NVME 2 +#define LPFC_ENABLE_BOTH 3 +#if (IS_ENABLED(CONFIG_NVME_FC)) +#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH +#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH +#else +#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP +#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP +#endif uint32_t cfg_aer_support; uint32_t cfg_sriov_nr_virtfn; uint32_t cfg_request_firmware_upgrade; @@ -1182,9 +1193,6 @@ struct lpfc_hba { uint32_t cfg_ras_fwlog_func; uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */ uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */ -#define LPFC_ENABLE_FCP 1 -#define LPFC_ENABLE_NVME 2 -#define LPFC_ENABLE_BOTH 3 uint32_t cfg_enable_pbde; uint32_t cfg_enable_mi; struct nvmet_fc_target_port *targetport; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 7a7f17d71811..fa8415259cb8 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1315,6 +1315,9 @@ lpfc_issue_lip(struct Scsi_Host *shost) pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; pmboxq->u.mb.mbxOwner = OWN_HOST; + if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME)) + vport->fc_flag &= ~FC_PT2PT_NO_NVME; + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); if ((mbxstatus == MBX_SUCCESS) && @@ -3978,8 +3981,8 @@ LPFC_ATTR_R(nvmet_mrq_post, * 3 - register both FCP and NVME * Supported values are [1,3]. 
Default value is 3 */ -LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH, - LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, +LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE, + LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE, "Enable FC4 Protocol support - FCP / NVME"); /* diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index db5ccae1b63d..f936833c9909 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1072,7 +1072,8 @@ stop_rr_fcf_flogi: /* FLOGI failed, so there is no fabric */ spin_lock_irq(shost->host_lock); - vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP | + FC_PT2PT_NO_NVME); spin_unlock_irq(shost->host_lock); /* If private loop, then allow max outstanding els to be @@ -4607,6 +4608,23 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Added for Vendor specifc support * Just keep retrying for these Rsn / Exp codes */ + if ((vport->fc_flag & FC_PT2PT) && + cmd == ELS_CMD_NVMEPRLI) { + switch (stat.un.b.lsRjtRsnCode) { + case LSRJT_UNABLE_TPC: + case LSRJT_INVALID_CMD: + case LSRJT_LOGICAL_ERR: + case LSRJT_CMD_UNSUPPORTED: + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, + "0168 NVME PRLI LS_RJT " + "reason %x port doesn't " + "support NVME, disabling NVME\n", + stat.un.b.lsRjtRsnCode); + retry = 0; + vport->fc_flag |= FC_PT2PT_NO_NVME; + goto out_retry; + } + } switch (stat.un.b.lsRjtRsnCode) { case LSRJT_UNABLE_TPC: /* The driver has a VALID PLOGI but the rport has diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index a56f01f659f8..558f7d2559c4 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2104,7 +2104,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) } if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3143 Port Down: Firmware Update " "Detected\n"); en_rn_msg = false; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 7d717a4ac14d..fdf5e777bf11 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1961,8 +1961,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, * is configured try it. 
*/ ndlp->nlp_fc4_type |= NLP_FC4_FCP; - if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) && + (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || + vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { ndlp->nlp_fc4_type |= NLP_FC4_NVME; /* We need to update the localport also */ lpfc_nvme_update_localport(vport); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 1bc0db572d9e..430abebf99f1 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -13363,6 +13363,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) uint32_t uerr_sta_hi, uerr_sta_lo; uint32_t if_type, portsmphr; struct lpfc_register portstat_reg; + u32 logmask; /* * For now, use the SLI4 device internal unrecoverable error @@ -13413,7 +13414,12 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) readl(phba->sli4_hba.u.if_type2.ERR1regaddr); phba->work_status[1] = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + logmask = LOG_TRACE_EVENT; + if (phba->work_status[0] == + SLIPORT_ERR1_REG_ERR_CODE_2 && + phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART) + logmask = LOG_SLI; + lpfc_printf_log(phba, KERN_ERR, logmask, "2885 Port Status Event: " "port status reg 0x%x, " "port smphr reg 0x%x, " diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 511726f92d9a..76229b839560 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2011,9 +2011,10 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll) enable_irq(reply_q->os_irq); } } + + if (poll) + _base_process_reply_queue(reply_q); } - if (poll) - _base_process_reply_queue(reply_q); } /** diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index c814e5071712..9ec310b795c3 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -2692,7 +2692,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 tag = le32_to_cpu(psataPayload->tag); u32 port_id = le32_to_cpu(psataPayload->port_id); u32 dev_id = le32_to_cpu(psataPayload->device_id); - unsigned long flags; if (event) pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event); @@ -2724,8 +2723,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; ts->residual = 0; - if (pm8001_dev) - atomic_dec(&pm8001_dev->running_req); break; case IO_XFER_ERROR_BREAK: pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); @@ -2767,7 +2764,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_QUEUE_FULL; - pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); return; } break; @@ -2853,20 +2849,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) ts->stat = SAS_OPEN_TO; break; } - spin_lock_irqsave(&t->task_state_lock, flags); - t->task_state_flags &= ~SAS_TASK_STATE_PENDING; - t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; - t->task_state_flags |= SAS_TASK_STATE_DONE; - if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { - spin_unlock_irqrestore(&t->task_state_lock, flags); - pm8001_dbg(pm8001_ha, FAIL, - "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", - t, event, ts->resp, ts->stat); - pm8001_ccb_task_free(pm8001_ha, t, 
ccb, tag); - } else { - spin_unlock_irqrestore(&t->task_state_lock, flags); - pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); - } } /*See the comments for mpi_ssp_completion */ diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 160ee8b228c9..32edda3e55c6 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -769,8 +769,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, res = -TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. */ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + struct pm8001_ccb_info *ccb = task->lldd_task; + pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n", tmf->tmf); + + if (ccb) + ccb->task = NULL; goto ex_err; } diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 2530d1365556..9d20f8009b89 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -2185,9 +2185,9 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); if (t->slow_task) complete(&t->slow_task->completion); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else { spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); @@ -2794,9 +2794,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); if (t->slow_task) complete(&t->slow_task->completion); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else { spin_unlock_irqrestore(&t->task_state_lock, flags); spin_unlock_irqrestore(&circularQ->oq_lock, @@ -2821,7 +2821,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, u32 tag = le32_to_cpu(psataPayload->tag); u32 port_id = le32_to_cpu(psataPayload->port_id); u32 dev_id = le32_to_cpu(psataPayload->device_id); - unsigned long flags; if (event) pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event); @@ -2854,8 +2853,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; ts->residual = 0; - if (pm8001_dev) - atomic_dec(&pm8001_dev->running_req); break; case IO_XFER_ERROR_BREAK: pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); @@ -2904,11 +2901,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_QUEUE_FULL; - spin_unlock_irqrestore(&circularQ->oq_lock, - circularQ->lock_flags); - pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); - spin_lock_irqsave(&circularQ->oq_lock, - circularQ->lock_flags); return; } break; @@ -3008,24 +3000,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, ts->stat = SAS_OPEN_TO; break; } - spin_lock_irqsave(&t->task_state_lock, flags); - t->task_state_flags &= ~SAS_TASK_STATE_PENDING; - t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; - t->task_state_flags |= SAS_TASK_STATE_DONE; - if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { - spin_unlock_irqrestore(&t->task_state_lock, flags); - pm8001_dbg(pm8001_ha, FAIL, - "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", - t, event, ts->resp, ts->stat); - 
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - } else { - spin_unlock_irqrestore(&t->task_state_lock, flags); - spin_unlock_irqrestore(&circularQ->oq_lock, - circularQ->lock_flags); - pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); - spin_lock_irqsave(&circularQ->oq_lock, - circularQ->lock_flags); - } } /*See the comments for mpi_ssp_completion */ @@ -3931,6 +3905,7 @@ static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha, /** * process_one_iomb - process one outbound Queue memory block * @pm8001_ha: our hba card information + * @circularQ: outbound circular queue * @piomb: IO message buffer */ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 5916ed7662d5..4eb89aa4a39d 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -771,11 +771,10 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, qedi_cmd->list_tmf_work = NULL; } } + spin_unlock_bh(&qedi_conn->tmf_work_lock); - if (!found) { - spin_unlock_bh(&qedi_conn->tmf_work_lock); + if (!found) goto check_cleanup_reqs; - } QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n", @@ -806,7 +805,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, qedi_cmd->state = CLEANUP_RECV; unlock: spin_unlock_bh(&conn->session->back_lock); - spin_unlock_bh(&qedi_conn->tmf_work_lock); wake_up_interruptible(&qedi_conn->wait_queue); return; diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 3520b9384428..f4e6c68ac99e 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -214,6 +214,48 @@ static void scsi_unlock_floptical(struct scsi_device *sdev, SCSI_TIMEOUT, 3, NULL); } +static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, + unsigned int depth) +{ + int new_shift = sbitmap_calculate_shift(depth); + bool need_alloc = !sdev->budget_map.map; + bool need_free = false; + int ret; + struct sbitmap sb_backup; + + /* + * realloc if new shift is calculated, which is caused by setting + * up one new default queue depth after calling ->slave_configure + */ + if (!need_alloc && new_shift != sdev->budget_map.shift) + need_alloc = need_free = true; + + if (!need_alloc) + return 0; + + /* + * Request queue has to be frozen for reallocating budget map, + * and here disk isn't added yet, so freezing is pretty fast + */ + if (need_free) { + blk_mq_freeze_queue(sdev->request_queue); + sb_backup = sdev->budget_map; + } + ret = sbitmap_init_node(&sdev->budget_map, + scsi_device_max_queue_depth(sdev), + new_shift, GFP_KERNEL, + sdev->request_queue->node, false, true); + if (need_free) { + if (ret) + sdev->budget_map = sb_backup; + else + sbitmap_free(&sb_backup); + ret = 0; + blk_mq_unfreeze_queue(sdev->request_queue); + } + return ret; +} + /** * scsi_alloc_sdev - allocate and setup a scsi_Device * @starget: which target to allocate a &scsi_device for @@ -306,11 +348,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, * default device queue depth to figure out sbitmap shift * since we use this queue depth most of times. 
*/ - if (sbitmap_init_node(&sdev->budget_map, - scsi_device_max_queue_depth(sdev), - sbitmap_calculate_shift(depth), - GFP_KERNEL, sdev->request_queue->node, - false, true)) { + if (scsi_realloc_sdev_budget_map(sdev, depth)) { put_device(&starget->dev); kfree(sdev); goto out; @@ -1017,6 +1055,13 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, } return SCSI_SCAN_NO_RESPONSE; } + + /* + * The queue_depth is often changed in ->slave_configure. + * Set up budget map again since memory consumption of + * the map depends on actual queue depth. + */ + scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth); } if (sdev->scsi_level >= SCSI_3) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 50b12d60dc1b..9349557b8a01 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -2681,7 +2681,7 @@ static int ufshcd_map_queues(struct Scsi_Host *shost) break; case HCTX_TYPE_READ: map->nr_queues = 0; - break; + continue; default: WARN_ON_ONCE(true); } diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index 12c10a5e3d93..7f421600cb66 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c @@ -233,12 +233,11 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info, return; for (i = 0; i < shadow->nr_grants; i++) { - if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) { + if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) { shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME "grant still in use by backend\n"); BUG(); } - gnttab_end_foreign_access(shadow->gref[i], 0, 0UL); } kfree(shadow->sg); diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c index 72771e018c42..258894ed234b 100644 --- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c +++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c @@ -306,10 +306,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev) } lpc_ctrl->clk = devm_clk_get(dev, NULL); - if (IS_ERR(lpc_ctrl->clk)) { - dev_err(dev, "couldn't get clock\n"); - return PTR_ERR(lpc_ctrl->clk); - } + if (IS_ERR(lpc_ctrl->clk)) + return dev_err_probe(dev, PTR_ERR(lpc_ctrl->clk), + "couldn't get clock\n"); rc = clk_prepare_enable(lpc_ctrl->clk); if (rc) { dev_err(dev, "couldn't enable clock\n"); diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c index 072473a16f4d..5ed2fc1c53a0 100644 --- a/drivers/soc/fsl/guts.c +++ b/drivers/soc/fsl/guts.c @@ -28,7 +28,6 @@ struct fsl_soc_die_attr { static struct guts *guts; static struct soc_device_attribute soc_dev_attr; static struct soc_device *soc_dev; -static struct device_node *root; /* SoC die attribute definition for QorIQ platform */ @@ -138,7 +137,7 @@ static u32 fsl_guts_get_svr(void) static int fsl_guts_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; + struct device_node *root, *np = pdev->dev.of_node; struct device *dev = &pdev->dev; const struct fsl_soc_die_attr *soc_die; const char *machine; @@ -159,8 +158,14 @@ static int fsl_guts_probe(struct platform_device *pdev) root = of_find_node_by_path("/"); if (of_property_read_string(root, "model", &machine)) of_property_read_string_index(root, "compatible", 0, &machine); - if (machine) - soc_dev_attr.machine = machine; + if (machine) { + soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL); + if (!soc_dev_attr.machine) { + of_node_put(root); + return -ENOMEM; + } + } + of_node_put(root); svr = fsl_guts_get_svr(); soc_die = fsl_soc_die_match(svr, fsl_soc_die); @@ -195,7 +200,6 @@ static 
int fsl_guts_probe(struct platform_device *pdev) static int fsl_guts_remove(struct platform_device *dev) { soc_device_unregister(soc_dev); - of_node_put(root); return 0; } diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index 4d38c80f8be8..b3c226eb5292 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c @@ -147,7 +147,7 @@ EXPORT_SYMBOL(qe_issue_cmd); * memory mapped space. * The BRG clock is the QE clock divided by 2. * It was set up long ago during the initial boot phase and is - * is given to us. + * given to us. * Baud rate clocks are zero-based in the driver code (as that maps * to port numbers). Documentation uses 1-based numbering. */ @@ -421,7 +421,7 @@ static void qe_upload_microcode(const void *base, for (i = 0; i < be32_to_cpu(ucode->count); i++) iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata); - + /* Set I-RAM Ready Register */ iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready); } diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c index e277c827bdf3..a5e2d0e5ab51 100644 --- a/drivers/soc/fsl/qe/qe_io.c +++ b/drivers/soc/fsl/qe/qe_io.c @@ -35,6 +35,8 @@ int par_io_init(struct device_node *np) if (ret) return ret; par_io = ioremap(res.start, resource_size(&res)); + if (!par_io) + return -ENOMEM; if (!of_property_read_u32(np, "num-ports", &num_ports)) num_par_io_ports = num_ports; diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index 3e59d479d001..3cb123016b3e 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c @@ -382,7 +382,8 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd) return 0; out_clk_disable: - clk_bulk_disable_unprepare(domain->num_clks, domain->clks); + if (!domain->keep_clocks) + clk_bulk_disable_unprepare(domain->num_clks, domain->clks); return ret; } diff --git a/drivers/soc/mediatek/mt8192-mmsys.h b/drivers/soc/mediatek/mt8192-mmsys.h index 6f0a57044a7b..6aae0b12b6ff 100644 --- a/drivers/soc/mediatek/mt8192-mmsys.h +++ b/drivers/soc/mediatek/mt8192-mmsys.h @@ -53,7 +53,8 @@ static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = { MT8192_AAL0_SEL_IN_CCORR0 }, { DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0, - MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0 + MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0, + MT8192_DSI0_SEL_IN_DITHER0 }, { DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0, MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0, diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c index 670cc82d17dc..ca75b14931ec 100644 --- a/drivers/soc/mediatek/mtk-scpsys.c +++ b/drivers/soc/mediatek/mtk-scpsys.c @@ -411,17 +411,12 @@ out: return ret; } -static int init_clks(struct platform_device *pdev, struct clk **clk) +static void init_clks(struct platform_device *pdev, struct clk **clk) { int i; - for (i = CLK_NONE + 1; i < CLK_MAX; i++) { + for (i = CLK_NONE + 1; i < CLK_MAX; i++) clk[i] = devm_clk_get(&pdev->dev, clk_names[i]); - if (IS_ERR(clk[i])) - return PTR_ERR(clk[i]); - } - - return 0; } static struct scp *init_scp(struct platform_device *pdev, @@ -431,7 +426,7 @@ static struct scp *init_scp(struct platform_device *pdev, { struct genpd_onecell_data *pd_data; struct resource *res; - int i, j, ret; + int i, j; struct scp *scp; struct clk *clk[CLK_MAX]; @@ -486,9 +481,7 @@ static struct scp *init_scp(struct platform_device *pdev, pd_data->num_domains = num; - ret = init_clks(pdev, clk); - if (ret) - return ERR_PTR(ret); + init_clks(pdev, clk); for (i = 0; i < num; i++) { struct scp_domain *scpd = &scp->domains[i]; diff 
--git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig index 25eb2c1e31bb..156ac0e0c8fe 100644 --- a/drivers/soc/rockchip/Kconfig +++ b/drivers/soc/rockchip/Kconfig @@ -34,4 +34,12 @@ config ROCKCHIP_PM_DOMAINS If unsure, say N. +config ROCKCHIP_DTPM + tristate "Rockchip DTPM hierarchy" + depends on DTPM && m + help + Describe the hierarchy for the Dynamic Thermal Power + Management tree on this platform. That will create all the + power capping capable devices. + endif diff --git a/drivers/soc/rockchip/Makefile b/drivers/soc/rockchip/Makefile index 875032f7344e..05f31a4e743c 100644 --- a/drivers/soc/rockchip/Makefile +++ b/drivers/soc/rockchip/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_ROCKCHIP_GRF) += grf.o obj-$(CONFIG_ROCKCHIP_IODOMAIN) += io-domain.o obj-$(CONFIG_ROCKCHIP_PM_DOMAINS) += pm_domains.o +obj-$(CONFIG_ROCKCHIP_DTPM) += dtpm.o diff --git a/drivers/soc/rockchip/dtpm.c b/drivers/soc/rockchip/dtpm.c new file mode 100644 index 000000000000..5a23784b5221 --- /dev/null +++ b/drivers/soc/rockchip/dtpm.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2021 Linaro Limited + * + * Author: Daniel Lezcano <daniel.lezcano@linaro.org> + * + * DTPM hierarchy description + */ +#include <linux/dtpm.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +static struct dtpm_node __initdata rk3399_hierarchy[] = { + [0]{ .name = "rk3399", + .type = DTPM_NODE_VIRTUAL }, + [1]{ .name = "package", + .type = DTPM_NODE_VIRTUAL, + .parent = &rk3399_hierarchy[0] }, + [2]{ .name = "/cpus/cpu@0", + .type = DTPM_NODE_DT, + .parent = &rk3399_hierarchy[1] }, + [3]{ .name = "/cpus/cpu@1", + .type = DTPM_NODE_DT, + .parent = &rk3399_hierarchy[1] }, + [4]{ .name = "/cpus/cpu@2", + .type = DTPM_NODE_DT, + .parent = &rk3399_hierarchy[1] }, + [5]{ .name = "/cpus/cpu@3", + .type = DTPM_NODE_DT, + .parent = &rk3399_hierarchy[1] }, + [6]{ .name = "/cpus/cpu@100", + .type = DTPM_NODE_DT, + .parent = &rk3399_hierarchy[1] }, + [7]{ .name = "/cpus/cpu@101", + .type = DTPM_NODE_DT, + .parent = &rk3399_hierarchy[1] }, + [8]{ .name = "/gpu@ff9a0000", + .type = DTPM_NODE_DT, + .parent = &rk3399_hierarchy[1] }, + [9]{ /* sentinel */ } +}; + +static struct of_device_id __initdata rockchip_dtpm_match_table[] = { + { .compatible = "rockchip,rk3399", .data = rk3399_hierarchy }, + {}, +}; + +static int __init rockchip_dtpm_init(void) +{ + return dtpm_create_hierarchy(rockchip_dtpm_match_table); +} +module_init(rockchip_dtpm_init); + +static void __exit rockchip_dtpm_exit(void) +{ + return dtpm_destroy_hierarchy(); +} +module_exit(rockchip_dtpm_exit); + +MODULE_SOFTDEP("pre: panfrost cpufreq-dt"); +MODULE_DESCRIPTION("Rockchip DTPM driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:dtpm"); +MODULE_AUTHOR("Daniel Lezcano <daniel.lezcano@kernel.org"); diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig index a9f8b224322e..02e319508cc6 100644 --- a/drivers/soc/samsung/Kconfig +++ b/drivers/soc/samsung/Kconfig @@ -31,7 +31,7 @@ config EXYNOS_USI help Enable support for USI block. USI (Universal Serial Interface) is an IP-core found in modern Samsung Exynos SoCs, like Exynos850 and - ExynosAutoV0. USI block can be configured to provide one of the + ExynosAutoV9. USI block can be configured to provide one of the following serial protocols: UART, SPI or High Speed I2C. 
This driver allows one to configure USI for desired protocol, which diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c index 2746d05936d3..0fb3631e7346 100644 --- a/drivers/soc/samsung/exynos-chipid.c +++ b/drivers/soc/samsung/exynos-chipid.c @@ -204,7 +204,7 @@ module_platform_driver(exynos_chipid_driver); MODULE_DESCRIPTION("Samsung Exynos ChipID controller and ASV driver"); MODULE_AUTHOR("Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>"); -MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>"); +MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>"); MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>"); MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index c9a769b8594b..86c76211b3d3 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -585,7 +585,7 @@ static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs) u32 rd = 0; u32 wr = 0; - if (qspi->base[CHIP_SELECT]) { + if (cs >= 0 && qspi->base[CHIP_SELECT]) { rd = bcm_qspi_read(qspi, CHIP_SELECT, 0); wr = (rd & ~0xff) | (1 << cs); if (rd == wr) diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index c208efeadd18..0bc7daa7afc8 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -693,6 +693,11 @@ static int meson_spicc_probe(struct platform_device *pdev) writel_relaxed(0, spicc->base + SPICC_INTREG); irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto out_master; + } + ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq, 0, NULL, spicc); if (ret) { diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index a15de10ee286..753bd313e6fd 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -624,7 +624,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) else mdata->state = MTK_SPI_IDLE; - if (!master->can_dma(master, master->cur_msg->spi, trans)) { + if (!master->can_dma(master, NULL, trans)) { if (trans->rx_buf) { cnt = mdata->xfer_len / 4; ioread32_rep(mdata->base + SPI_RX_DATA_REG, diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 553b6b9d0222..c6a1bb09be05 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -585,6 +585,12 @@ static int rockchip_spi_slave_abort(struct spi_controller *ctlr) { struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); + if (atomic_read(&rs->state) & RXDMA) + dmaengine_terminate_sync(ctlr->dma_rx); + if (atomic_read(&rs->state) & TXDMA) + dmaengine_terminate_sync(ctlr->dma_tx); + atomic_set(&rs->state, 0); + spi_enable_chip(rs, false); rs->slave_abort = true; spi_finalize_current_transfer(ctlr); @@ -654,7 +660,7 @@ static int rockchip_spi_probe(struct platform_device *pdev) struct spi_controller *ctlr; struct resource *mem; struct device_node *np = pdev->dev.of_node; - u32 rsd_nsecs; + u32 rsd_nsecs, num_cs; bool slave_mode; slave_mode = of_property_read_bool(np, "spi-slave"); @@ -764,8 +770,9 @@ static int rockchip_spi_probe(struct platform_device *pdev) * rk spi0 has two native cs, spi1..5 one cs only * if num-cs is missing in the dts, default to 1 */ - if (of_property_read_u16(np, "num-cs", &ctlr->num_chipselect)) - ctlr->num_chipselect = 1; + if (of_property_read_u32(np, "num-cs", &num_cs)) + num_cs = 1; + ctlr->num_chipselect = num_cs; ctlr->use_gpio_descriptors = true; } ctlr->dev.of_node = pdev->dev.of_node; diff --git 
a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index 514337c86d2c..ffdc55f87e82 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -688,7 +688,7 @@ static int stm32_qspi_probe(struct platform_device *pdev) struct resource *res; int ret, irq; - ctrl = spi_alloc_master(dev, sizeof(*qspi)); + ctrl = devm_spi_alloc_master(dev, sizeof(*qspi)); if (!ctrl) return -ENOMEM; @@ -697,58 +697,46 @@ static int stm32_qspi_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi"); qspi->io_base = devm_ioremap_resource(dev, res); - if (IS_ERR(qspi->io_base)) { - ret = PTR_ERR(qspi->io_base); - goto err_master_put; - } + if (IS_ERR(qspi->io_base)) + return PTR_ERR(qspi->io_base); qspi->phys_base = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm"); qspi->mm_base = devm_ioremap_resource(dev, res); - if (IS_ERR(qspi->mm_base)) { - ret = PTR_ERR(qspi->mm_base); - goto err_master_put; - } + if (IS_ERR(qspi->mm_base)) + return PTR_ERR(qspi->mm_base); qspi->mm_size = resource_size(res); - if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) { - ret = -EINVAL; - goto err_master_put; - } + if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) + return -EINVAL; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - ret = irq; - goto err_master_put; - } + if (irq < 0) + return irq; ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0, dev_name(dev), qspi); if (ret) { dev_err(dev, "failed to request irq\n"); - goto err_master_put; + return ret; } init_completion(&qspi->data_completion); init_completion(&qspi->match_completion); qspi->clk = devm_clk_get(dev, NULL); - if (IS_ERR(qspi->clk)) { - ret = PTR_ERR(qspi->clk); - goto err_master_put; - } + if (IS_ERR(qspi->clk)) + return PTR_ERR(qspi->clk); qspi->clk_rate = clk_get_rate(qspi->clk); - if (!qspi->clk_rate) { - ret = -EINVAL; - goto err_master_put; - } + if (!qspi->clk_rate) + return -EINVAL; ret = clk_prepare_enable(qspi->clk); if (ret) { dev_err(dev, "can not enable the clock\n"); - goto err_master_put; + return ret; } rstc = devm_reset_control_get_exclusive(dev, NULL); @@ -784,7 +772,7 @@ static int stm32_qspi_probe(struct platform_device *pdev) pm_runtime_enable(dev); pm_runtime_get_noresume(dev); - ret = devm_spi_register_master(dev, ctrl); + ret = spi_register_master(ctrl); if (ret) goto err_pm_runtime_free; @@ -806,8 +794,6 @@ err_dma_free: stm32_qspi_dma_free(qspi); err_clk_disable: clk_disable_unprepare(qspi->clk); -err_master_put: - spi_master_put(qspi->ctrl); return ret; } @@ -817,6 +803,7 @@ static int stm32_qspi_remove(struct platform_device *pdev) struct stm32_qspi *qspi = platform_get_drvdata(pdev); pm_runtime_get_sync(qspi->dev); + spi_unregister_master(qspi->ctrl); /* disable qspi */ writel_relaxed(0, qspi->io_base + QSPI_CR); stm32_qspi_dma_free(qspi); diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index 9bd3fd1652f7..7fc24505a72c 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -221,7 +221,6 @@ struct stm32_spi; * time between frames (if driver has this functionality) * @set_number_of_data: optional routine to configure registers to desired * number of data (if driver has this functionality) - * @can_dma: routine to determine if the transfer is eligible for DMA use * @transfer_one_dma_start: routine to start transfer a single spi_transfer * using DMA * @dma_rx_cb: routine to call after DMA RX channel operation is complete @@ -232,7 +231,7 @@ struct stm32_spi; * @baud_rate_div_min: minimum baud rate divisor * 
@baud_rate_div_max: maximum baud rate divisor * @has_fifo: boolean to know if fifo is used for driver - * @has_startbit: boolean to know if start bit is used to start transfer + * @flags: compatible specific SPI controller flags used at registration time */ struct stm32_spi_cfg { const struct stm32_spi_regspec *regs; @@ -253,6 +252,7 @@ struct stm32_spi_cfg { unsigned int baud_rate_div_min; unsigned int baud_rate_div_max; bool has_fifo; + u16 flags; }; /** @@ -1722,6 +1722,7 @@ static const struct stm32_spi_cfg stm32f4_spi_cfg = { .baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN, .baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX, .has_fifo = false, + .flags = SPI_MASTER_MUST_TX, }; static const struct stm32_spi_cfg stm32h7_spi_cfg = { @@ -1854,7 +1855,7 @@ static int stm32_spi_probe(struct platform_device *pdev) master->prepare_message = stm32_spi_prepare_msg; master->transfer_one = stm32_spi_transfer_one; master->unprepare_message = stm32_spi_unprepare_msg; - master->flags = SPI_MASTER_MUST_TX; + master->flags = spi->cfg->flags; spi->dma_tx = dma_request_chan(spi->dev, "tx"); if (IS_ERR(spi->dma_tx)) { diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c index 342ee8d2c476..cc0da4822231 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -726,7 +726,7 @@ static int uniphier_spi_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n", ret); - goto out_disable_clk; + goto out_release_dma; } dma_tx_burst = caps.max_burst; } @@ -735,7 +735,7 @@ static int uniphier_spi_probe(struct platform_device *pdev) if (IS_ERR_OR_NULL(master->dma_rx)) { if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; - goto out_disable_clk; + goto out_release_dma; } master->dma_rx = NULL; dma_rx_burst = INT_MAX; @@ -744,7 +744,7 @@ static int uniphier_spi_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n", ret); - goto out_disable_clk; + goto out_release_dma; } dma_rx_burst = caps.max_burst; } @@ -753,10 +753,20 @@ static int uniphier_spi_probe(struct platform_device *pdev) ret = devm_spi_register_master(&pdev->dev, master); if (ret) - goto out_disable_clk; + goto out_release_dma; return 0; +out_release_dma: + if (!IS_ERR_OR_NULL(master->dma_rx)) { + dma_release_channel(master->dma_rx); + master->dma_rx = NULL; + } + if (!IS_ERR_OR_NULL(master->dma_tx)) { + dma_release_channel(master->dma_tx); + master->dma_tx = NULL; + } + out_disable_clk: clk_disable_unprepare(priv->clk); diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c index cfa222c9bd5e..78f31b61a2aa 100644 --- a/drivers/spi/spi-zynq-qspi.c +++ b/drivers/spi/spi-zynq-qspi.c @@ -570,6 +570,9 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem, if (op->dummy.nbytes) { tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + memset(tmpbuf, 0xff, op->dummy.nbytes); reinit_completion(&xqspi->data_completion); xqspi->txbuf = tmpbuf; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 4599b121d744..d96082dc3340 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1019,10 +1019,10 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev, int i, ret; if (vmalloced_buf || kmap_buf) { - desc_len = min_t(int, max_seg_size, PAGE_SIZE); + desc_len = min_t(unsigned int, max_seg_size, PAGE_SIZE); sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); } else if (virt_addr_valid(buf)) { - desc_len = min_t(int, max_seg_size, ctlr->max_dma_len); + desc_len = 
min_t(unsigned int, max_seg_size, ctlr->max_dma_len); sgs = DIV_ROUND_UP(len, desc_len); } else { return -EINVAL; diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c index abe9395a0aef..861a154144e6 100644 --- a/drivers/staging/fbtft/fb_st7789v.c +++ b/drivers/staging/fbtft/fb_st7789v.c @@ -144,6 +144,8 @@ static int init_display(struct fbtft_par *par) { int rc; + par->fbtftops.reset(par); + rc = init_tearing_effect_line(par); if (rc) return rc; diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h index 4cdec34e23d2..55677efc0138 100644 --- a/drivers/staging/fbtft/fbtft.h +++ b/drivers/staging/fbtft/fbtft.h @@ -334,7 +334,10 @@ static int __init fbtft_driver_module_init(void) \ ret = spi_register_driver(&fbtft_driver_spi_driver); \ if (ret < 0) \ return ret; \ - return platform_driver_register(&fbtft_driver_platform_driver); \ + ret = platform_driver_register(&fbtft_driver_platform_driver); \ + if (ret < 0) \ + spi_unregister_driver(&fbtft_driver_spi_driver); \ + return ret; \ } \ \ static void __exit fbtft_driver_module_exit(void) \ diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c index 493ed4821515..0d8d8fed283d 100644 --- a/drivers/staging/gdm724x/gdm_lte.c +++ b/drivers/staging/gdm724x/gdm_lte.c @@ -76,14 +76,15 @@ static void tx_complete(void *arg) static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type) { - int ret; + int ret, len; + len = skb->len + ETH_HLEN; ret = netif_rx_ni(skb); if (ret == NET_RX_DROP) { nic->stats.rx_dropped++; } else { nic->stats.rx_packets++; - nic->stats.rx_bytes += skb->len + ETH_HLEN; + nic->stats.rx_bytes += len; } return 0; diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c index 7e6347fe93f9..8a7cf1d0e968 100644 --- a/drivers/staging/greybus/gpio.c +++ b/drivers/staging/greybus/gpio.c @@ -391,10 +391,7 @@ static int gb_gpio_request_handler(struct gb_operation *op) return -EINVAL; } - local_irq_disable(); - ret = generic_handle_irq(irq); - local_irq_enable(); - + ret = generic_handle_irq_safe(irq); if (ret) dev_err(dev, "failed to invoke irq handler\n"); diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c index 0f82f5031c43..49a3f45cb771 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c @@ -5907,6 +5907,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) struct sta_info *psta_bmc; struct list_head *xmitframe_plist, *xmitframe_phead, *tmp; struct xmit_frame *pxmitframe = NULL; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct sta_priv *pstapriv = &padapter->stapriv; /* for BC/MC Frames */ @@ -5917,7 +5918,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) { msleep(10);/* 10ms, ATIM(HIQ) Windows */ - spin_lock_bh(&psta_bmc->sleep_q.lock); + /* spin_lock_bh(&psta_bmc->sleep_q.lock); */ + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta_bmc->sleep_q); list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) { @@ -5940,7 +5942,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) rtw_hal_xmitframe_enqueue(padapter, pxmitframe); } - spin_unlock_bh(&psta_bmc->sleep_q.lock); + /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); /* check hi queue and bmc_sleepq */ rtw_chk_hi_queue_cmd(padapter); diff --git 
a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c index 41bfca549c64..105fe0e3482a 100644 --- a/drivers/staging/rtl8723bs/core/rtw_recv.c +++ b/drivers/staging/rtl8723bs/core/rtw_recv.c @@ -957,8 +957,10 @@ static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_ if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) { struct list_head *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe = NULL; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; - spin_lock_bh(&psta->sleep_q.lock); + /* spin_lock_bh(&psta->sleep_q.lock); */ + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); xmitframe_plist = get_next(xmitframe_phead); @@ -989,10 +991,12 @@ static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_ update_beacon(padapter, WLAN_EID_TIM, NULL, true); } - spin_unlock_bh(&psta->sleep_q.lock); + /* spin_unlock_bh(&psta->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); } else { - spin_unlock_bh(&psta->sleep_q.lock); + /* spin_unlock_bh(&psta->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); if (pstapriv->tim_bitmap&BIT(psta->aid)) { if (psta->sleepq_len == 0) { diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c index 0c9ea1520fd0..beb11d89db18 100644 --- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c +++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c @@ -293,48 +293,46 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta) /* list_del_init(&psta->wakeup_list); */ - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); + rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q); psta->sleepq_len = 0; - spin_unlock_bh(&psta->sleep_q.lock); - - spin_lock_bh(&pxmitpriv->lock); /* vo */ - spin_lock_bh(&pstaxmitpriv->vo_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->vo_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending); list_del_init(&(pstaxmitpriv->vo_q.tx_pending)); phwxmit = pxmitpriv->hwxmits; phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt; pstaxmitpriv->vo_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->vo_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->vo_pending.lock)); */ /* vi */ - spin_lock_bh(&pstaxmitpriv->vi_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->vi_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending); list_del_init(&(pstaxmitpriv->vi_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+1; phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt; pstaxmitpriv->vi_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->vi_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->vi_pending.lock)); */ /* be */ - spin_lock_bh(&pstaxmitpriv->be_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->be_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending); list_del_init(&(pstaxmitpriv->be_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+2; phwxmit->accnt -= pstaxmitpriv->be_q.qcnt; pstaxmitpriv->be_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->be_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->be_pending.lock)); */ /* bk */ - spin_lock_bh(&pstaxmitpriv->bk_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->bk_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending); list_del_init(&(pstaxmitpriv->bk_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+3; phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt; pstaxmitpriv->bk_q.qcnt = 0; - 
spin_unlock_bh(&pstaxmitpriv->bk_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->bk_pending.lock)); */ spin_unlock_bh(&pxmitpriv->lock); diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c index 13b8bd5ffabc..f466bfd248fb 100644 --- a/drivers/staging/rtl8723bs/core/rtw_xmit.c +++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c @@ -1734,12 +1734,15 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram struct list_head *plist, *phead, *tmp; struct xmit_frame *pxmitframe; + spin_lock_bh(&pframequeue->lock); + phead = get_list_head(pframequeue); list_for_each_safe(plist, tmp, phead) { pxmitframe = list_entry(plist, struct xmit_frame, list); rtw_free_xmitframe(pxmitpriv, pxmitframe); } + spin_unlock_bh(&pframequeue->lock); } s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe) @@ -1794,7 +1797,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe) struct sta_info *psta; struct tx_servq *ptxservq; struct pkt_attrib *pattrib = &pxmitframe->attrib; - struct xmit_priv *xmit_priv = &padapter->xmitpriv; struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits; signed int res = _SUCCESS; @@ -1812,14 +1814,12 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe) ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index)); - spin_lock_bh(&xmit_priv->lock); if (list_empty(&ptxservq->tx_pending)) list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue)); list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending)); ptxservq->qcnt++; phwxmits[ac_index].accnt++; - spin_unlock_bh(&xmit_priv->lock); exit: @@ -2202,10 +2202,11 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta) struct list_head *xmitframe_plist, *xmitframe_phead, *tmp; struct xmit_frame *pxmitframe = NULL; struct sta_priv *pstapriv = &padapter->stapriv; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; psta_bmc = rtw_get_bcmc_stainfo(padapter); - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) { @@ -2306,7 +2307,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta) _exit: - spin_unlock_bh(&psta->sleep_q.lock); + spin_unlock_bh(&pxmitpriv->lock); if (update_mask) update_beacon(padapter, WLAN_EID_TIM, NULL, true); @@ -2318,8 +2319,9 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst struct list_head *xmitframe_plist, *xmitframe_phead, *tmp; struct xmit_frame *pxmitframe = NULL; struct sta_priv *pstapriv = &padapter->stapriv; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) { @@ -2372,7 +2374,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst } } - spin_unlock_bh(&psta->sleep_q.lock); + spin_unlock_bh(&pxmitpriv->lock); } void enqueue_pending_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf) diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c index b5d5e922231c..15810438a472 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c @@ -502,7 +502,9 @@ s32 rtl8723bs_hal_xmit( 
rtw_issue_addbareq_cmd(padapter, pxmitframe); } + spin_lock_bh(&pxmitpriv->lock); err = rtw_xmitframe_enqueue(padapter, pxmitframe); + spin_unlock_bh(&pxmitpriv->lock); if (err != _SUCCESS) { rtw_free_xmitframe(pxmitpriv, pxmitframe); diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h index c94fa7d8d5a9..1b343b434f4d 100644 --- a/drivers/staging/rtl8723bs/include/rtw_mlme.h +++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h @@ -102,13 +102,17 @@ there are several "locks" in mlme_priv, since mlme_priv is a shared resource between many threads, like ISR/Call-Back functions, the OID handlers, and even timer functions. - Each struct __queue has its own locks, already. -Other items are protected by mlme_priv.lock. +Other items in mlme_priv are protected by mlme_priv.lock, while items in +xmit_priv are protected by xmit_priv.lock. To avoid possible dead lock, any thread trying to modifiying mlme_priv SHALL not lock up more than one locks at a time! +The only exception is that queue functions which take the __queue.lock +may be called with the xmit_priv.lock held. In this case the order +MUST always be first lock xmit_priv.lock and then call any queue functions +which take __queue.lock. */ diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 6759a6261500..3a2e4582db8e 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -1058,15 +1058,27 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, DEBUG_TRACE(SERVICE_CALLBACK_LINE); + rcu_read_lock(); service = handle_to_service(handle); - if (WARN_ON(!service)) + if (WARN_ON(!service)) { + rcu_read_unlock(); return VCHIQ_SUCCESS; + } user_service = (struct user_service *)service->base.userdata; instance = user_service->instance; - if (!instance || instance->closing) + if (!instance || instance->closing) { + rcu_read_unlock(); return VCHIQ_SUCCESS; + } + + /* + * As hopping around different synchronization mechanism, + * taking an extra reference results in simpler implementation. 
+ */ + vchiq_service_get(service); + rcu_read_unlock(); vchiq_log_trace(vchiq_arm_log_level, "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx", @@ -1097,6 +1109,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, bulk_userdata); if (status != VCHIQ_SUCCESS) { DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); return status; } } @@ -1105,10 +1118,12 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, if (wait_for_completion_interruptible(&user_service->remove_event)) { vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__); DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); return VCHIQ_RETRY; } else if (instance->closing) { vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__); DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); return VCHIQ_ERROR; } DEBUG_TRACE(SERVICE_CALLBACK_LINE); @@ -1137,6 +1152,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, header = NULL; } DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); if (skip_completion) return VCHIQ_SUCCESS; diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 1ca320885fad..17a6f51d3089 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -158,6 +158,7 @@ void optee_remove_common(struct optee *optee) optee_unregister_devices(); optee_notif_uninit(optee); + teedev_close_context(optee->ctx); /* * The two devices have to be unregistered before we can free the * other resources. diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index 20a1b1a3d965..f744ab15bf2c 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -424,6 +424,7 @@ static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void) */ static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx, + struct optee *optee, struct optee_msg_arg *arg) { struct tee_shm *shm; @@ -439,7 +440,7 @@ static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx, shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b); break; case OPTEE_RPC_SHM_TYPE_KERNEL: - shm = tee_shm_alloc(ctx, arg->params[0].u.value.b, + shm = tee_shm_alloc(optee->ctx, arg->params[0].u.value.b, TEE_SHM_MAPPED | TEE_SHM_PRIV); break; default: @@ -493,14 +494,13 @@ err_bad_param: } static void handle_ffa_rpc_func_cmd(struct tee_context *ctx, + struct optee *optee, struct optee_msg_arg *arg) { - struct optee *optee = tee_get_drvdata(ctx->teedev); - arg->ret_origin = TEEC_ORIGIN_COMMS; switch (arg->cmd) { case OPTEE_RPC_CMD_SHM_ALLOC: - handle_ffa_rpc_func_cmd_shm_alloc(ctx, arg); + handle_ffa_rpc_func_cmd_shm_alloc(ctx, optee, arg); break; case OPTEE_RPC_CMD_SHM_FREE: handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg); @@ -510,12 +510,12 @@ static void handle_ffa_rpc_func_cmd(struct tee_context *ctx, } } -static void optee_handle_ffa_rpc(struct tee_context *ctx, u32 cmd, - struct optee_msg_arg *arg) +static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee, + u32 cmd, struct optee_msg_arg *arg) { switch (cmd) { case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD: - handle_ffa_rpc_func_cmd(ctx, arg); + handle_ffa_rpc_func_cmd(ctx, optee, arg); break; case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT: /* Interrupt delivered by now */ @@ -582,7 +582,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx, * above. 
*/ cond_resched(); - optee_handle_ffa_rpc(ctx, data->data1, rpc_arg); + optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg); cmd = OPTEE_FFA_YIELDING_CALL_RESUME; data->data0 = cmd; data->data1 = 0; @@ -619,9 +619,18 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx, .data2 = (u32)(shm->sec_world_id >> 32), .data3 = shm->offset, }; - struct optee_msg_arg *arg = tee_shm_get_va(shm, 0); - unsigned int rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params); - struct optee_msg_arg *rpc_arg = tee_shm_get_va(shm, rpc_arg_offs); + struct optee_msg_arg *arg; + unsigned int rpc_arg_offs; + struct optee_msg_arg *rpc_arg; + + arg = tee_shm_get_va(shm, 0); + if (IS_ERR(arg)) + return PTR_ERR(arg); + + rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params); + rpc_arg = tee_shm_get_va(shm, rpc_arg_offs); + if (IS_ERR(rpc_arg)) + return PTR_ERR(rpc_arg); return optee_ffa_yielding_call(ctx, &data, rpc_arg); } @@ -793,7 +802,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) { const struct ffa_dev_ops *ffa_ops; unsigned int rpc_arg_count; + struct tee_shm_pool *pool; struct tee_device *teedev; + struct tee_context *ctx; struct optee *optee; int rc; @@ -813,12 +824,12 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) if (!optee) return -ENOMEM; - optee->pool = optee_ffa_config_dyn_shm(); - if (IS_ERR(optee->pool)) { - rc = PTR_ERR(optee->pool); - optee->pool = NULL; - goto err; + pool = optee_ffa_config_dyn_shm(); + if (IS_ERR(pool)) { + rc = PTR_ERR(pool); + goto err_free_optee; } + optee->pool = pool; optee->ops = &optee_ffa_ops; optee->ffa.ffa_dev = ffa_dev; @@ -829,7 +840,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) optee); if (IS_ERR(teedev)) { rc = PTR_ERR(teedev); - goto err; + goto err_free_pool; } optee->teedev = teedev; @@ -837,50 +848,59 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) optee); if (IS_ERR(teedev)) { rc = PTR_ERR(teedev); - goto err; + goto err_unreg_teedev; } optee->supp_teedev = teedev; rc = tee_device_register(optee->teedev); if (rc) - goto err; + goto err_unreg_supp_teedev; rc = tee_device_register(optee->supp_teedev); if (rc) - goto err; + goto err_unreg_supp_teedev; rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params); if (rc) - goto err; + goto err_unreg_supp_teedev; mutex_init(&optee->ffa.mutex); mutex_init(&optee->call_queue.mutex); INIT_LIST_HEAD(&optee->call_queue.waiters); optee_supp_init(&optee->supp); ffa_dev_set_drvdata(ffa_dev, optee); - rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE); - if (rc) { - optee_ffa_remove(ffa_dev); - return rc; + ctx = teedev_open(optee->teedev); + if (IS_ERR(ctx)) { + rc = PTR_ERR(ctx); + goto err_rhashtable_free; } + optee->ctx = ctx; + rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE); + if (rc) + goto err_close_ctx; rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES); - if (rc) { - optee_ffa_remove(ffa_dev); - return rc; - } + if (rc) + goto err_unregister_devices; pr_info("initialized driver\n"); return 0; -err: - /* - * tee_device_unregister() is safe to call even if the - * devices hasn't been registered with - * tee_device_register() yet. 
- */ + +err_unregister_devices: + optee_unregister_devices(); + optee_notif_uninit(optee); +err_close_ctx: + teedev_close_context(ctx); +err_rhashtable_free: + rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL); + optee_supp_uninit(&optee->supp); + mutex_destroy(&optee->call_queue.mutex); +err_unreg_supp_teedev: tee_device_unregister(optee->supp_teedev); +err_unreg_teedev: tee_device_unregister(optee->teedev); - if (optee->pool) - tee_shm_pool_free(optee->pool); +err_free_pool: + tee_shm_pool_free(pool); +err_free_optee: kfree(optee); return rc; } diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c index a28fa03dcd0e..05212842b0a5 100644 --- a/drivers/tee/optee/notif.c +++ b/drivers/tee/optee/notif.c @@ -121,5 +121,5 @@ int optee_notif_init(struct optee *optee, u_int max_key) void optee_notif_uninit(struct optee *optee) { - kfree(optee->notif.bitmap); + bitmap_free(optee->notif.bitmap); } diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index 46f74ab07c7e..92bc47bef95f 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -53,7 +53,6 @@ struct optee_call_queue { struct optee_notif { u_int max_key; - struct tee_context *ctx; /* Serializes access to the elements below in this struct */ spinlock_t lock; struct list_head db; @@ -134,9 +133,10 @@ struct optee_ops { /** * struct optee - main service struct * @supp_teedev: supplicant device + * @teedev: client device * @ops: internal callbacks for different ways to reach secure * world - * @teedev: client device + * @ctx: driver internal TEE context * @smc: specific to SMC ABI * @ffa: specific to FF-A ABI * @call_queue: queue of threads waiting to call @invoke_fn @@ -152,6 +152,7 @@ struct optee { struct tee_device *supp_teedev; struct tee_device *teedev; const struct optee_ops *ops; + struct tee_context *ctx; union { struct optee_smc smc; struct optee_ffa ffa; diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index 449d6a72d289..c517d310249f 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -75,16 +75,6 @@ static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr, p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa; p->u.memref.shm = shm; - /* Check that the memref is covered by the shm object */ - if (p->u.memref.size) { - size_t o = p->u.memref.shm_offs + - p->u.memref.size - 1; - - rc = tee_shm_get_pa(shm, o, NULL); - if (rc) - return rc; - } - return 0; } @@ -622,6 +612,7 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx, } static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, + struct optee *optee, struct optee_msg_arg *arg, struct optee_call_ctx *call_ctx) { @@ -651,7 +642,8 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, shm = optee_rpc_cmd_alloc_suppl(ctx, sz); break; case OPTEE_RPC_SHM_TYPE_KERNEL: - shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV); + shm = tee_shm_alloc(optee->ctx, sz, + TEE_SHM_MAPPED | TEE_SHM_PRIV); break; default: arg->ret = TEEC_ERROR_BAD_PARAMETERS; @@ -747,7 +739,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, switch (arg->cmd) { case OPTEE_RPC_CMD_SHM_ALLOC: free_pages_list(call_ctx); - handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx); + handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx); break; case OPTEE_RPC_CMD_SHM_FREE: handle_rpc_func_cmd_shm_free(ctx, arg); @@ -776,7 +768,7 @@ static void optee_handle_rpc(struct tee_context *ctx, switch 
(OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { case OPTEE_SMC_RPC_FUNC_ALLOC: - shm = tee_shm_alloc(ctx, param->a1, + shm = tee_shm_alloc(optee->ctx, param->a1, TEE_SHM_MAPPED | TEE_SHM_PRIV); if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { reg_pair_from_64(¶m->a1, ¶m->a2, pa); @@ -954,57 +946,34 @@ static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id) { struct optee *optee = dev_id; - optee_smc_do_bottom_half(optee->notif.ctx); + optee_smc_do_bottom_half(optee->ctx); return IRQ_HANDLED; } static int optee_smc_notif_init_irq(struct optee *optee, u_int irq) { - struct tee_context *ctx; int rc; - ctx = teedev_open(optee->teedev); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - optee->notif.ctx = ctx; rc = request_threaded_irq(irq, notif_irq_handler, notif_irq_thread_fn, 0, "optee_notification", optee); if (rc) - goto err_close_ctx; + return rc; optee->smc.notif_irq = irq; return 0; - -err_close_ctx: - teedev_close_context(optee->notif.ctx); - optee->notif.ctx = NULL; - - return rc; } static void optee_smc_notif_uninit_irq(struct optee *optee) { - if (optee->notif.ctx) { - optee_smc_stop_async_notif(optee->notif.ctx); + if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) { + optee_smc_stop_async_notif(optee->ctx); if (optee->smc.notif_irq) { free_irq(optee->smc.notif_irq, optee); irq_dispose_mapping(optee->smc.notif_irq); } - - /* - * The thread normally working with optee->notif.ctx was - * stopped with free_irq() above. - * - * Note we're not using teedev_close_context() or - * tee_client_close_context() since we have already called - * tee_device_put() while initializing to avoid a circular - * reference counting. - */ - teedev_close_context(optee->notif.ctx); } } @@ -1366,6 +1335,7 @@ static int optee_probe(struct platform_device *pdev) struct optee *optee = NULL; void *memremaped_shm = NULL; struct tee_device *teedev; + struct tee_context *ctx; u32 max_notif_value; u32 sec_caps; int rc; @@ -1446,9 +1416,15 @@ static int optee_probe(struct platform_device *pdev) optee->pool = pool; platform_set_drvdata(pdev, optee); + ctx = teedev_open(optee->teedev); + if (IS_ERR(ctx)) { + rc = PTR_ERR(ctx); + goto err_supp_uninit; + } + optee->ctx = ctx; rc = optee_notif_init(optee, max_notif_value); if (rc) - goto err_supp_uninit; + goto err_close_ctx; if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) { unsigned int irq; @@ -1496,6 +1472,8 @@ err_disable_shm_cache: optee_unregister_devices(); err_notif_uninit: optee_notif_uninit(optee); +err_close_ctx: + teedev_close_context(ctx); err_supp_uninit: optee_supp_uninit(&optee->supp); mutex_destroy(&optee->call_queue.mutex); diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c index 8df5edef1ded..0cedb8b4f00a 100644 --- a/drivers/thermal/broadcom/brcmstb_thermal.c +++ b/drivers/thermal/broadcom/brcmstb_thermal.c @@ -351,7 +351,7 @@ static int brcmstb_thermal_probe(struct platform_device *pdev) priv->thermal = thermal; - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); if (irq >= 0) { ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, brcmstb_tmon_irq_thread, diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig index c83ea5d04a1d..f0c845679250 100644 --- a/drivers/thermal/intel/Kconfig +++ b/drivers/thermal/intel/Kconfig @@ -99,3 +99,17 @@ config INTEL_MENLOW Intel Menlow platform. If unsure, say N. 
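
The brcmstb_thermal change above switches to platform_get_irq_optional(), which returns a negative errno without logging an error when no interrupt is described, so the driver can quietly fall back to polling. A hedged, kernel-style sketch of that pattern (demo names, not the driver's literal code):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t demo_irq_thread(int irq, void *data)
{
	/* Handle the trip-point interrupt here. */
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	int irq, ret;

	irq = platform_get_irq_optional(pdev, 0);
	if (irq >= 0) {
		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
						demo_irq_thread, IRQF_ONESHOT,
						"demo-thermal", NULL);
		if (ret)
			return ret;
	}

	/* irq < 0: no interrupt wired up; operate in polled mode. */
	return 0;
}
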
+ +config INTEL_HFI_THERMAL + bool "Intel Hardware Feedback Interface" + depends on NET + depends on CPU_SUP_INTEL + depends on X86_THERMAL_VECTOR + select THERMAL_NETLINK + help + Select this option to enable the Hardware Feedback Interface. If + selected, hardware provides guidance to the operating system on + the performance and energy efficiency capabilities of each CPU. + These capabilities may change as a result of changes in the operating + conditions of the system, such as power and thermal limits. If selected, + the kernel relays updates in CPUs' capabilities to userspace. diff --git a/drivers/thermal/intel/Makefile b/drivers/thermal/intel/Makefile index 960b56268b4a..9a8d8054f316 100644 --- a/drivers/thermal/intel/Makefile +++ b/drivers/thermal/intel/Makefile @@ -13,3 +13,4 @@ obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o obj-$(CONFIG_INTEL_TCC_COOLING) += intel_tcc_cooling.o obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o +obj-$(CONFIG_INTEL_HFI_THERMAL) += intel_hfi.o diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c index e90690a234c4..01b80331eab6 100644 --- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c +++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c @@ -72,7 +72,6 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp, int i; int nr_bad_entries = 0; struct trt *trts; - struct acpi_device *adev; union acpi_object *p; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer element = { 0, NULL }; @@ -112,12 +111,10 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp, if (!create_dev) continue; - result = acpi_bus_get_device(trt->source, &adev); - if (result) + if (!acpi_fetch_acpi_dev(trt->source)) pr_warn("Failed to get source ACPI device\n"); - result = acpi_bus_get_device(trt->target, &adev); - if (result) + if (!acpi_fetch_acpi_dev(trt->target)) pr_warn("Failed to get target ACPI device\n"); } @@ -149,7 +146,6 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp, int i; int nr_bad_entries = 0; struct art *arts; - struct acpi_device *adev; union acpi_object *p; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer element = { 0, NULL }; @@ -191,16 +187,11 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp, if (!create_dev) continue; - if (art->source) { - result = acpi_bus_get_device(art->source, &adev); - if (result) - pr_warn("Failed to get source ACPI device\n"); - } - if (art->target) { - result = acpi_bus_get_device(art->target, &adev); - if (result) - pr_warn("Failed to get target ACPI device\n"); - } + if (!acpi_fetch_acpi_dev(art->source)) + pr_warn("Failed to get source ACPI device\n"); + + if (!acpi_fetch_acpi_dev(art->target)) + pr_warn("Failed to get target ACPI device\n"); } *artp = arts; diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 72acb1f61849..4954800b9850 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -17,8 +17,8 @@ #define INT3400_KEEP_ALIVE 0xA0 enum int3400_thermal_uuid { + INT3400_THERMAL_ACTIVE = 0, INT3400_THERMAL_PASSIVE_1, - INT3400_THERMAL_ACTIVE, INT3400_THERMAL_CRITICAL, INT3400_THERMAL_ADAPTIVE_PERFORMANCE, INT3400_THERMAL_EMERGENCY_CALL_MODE, @@ -31,8 +31,8 @@ enum
int3400_thermal_uuid { }; static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = { - "42A441D6-AE6A-462b-A84B-4A8CE79027D3", "3A95C389-E4B8-4629-A526-C52C88626BAE", + "42A441D6-AE6A-462b-A84B-4A8CE79027D3", "97C68AE7-15FA-499c-B8C9-5DA81D606E0A", "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D", "5349962F-71E6-431D-9AE8-0A635B710AEE", @@ -53,12 +53,13 @@ struct int3400_thermal_priv { struct art *arts; int trt_count; struct trt *trts; - u8 uuid_bitmap; + u32 uuid_bitmap; int rel_misc_dev_res; int current_uuid_index; char *data_vault; int odvp_count; int *odvp; + u32 os_uuid_mask; struct odvp_attr *odvp_attrs; }; @@ -142,12 +143,55 @@ static ssize_t current_uuid_show(struct device *dev, struct device_attribute *devattr, char *buf) { struct int3400_thermal_priv *priv = dev_get_drvdata(dev); + int i, length = 0; - if (priv->current_uuid_index == -1) - return sprintf(buf, "INVALID\n"); + if (priv->current_uuid_index > 0) + return sprintf(buf, "%s\n", + int3400_thermal_uuids[priv->current_uuid_index]); - return sprintf(buf, "%s\n", - int3400_thermal_uuids[priv->current_uuid_index]); + for (i = 0; i <= INT3400_THERMAL_CRITICAL; i++) { + if (priv->os_uuid_mask & BIT(i)) + length += scnprintf(&buf[length], + PAGE_SIZE - length, + "%s\n", + int3400_thermal_uuids[i]); + } + + if (length) + return length; + + return sprintf(buf, "INVALID\n"); +} + +static int int3400_thermal_run_osc(acpi_handle handle, char *uuid_str, int *enable) +{ + u32 ret, buf[2]; + acpi_status status; + int result = 0; + struct acpi_osc_context context = { + .uuid_str = NULL, + .rev = 1, + .cap.length = 8, + }; + + context.uuid_str = uuid_str; + + buf[OSC_QUERY_DWORD] = 0; + buf[OSC_SUPPORT_DWORD] = *enable; + + context.cap.pointer = buf; + + status = acpi_run_osc(handle, &context); + if (ACPI_SUCCESS(status)) { + ret = *((u32 *)(context.ret.pointer + 4)); + if (ret != *enable) + result = -EPERM; + } else + result = -EPERM; + + kfree(context.ret.pointer); + + return result; } static ssize_t current_uuid_store(struct device *dev, @@ -164,16 +208,47 @@ static ssize_t current_uuid_store(struct device *dev, * If we have a list of supported UUIDs, make sure * this one is supported. 
*/ - if (priv->uuid_bitmap && - !(priv->uuid_bitmap & (1 << i))) + if (priv->uuid_bitmap & BIT(i)) { + priv->current_uuid_index = i; + return count; + } + + /* + * Only 3 policies are supported via the new + * _OSC used to inform OS capability: + * INT3400_THERMAL_ACTIVE + * INT3400_THERMAL_PASSIVE_1 + * INT3400_THERMAL_CRITICAL + */ + + if (i > INT3400_THERMAL_CRITICAL) return -EINVAL; - priv->current_uuid_index = i; - return count; + priv->os_uuid_mask |= BIT(i); + + break; } } - return -EINVAL; + if (priv->os_uuid_mask) { + int cap, ret; + + /* + * Capability bits: + * Bit 0: set to 1 to indicate DPTF is active + * Bit 1: set to 1 if active cooling is supported by the user space daemon + * Bit 2: set to 1 if passive cooling is supported by the user space daemon + * Bit 3: set to 1 if the critical trip point is handled by the user space daemon + */ + cap = ((priv->os_uuid_mask << 1) | 0x01); + ret = int3400_thermal_run_osc(priv->adev->handle, + "b23ba85d-c8b7-3542-88de-8de2ffcfd698", + &cap); + if (ret) + return ret; + } + + return count; } static DEVICE_ATTR_RW(current_uuid); @@ -236,41 +311,6 @@ end: return result; } -static int int3400_thermal_run_osc(acpi_handle handle, - enum int3400_thermal_uuid uuid, bool enable) -{ - u32 ret, buf[2]; - acpi_status status; - int result = 0; - struct acpi_osc_context context = { - .uuid_str = NULL, - .rev = 1, - .cap.length = 8, - }; - - if (uuid < 0 || uuid >= INT3400_THERMAL_MAXIMUM_UUID) - return -EINVAL; - - context.uuid_str = int3400_thermal_uuids[uuid]; - - buf[OSC_QUERY_DWORD] = 0; - buf[OSC_SUPPORT_DWORD] = enable; - - context.cap.pointer = buf; - - status = acpi_run_osc(handle, &context); - if (ACPI_SUCCESS(status)) { - ret = *((u32 *)(context.ret.pointer + 4)); - if (ret != enable) - result = -EPERM; - } else - result = -EPERM; - - kfree(context.ret.pointer); - - return result; -} - static ssize_t odvp_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -404,6 +444,10 @@ static void int3400_notify(acpi_handle handle, thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event); thermal_prop[4] = NULL; kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop); + kfree(thermal_prop[0]); + kfree(thermal_prop[1]); + kfree(thermal_prop[2]); + kfree(thermal_prop[3]); } static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, @@ -422,10 +466,18 @@ static int int3400_thermal_change_mode(struct thermal_zone_device *thermal, if (!priv) return -EINVAL; - if (mode != thermal->mode) + if (mode != thermal->mode) { + int enabled; + + if (priv->current_uuid_index < 0 || + priv->current_uuid_index >= INT3400_THERMAL_MAXIMUM_UUID) + return -EINVAL; + + enabled = (mode == THERMAL_DEVICE_ENABLED); result = int3400_thermal_run_osc(priv->adev->handle, - priv->current_uuid_index, - mode == THERMAL_DEVICE_ENABLED); + int3400_thermal_uuids[priv->current_uuid_index], + &enabled); + } evaluate_odvp(priv); @@ -464,6 +516,11 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv) priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer, obj->package.elements[0].buffer.length, GFP_KERNEL); + if (!priv->data_vault) { + kfree(buffer.pointer); + return; + } + bin_attr_data_vault.private = priv->data_vault; bin_attr_data_vault.size = obj->package.elements[0].buffer.length; kfree(buffer.pointer); diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c new file mode 100644 index 000000000000..730fd121df6e --- /dev/null +++ b/drivers/thermal/intel/intel_hfi.c @@ -0,0 +1,569 @@ +//
SPDX-License-Identifier: GPL-2.0-only +/* + * Hardware Feedback Interface Driver + * + * Copyright (c) 2021, Intel Corporation. + * + * Authors: Aubrey Li <aubrey.li@linux.intel.com> + * Ricardo Neri <ricardo.neri-calderon@linux.intel.com> + * + * + * The Hardware Feedback Interface provides performance and energy efficiency + * capability information for each CPU in the system. Depending on the processor + * model, hardware may periodically update these capabilities as a result of + * changes in the operating conditions (e.g., power limits or thermal + * constraints). On other processor models, there is a single HFI update + * at boot. + * + * This file provides functionality to process HFI updates and relay these + * updates to userspace. + */ + +#define pr_fmt(fmt) "intel-hfi: " fmt + +#include <linux/bitops.h> +#include <linux/cpufeature.h> +#include <linux/cpumask.h> +#include <linux/gfp.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/math.h> +#include <linux/mutex.h> +#include <linux/percpu-defs.h> +#include <linux/printk.h> +#include <linux/processor.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/topology.h> +#include <linux/workqueue.h> + +#include <asm/msr.h> + +#include "../thermal_core.h" +#include "intel_hfi.h" + +#define THERM_STATUS_CLEAR_PKG_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | \ + BIT(9) | BIT(11) | BIT(26)) + +/* Hardware Feedback Interface MSR configuration bits */ +#define HW_FEEDBACK_PTR_VALID_BIT BIT(0) +#define HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT BIT(0) + +/* CPUID detection and enumeration definitions for HFI */ + +#define CPUID_HFI_LEAF 6 + +union hfi_capabilities { + struct { + u8 performance:1; + u8 energy_efficiency:1; + u8 __reserved:6; + } split; + u8 bits; +}; + +union cpuid6_edx { + struct { + union hfi_capabilities capabilities; + u32 table_pages:4; + u32 __reserved:4; + s32 index:16; + } split; + u32 full; +}; + +/** + * struct hfi_cpu_data - HFI capabilities per CPU + * @perf_cap: Performance capability + * @ee_cap: Energy efficiency capability + * + * Capabilities of a logical processor in the HFI table. These capabilities are + * unitless. + */ +struct hfi_cpu_data { + u8 perf_cap; + u8 ee_cap; +} __packed; + +/** + * struct hfi_hdr - Header of the HFI table + * @perf_updated: Hardware updated performance capabilities + * @ee_updated: Hardware updated energy efficiency capabilities + * + * Properties of the data in an HFI table. + */ +struct hfi_hdr { + u8 perf_updated; + u8 ee_updated; +} __packed; + +/** + * struct hfi_instance - Representation of an HFI instance (i.e., a table) + * @local_table: Base of the local copy of the HFI table + * @timestamp: Timestamp of the last update of the local table. + * Located at the base of the local table. + * @hdr: Base address of the header of the local table + * @data: Base address of the data of the local table + * @cpus: CPUs represented in this HFI table instance + * @hw_table: Pointer to the HFI table of this instance + * @update_work: Delayed work to process HFI updates + * @table_lock: Lock to protect accesses to the table of this instance + * @event_lock: Lock to process HFI interrupts + * + * A set of parameters to parse and navigate a specific HFI table.
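
To make the layout described in the kerneldoc above concrete, here is a standalone sketch (illustrative, not driver code) of how a reader navigates such a table: an 8-byte timestamp at the base, a header rounded up to 8 bytes, then one fixed-stride row of capabilities per logical CPU; the stride and row index come from CPUID leaf 6 in the real driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cpu_caps { uint8_t perf_cap; uint8_t ee_cap; };

int main(void)
{
	unsigned char table[4096] = { 0 };	/* stands in for the shared page */
	size_t hdr_size = 8, cpu_stride = 8;	/* two capabilities, padded to 8 */
	unsigned int index = 3;			/* this CPU's row */
	uint64_t timestamp;
	struct cpu_caps caps;

	memcpy(&timestamp, table, sizeof(timestamp));
	memcpy(&caps, table + sizeof(timestamp) + hdr_size + index * cpu_stride,
	       sizeof(caps));

	/* Scale the 8-bit capabilities to the [0, 1023] netlink interval. */
	printf("ts=%llu perf=%d ee=%d\n", (unsigned long long)timestamp,
	       caps.perf_cap << 2, caps.ee_cap << 2);
	return 0;
}
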
+ */ +struct hfi_instance { + union { + void *local_table; + u64 *timestamp; + }; + void *hdr; + void *data; + cpumask_var_t cpus; + void *hw_table; + struct delayed_work update_work; + raw_spinlock_t table_lock; + raw_spinlock_t event_lock; +}; + +/** + * struct hfi_features - Supported HFI features + * @nr_table_pages: Size of the HFI table in 4KB pages + * @cpu_stride: Stride size to locate the capability data of a logical + * processor within the table (i.e., row stride) + * @hdr_size: Size of the table header + * + * Parameters and supported features that are common to all HFI instances + */ +struct hfi_features { + unsigned int nr_table_pages; + unsigned int cpu_stride; + unsigned int hdr_size; +}; + +/** + * struct hfi_cpu_info - Per-CPU attributes to consume HFI data + * @index: Row of this CPU in its HFI table + * @hfi_instance: Attributes of the HFI table to which this CPU belongs + * + * Parameters to link a logical processor to an HFI table and a row within it. + */ +struct hfi_cpu_info { + s16 index; + struct hfi_instance *hfi_instance; +}; + +static DEFINE_PER_CPU(struct hfi_cpu_info, hfi_cpu_info) = { .index = -1 }; + +static int max_hfi_instances; +static struct hfi_instance *hfi_instances; + +static struct hfi_features hfi_features; +static DEFINE_MUTEX(hfi_instance_lock); + +static struct workqueue_struct *hfi_updates_wq; +#define HFI_UPDATE_INTERVAL HZ +#define HFI_MAX_THERM_NOTIFY_COUNT 16 + +static void get_hfi_caps(struct hfi_instance *hfi_instance, + struct thermal_genl_cpu_caps *cpu_caps) +{ + int cpu, i = 0; + + raw_spin_lock_irq(&hfi_instance->table_lock); + for_each_cpu(cpu, hfi_instance->cpus) { + struct hfi_cpu_data *caps; + s16 index; + + index = per_cpu(hfi_cpu_info, cpu).index; + caps = hfi_instance->data + index * hfi_features.cpu_stride; + cpu_caps[i].cpu = cpu; + + /* + * Scale performance and energy efficiency to + * the [0, 1023] interval that thermal netlink uses. + */ + cpu_caps[i].performance = caps->perf_cap << 2; + cpu_caps[i].efficiency = caps->ee_cap << 2; + + ++i; + } + raw_spin_unlock_irq(&hfi_instance->table_lock); +} + +/* + * Call update_capabilities() when there are changes in the HFI table. + */ +static void update_capabilities(struct hfi_instance *hfi_instance) +{ + struct thermal_genl_cpu_caps *cpu_caps; + int i = 0, cpu_count; + + /* CPUs may come online/offline while processing an HFI update. */ + mutex_lock(&hfi_instance_lock); + + cpu_count = cpumask_weight(hfi_instance->cpus); + + /* No CPUs to report in this hfi_instance. */ + if (!cpu_count) + goto out; + + cpu_caps = kcalloc(cpu_count, sizeof(*cpu_caps), GFP_KERNEL); + if (!cpu_caps) + goto out; + + get_hfi_caps(hfi_instance, cpu_caps); + + if (cpu_count < HFI_MAX_THERM_NOTIFY_COUNT) + goto last_cmd; + + /* Process complete chunks of HFI_MAX_THERM_NOTIFY_COUNT capabilities. */ + for (i = 0; + (i + HFI_MAX_THERM_NOTIFY_COUNT) <= cpu_count; + i += HFI_MAX_THERM_NOTIFY_COUNT) + thermal_genl_cpu_capability_event(HFI_MAX_THERM_NOTIFY_COUNT, + &cpu_caps[i]); + + cpu_count = cpu_count - i; + +last_cmd: + /* Process the remaining capabilities if any. 
*/ + if (cpu_count) + thermal_genl_cpu_capability_event(cpu_count, &cpu_caps[i]); + + kfree(cpu_caps); +out: + mutex_unlock(&hfi_instance_lock); +} + +static void hfi_update_work_fn(struct work_struct *work) +{ + struct hfi_instance *hfi_instance; + + hfi_instance = container_of(to_delayed_work(work), struct hfi_instance, + update_work); + if (!hfi_instance) + return; + + update_capabilities(hfi_instance); +} + +void intel_hfi_process_event(__u64 pkg_therm_status_msr_val) +{ + struct hfi_instance *hfi_instance; + int cpu = smp_processor_id(); + struct hfi_cpu_info *info; + u64 new_timestamp; + + if (!pkg_therm_status_msr_val) + return; + + info = &per_cpu(hfi_cpu_info, cpu); + if (!info) + return; + + /* + * A CPU is linked to its HFI instance before the thermal vector in the + * local APIC is unmasked. Hence, info->hfi_instance cannot be NULL + * when receiving an HFI event. + */ + hfi_instance = info->hfi_instance; + if (unlikely(!hfi_instance)) { + pr_debug("Received event on CPU %d but instance was null", cpu); + return; + } + + /* + * On most systems, all CPUs in the package receive a package-level + * thermal interrupt when there is an HFI update. It is sufficient to + * let a single CPU acknowledge the update and queue work to + * process it. The remaining CPUs can resume their work. + */ + if (!raw_spin_trylock(&hfi_instance->event_lock)) + return; + + /* Skip duplicated updates. */ + new_timestamp = *(u64 *)hfi_instance->hw_table; + if (*hfi_instance->timestamp == new_timestamp) { + raw_spin_unlock(&hfi_instance->event_lock); + return; + } + + raw_spin_lock(&hfi_instance->table_lock); + + /* + * Copy the updated table into our local copy. This includes the new + * timestamp. + */ + memcpy(hfi_instance->local_table, hfi_instance->hw_table, + hfi_features.nr_table_pages << PAGE_SHIFT); + + raw_spin_unlock(&hfi_instance->table_lock); + raw_spin_unlock(&hfi_instance->event_lock); + + /* + * Let hardware know that we are done reading the HFI table and it is + * free to update it again. + */ + pkg_therm_status_msr_val &= THERM_STATUS_CLEAR_PKG_MASK & + ~PACKAGE_THERM_STATUS_HFI_UPDATED; + wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, pkg_therm_status_msr_val); + + queue_delayed_work(hfi_updates_wq, &hfi_instance->update_work, + HFI_UPDATE_INTERVAL); +} + +static void init_hfi_cpu_index(struct hfi_cpu_info *info) +{ + union cpuid6_edx edx; + + /* Do not re-read @cpu's index if it has already been initialized. */ + if (info->index > -1) + return; + + edx.full = cpuid_edx(CPUID_HFI_LEAF); + info->index = edx.split.index; +} + +/* + * The format of the HFI table depends on the number of capabilities that the + * hardware supports. Keep a data structure to navigate the table. + */ +static void init_hfi_instance(struct hfi_instance *hfi_instance) +{ + /* The HFI header is below the time-stamp. */ + hfi_instance->hdr = hfi_instance->local_table + + sizeof(*hfi_instance->timestamp); + + /* The HFI data starts below the header. */ + hfi_instance->data = hfi_instance->hdr + hfi_features.hdr_size; +} + +/** + * intel_hfi_online() - Enable HFI on @cpu + * @cpu: CPU in which the HFI will be enabled + * + * Enable the HFI to be used in @cpu. The HFI is enabled at the die/package + * level. The first CPU in the die/package to come online does the full HFI + * initialization. Subsequent CPUs will just link themselves to the HFI + * instance of their die/package.
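
intel_hfi_process_event() above combines a trylock with a timestamp comparison so that, of all the CPUs that receive the package-level interrupt, exactly one processes each distinct update. A standalone sketch of that dedup pattern, with a pthread mutex standing in for the raw spinlock:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t seen_timestamp;

static void handle_event(uint64_t hw_timestamp)
{
	if (pthread_mutex_trylock(&event_lock))
		return;			/* another CPU already won the race */

	if (seen_timestamp == hw_timestamp) {
		pthread_mutex_unlock(&event_lock);
		return;			/* duplicate of an update already seen */
	}

	/* Copy the table, ack the hardware, queue deferred work... */
	seen_timestamp = hw_timestamp;
	printf("processed update %llu\n", (unsigned long long)hw_timestamp);
	pthread_mutex_unlock(&event_lock);
}

int main(void)
{
	handle_event(1);	/* processed */
	handle_event(1);	/* skipped as a duplicate */
	handle_event(2);	/* processed */
	return 0;
}
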
+ * + * This function is called before enabling the thermal vector in the local APIC + * in order to ensure that @cpu has an associated HFI instance when it receives + * an HFI event. + */ +void intel_hfi_online(unsigned int cpu) +{ + struct hfi_instance *hfi_instance; + struct hfi_cpu_info *info; + phys_addr_t hw_table_pa; + u64 msr_val; + u16 die_id; + + /* Nothing to do if hfi_instances are missing. */ + if (!hfi_instances) + return; + + /* + * Link @cpu to the HFI instance of its package/die. It does not + * matter whether the instance has been initialized. + */ + info = &per_cpu(hfi_cpu_info, cpu); + die_id = topology_logical_die_id(cpu); + hfi_instance = info->hfi_instance; + if (!hfi_instance) { + if (die_id < 0 || die_id >= max_hfi_instances) + return; + + hfi_instance = &hfi_instances[die_id]; + info->hfi_instance = hfi_instance; + } + + init_hfi_cpu_index(info); + + /* + * Now check if the HFI instance of the package/die of @cpu has been + * initialized (by checking its header). In that case, all we have to + * do is add @cpu to this instance's cpumask. + */ + mutex_lock(&hfi_instance_lock); + if (hfi_instance->hdr) { + cpumask_set_cpu(cpu, hfi_instance->cpus); + goto unlock; + } + + /* + * Hardware is programmed with the physical address of the first page + * frame of the table. Hence, the allocated memory must be page-aligned. + */ + hfi_instance->hw_table = alloc_pages_exact(hfi_features.nr_table_pages, + GFP_KERNEL | __GFP_ZERO); + if (!hfi_instance->hw_table) + goto unlock; + + hw_table_pa = virt_to_phys(hfi_instance->hw_table); + + /* + * Allocate memory to keep a local copy of the table that + * hardware generates. + */ + hfi_instance->local_table = kzalloc(hfi_features.nr_table_pages << PAGE_SHIFT, + GFP_KERNEL); + if (!hfi_instance->local_table) + goto free_hw_table; + + /* + * Program the address of the feedback table of this die/package. On + * some processors, hardware remembers the old address of the HFI table + * even after having been reprogrammed and re-enabled. Thus, do not free + * the pages allocated for the table or reprogram the hardware with a + * new base address. Namely, program the hardware only once. + */ + msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT; + wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val); + + init_hfi_instance(hfi_instance); + + INIT_DELAYED_WORK(&hfi_instance->update_work, hfi_update_work_fn); + raw_spin_lock_init(&hfi_instance->table_lock); + raw_spin_lock_init(&hfi_instance->event_lock); + + cpumask_set_cpu(cpu, hfi_instance->cpus); + + /* + * Enable the hardware feedback interface and never disable it. See + * comment on programming the address of the table. + */ + rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); + msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT; + wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); + +unlock: + mutex_unlock(&hfi_instance_lock); + return; + +free_hw_table: + free_pages_exact(hfi_instance->hw_table, hfi_features.nr_table_pages); + goto unlock; +} + +/** + * intel_hfi_offline() - Disable HFI on @cpu + * @cpu: CPU in which the HFI will be disabled + * + * Remove @cpu from those covered by its HFI instance. + * + * On some processors, hardware remembers previous programming settings even + * after being reprogrammed. Thus, keep HFI enabled even if all CPUs in the + * die/package of @cpu are offline. See note in intel_hfi_online().
+ */ +void intel_hfi_offline(unsigned int cpu) +{ + struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, cpu); + struct hfi_instance *hfi_instance; + + /* + * Check if @cpu has an associated, initialized HFI instance (i.e., one + * with a non-NULL header). Also, HFI instances are only initialized if + * X86_FEATURE_HFI is present. + */ + hfi_instance = info->hfi_instance; + if (!hfi_instance) + return; + + if (!hfi_instance->hdr) + return; + + mutex_lock(&hfi_instance_lock); + cpumask_clear_cpu(cpu, hfi_instance->cpus); + mutex_unlock(&hfi_instance_lock); +} + +static __init int hfi_parse_features(void) +{ + unsigned int nr_capabilities; + union cpuid6_edx edx; + + if (!boot_cpu_has(X86_FEATURE_HFI)) + return -ENODEV; + + /* + * If we are here, we know that CPUID_HFI_LEAF exists. Parse the + * supported capabilities and the size of the HFI table. + */ + edx.full = cpuid_edx(CPUID_HFI_LEAF); + + if (!edx.split.capabilities.split.performance) { + pr_debug("Performance reporting not supported! Not using HFI\n"); + return -ENODEV; + } + + /* + * The number of supported capabilities determines the number of + * columns in the HFI table. Exclude the reserved bits. + */ + edx.split.capabilities.split.__reserved = 0; + nr_capabilities = hweight8(edx.split.capabilities.bits); + + /* The number of 4KB pages required by the table */ + hfi_features.nr_table_pages = edx.split.table_pages + 1; + + /* + * The header contains change indications for each supported feature. + * The size of the table header is rounded up to be a multiple of 8 + * bytes. + */ + hfi_features.hdr_size = DIV_ROUND_UP(nr_capabilities, 8) * 8; + + /* + * Data of each logical processor is also rounded up to be a multiple + * of 8 bytes. + */ + hfi_features.cpu_stride = DIV_ROUND_UP(nr_capabilities, 8) * 8; + + return 0; +} + +void __init intel_hfi_init(void) +{ + struct hfi_instance *hfi_instance; + int i, j; + + if (hfi_parse_features()) + return; + + /* There is one HFI instance per die/package. */ + max_hfi_instances = topology_max_packages() * + topology_max_die_per_package(); + + /* + * This allocation may fail. CPU hotplug callbacks must check + * for a null pointer.
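
A worked example of the sizing math in hfi_parse_features() above (standalone, illustrative): with the two capabilities this driver knows about, both the header and each per-CPU row round up to a single 8-byte unit, and a table_pages field of 0 means one 4KB page.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int nr_capabilities = 2;	/* performance + energy efficiency */
	unsigned int table_pages_field = 0;	/* CPUID reports pages minus one */

	printf("hdr_size   = %u bytes\n", DIV_ROUND_UP(nr_capabilities, 8) * 8);
	printf("cpu_stride = %u bytes\n", DIV_ROUND_UP(nr_capabilities, 8) * 8);
	printf("table size = %u page(s)\n", table_pages_field + 1);
	return 0;
}
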
+ */ + hfi_instances = kcalloc(max_hfi_instances, sizeof(*hfi_instances), + GFP_KERNEL); + if (!hfi_instances) + return; + + for (i = 0; i < max_hfi_instances; i++) { + hfi_instance = &hfi_instances[i]; + if (!zalloc_cpumask_var(&hfi_instance->cpus, GFP_KERNEL)) + goto err_nomem; + } + + hfi_updates_wq = create_singlethread_workqueue("hfi-updates"); + if (!hfi_updates_wq) + goto err_nomem; + + return; + +err_nomem: + for (j = 0; j < i; ++j) { + hfi_instance = &hfi_instances[j]; + free_cpumask_var(hfi_instance->cpus); + } + + kfree(hfi_instances); + hfi_instances = NULL; +} diff --git a/drivers/thermal/intel/intel_hfi.h b/drivers/thermal/intel/intel_hfi.h new file mode 100644 index 000000000000..325aa78b745c --- /dev/null +++ b/drivers/thermal/intel/intel_hfi.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _INTEL_HFI_H +#define _INTEL_HFI_H + +#if defined(CONFIG_INTEL_HFI_THERMAL) +void __init intel_hfi_init(void); +void intel_hfi_online(unsigned int cpu); +void intel_hfi_offline(unsigned int cpu); +void intel_hfi_process_event(__u64 pkg_therm_status_msr_val); +#else +static inline void intel_hfi_init(void) { } +static inline void intel_hfi_online(unsigned int cpu) { } +static inline void intel_hfi_offline(unsigned int cpu) { } +static inline void intel_hfi_process_event(__u64 pkg_therm_status_msr_val) { } +#endif /* CONFIG_INTEL_HFI_THERMAL */ + +#endif /* _INTEL_HFI_H */ diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index 14256421d98c..c841ab37e7c6 100644 --- a/drivers/thermal/intel/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -556,12 +556,9 @@ static void end_power_clamp(void) * stop faster. */ clamping = false; - if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) { - for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) { - pr_debug("clamping worker for cpu %d alive, destroy\n", - i); - stop_power_clamp_worker(i); - } + for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) { + pr_debug("clamping worker for cpu %d alive, destroy\n", i); + stop_power_clamp_worker(i); } } diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c index dab7e8fb1059..8352083b87c7 100644 --- a/drivers/thermal/intel/therm_throt.c +++ b/drivers/thermal/intel/therm_throt.c @@ -32,6 +32,7 @@ #include <asm/irq.h> #include <asm/msr.h> +#include "intel_hfi.h" #include "thermal_interrupt.h" /* How long to wait between reporting thermal events */ @@ -475,6 +476,13 @@ static int thermal_throttle_online(unsigned int cpu) INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work); INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work); + /* + * The first CPU coming online will enable the HFI. Usually this causes + * hardware to issue an HFI thermal interrupt. Such an interrupt will reach + * the CPU once we enable the thermal vector in the local APIC. + */ + intel_hfi_online(cpu); + /* Unmask the thermal vector after the above workqueues are initialized.
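
The therm_throt hooks above rely on ordering: the online callback links the CPU to its HFI instance before the thermal vector is unmasked, and the offline callback unlinks it after the vector is masked again. A hedged, kernel-style sketch of registering such a callback pair (demo names, not the file's literal code):

#include <linux/cpuhotplug.h>

static int demo_online(unsigned int cpu)
{
	/* intel_hfi_online(cpu), then unmask APIC_LVTTHMR. */
	return 0;
}

static int demo_offline(unsigned int cpu)
{
	/* Mask APIC_LVTTHMR, then intel_hfi_offline(cpu). */
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo/therm:online",
				demo_online, demo_offline);
	return ret < 0 ? ret : 0;
}
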
*/ l = apic_read(APIC_LVTTHMR); apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); @@ -492,6 +500,8 @@ static int thermal_throttle_offline(unsigned int cpu) l = apic_read(APIC_LVTTHMR); apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED); + intel_hfi_offline(cpu); + cancel_delayed_work_sync(&state->package_throttle.therm_work); cancel_delayed_work_sync(&state->core_throttle.therm_work); @@ -509,6 +519,8 @@ static __init int thermal_throttle_init_device(void) if (!atomic_read(&therm_throt_en)) return 0; + intel_hfi_init(); + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/therm:online", thermal_throttle_online, thermal_throttle_offline); @@ -608,6 +620,10 @@ void intel_thermal_interrupt(void) PACKAGE_THERM_STATUS_POWER_LIMIT, POWER_LIMIT_EVENT, PACKAGE_LEVEL); + + if (this_cpu_has(X86_FEATURE_HFI)) + intel_hfi_process_event(msr_val & + PACKAGE_THERM_STATUS_HFI_UPDATED); } } @@ -717,6 +733,12 @@ void intel_init_thermal(struct cpuinfo_x86 *c) wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l | (PACKAGE_THERM_INT_LOW_ENABLE | PACKAGE_THERM_INT_HIGH_ENABLE), h); + + if (cpu_has(c, X86_FEATURE_HFI)) { + rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); + wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, + l | PACKAGE_THERM_INT_HFI_ENABLE, h); + } } rdmsr(MSR_IA32_MISC_ENABLE, l, h); diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c index eafa7526eb8b..c7f91cbdccc7 100644 --- a/drivers/thermal/qcom/lmh.c +++ b/drivers/thermal/qcom/lmh.c @@ -28,6 +28,8 @@ #define LMH_REG_DCVS_INTR_CLR 0x8 +#define LMH_ENABLE_ALGOS 1 + struct lmh_hw_data { void __iomem *base; struct irq_domain *domain; @@ -90,6 +92,7 @@ static int lmh_probe(struct platform_device *pdev) struct device_node *cpu_node; struct lmh_hw_data *lmh_data; int temp_low, temp_high, temp_arm, cpu_id, ret; + unsigned int enable_alg; u32 node_id; lmh_data = devm_kzalloc(dev, sizeof(*lmh_data), GFP_KERNEL); @@ -141,32 +144,36 @@ static int lmh_probe(struct platform_device *pdev) if (!qcom_scm_lmh_dcvsh_available()) return -EINVAL; - ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_CRNT, LMH_ALGO_MODE_ENABLE, 1, - LMH_NODE_DCVS, node_id, 0); - if (ret) - dev_err(dev, "Error %d enabling current subfunction\n", ret); - - ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_REL, LMH_ALGO_MODE_ENABLE, 1, - LMH_NODE_DCVS, node_id, 0); - if (ret) - dev_err(dev, "Error %d enabling reliability subfunction\n", ret); - - ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_BCL, LMH_ALGO_MODE_ENABLE, 1, - LMH_NODE_DCVS, node_id, 0); - if (ret) - dev_err(dev, "Error %d enabling BCL subfunction\n", ret); - - ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_ALGO_MODE_ENABLE, 1, - LMH_NODE_DCVS, node_id, 0); - if (ret) { - dev_err(dev, "Error %d enabling thermal subfunction\n", ret); - return ret; - } - - ret = qcom_scm_lmh_profile_change(0x1); - if (ret) { - dev_err(dev, "Error %d changing profile\n", ret); - return ret; + enable_alg = (uintptr_t)of_device_get_match_data(dev); + + if (enable_alg) { + ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_CRNT, LMH_ALGO_MODE_ENABLE, 1, + LMH_NODE_DCVS, node_id, 0); + if (ret) + dev_err(dev, "Error %d enabling current subfunction\n", ret); + + ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_REL, LMH_ALGO_MODE_ENABLE, 1, + LMH_NODE_DCVS, node_id, 0); + if (ret) + dev_err(dev, "Error %d enabling reliability subfunction\n", ret); + + ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_BCL, LMH_ALGO_MODE_ENABLE, 1, + LMH_NODE_DCVS, node_id, 0); + if (ret) + dev_err(dev, "Error %d enabling BCL subfunction\n", ret); + + ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_ALGO_MODE_ENABLE, 1, + LMH_NODE_DCVS, node_id, 0); + 
if (ret) { + dev_err(dev, "Error %d enabling thermal subfunction\n", ret); + return ret; + } + + ret = qcom_scm_lmh_profile_change(0x1); + if (ret) { + dev_err(dev, "Error %d changing profile\n", ret); + return ret; + } } /* Set default thermal trips */ @@ -213,7 +220,8 @@ static int lmh_probe(struct platform_device *pdev) } static const struct of_device_id lmh_table[] = { - { .compatible = "qcom,sdm845-lmh", }, + { .compatible = "qcom,sdm845-lmh", .data = (void *)LMH_ENABLE_ALGOS}, + { .compatible = "qcom,sm8150-lmh", }, {} }; MODULE_DEVICE_TABLE(of, lmh_table); diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index 99a8d9f3e03c..154d3cb19c88 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -18,6 +18,7 @@ #include <linux/regmap.h> #include <linux/slab.h> #include <linux/thermal.h> +#include "../thermal_hwmon.h" #include "tsens.h" /** @@ -1060,6 +1061,10 @@ static int tsens_register(struct tsens_priv *priv) priv->sensor[i].tzd = tzd; if (priv->ops->enable) priv->ops->enable(priv, i); + + if (devm_thermal_add_hwmon_sysfs(tzd)) + dev_warn(priv->dev, + "Failed to add hwmon sysfs attributes\n"); } /* VER_0 require to set MIN and MAX THRESH diff --git a/drivers/thermal/tegra/tegra-bpmp-thermal.c b/drivers/thermal/tegra/tegra-bpmp-thermal.c index 94f1da1dcd69..5affc3d196be 100644 --- a/drivers/thermal/tegra/tegra-bpmp-thermal.c +++ b/drivers/thermal/tegra/tegra-bpmp-thermal.c @@ -52,6 +52,8 @@ static int tegra_bpmp_thermal_get_temp(void *data, int *out_temp) err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg); if (err) return err; + if (msg.rx.ret) + return -EINVAL; *out_temp = reply.get_temp.temp; @@ -63,6 +65,7 @@ static int tegra_bpmp_thermal_set_trips(void *data, int low, int high) struct tegra_bpmp_thermal_zone *zone = data; struct mrq_thermal_host_to_bpmp_request req; struct tegra_bpmp_message msg; + int err; memset(&req, 0, sizeof(req)); req.type = CMD_THERMAL_SET_TRIP; @@ -76,7 +79,13 @@ static int tegra_bpmp_thermal_set_trips(void *data, int low, int high) msg.tx.data = &req; msg.tx.size = sizeof(req); - return tegra_bpmp_transfer(zone->tegra->bpmp, &msg); + err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg); + if (err) + return err; + if (msg.rx.ret) + return -EINVAL; + + return 0; } static void tz_device_update_work_fn(struct work_struct *work) @@ -140,6 +149,8 @@ static int tegra_bpmp_thermal_get_num_zones(struct tegra_bpmp *bpmp, err = tegra_bpmp_transfer(bpmp, &msg); if (err) return err; + if (msg.rx.ret) + return -EINVAL; *num_zones = reply.get_num_zones.num; diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c index a16dd4d5d710..32fea5174cc0 100644 --- a/drivers/thermal/thermal_netlink.c +++ b/drivers/thermal/thermal_netlink.c @@ -43,6 +43,11 @@ static const struct nla_policy thermal_genl_policy[THERMAL_GENL_ATTR_MAX + 1] = [THERMAL_GENL_ATTR_CDEV_MAX_STATE] = { .type = NLA_U32 }, [THERMAL_GENL_ATTR_CDEV_NAME] = { .type = NLA_STRING, .len = THERMAL_NAME_LENGTH }, + /* CPU capabilities */ + [THERMAL_GENL_ATTR_CPU_CAPABILITY] = { .type = NLA_NESTED }, + [THERMAL_GENL_ATTR_CPU_CAPABILITY_ID] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY] = { .type = NLA_U32 }, }; struct param { @@ -58,6 +63,8 @@ struct param { int temp; int cdev_state; int cdev_max_state; + struct thermal_genl_cpu_caps *cpu_capabilities; + int cpu_capabilities_count; }; typedef int (*cb_t)(struct param *); @@ -190,6 +197,42 @@ static int 
thermal_genl_event_gov_change(struct param *p) return 0; } +static int thermal_genl_event_cpu_capability_change(struct param *p) +{ + struct thermal_genl_cpu_caps *cpu_cap = p->cpu_capabilities; + struct sk_buff *msg = p->msg; + struct nlattr *start_cap; + int i; + + start_cap = nla_nest_start(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY); + if (!start_cap) + return -EMSGSIZE; + + for (i = 0; i < p->cpu_capabilities_count; ++i) { + if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_ID, + cpu_cap->cpu)) + goto out_cancel_nest; + + if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE, + cpu_cap->performance)) + goto out_cancel_nest; + + if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY, + cpu_cap->efficiency)) + goto out_cancel_nest; + + ++cpu_cap; + } + + nla_nest_end(msg, start_cap); + + return 0; +out_cancel_nest: + nla_nest_cancel(msg, start_cap); + + return -EMSGSIZE; +} + int thermal_genl_event_tz_delete(struct param *p) __attribute__((alias("thermal_genl_event_tz"))); @@ -219,6 +262,7 @@ static cb_t event_cb[] = { [THERMAL_GENL_EVENT_CDEV_DELETE] = thermal_genl_event_cdev_delete, [THERMAL_GENL_EVENT_CDEV_STATE_UPDATE] = thermal_genl_event_cdev_state_update, [THERMAL_GENL_EVENT_TZ_GOV_CHANGE] = thermal_genl_event_gov_change, + [THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE] = thermal_genl_event_cpu_capability_change, }; /* @@ -356,6 +400,15 @@ int thermal_notify_tz_gov_change(int tz_id, const char *name) return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_GOV_CHANGE, &p); } +int thermal_genl_cpu_capability_event(int count, + struct thermal_genl_cpu_caps *caps) +{ + struct param p = { .cpu_capabilities_count = count, .cpu_capabilities = caps }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE, &p); +} +EXPORT_SYMBOL_GPL(thermal_genl_cpu_capability_event); + /*************************** Command encoding ********************************/ static int __thermal_genl_cmd_tz_get_id(struct thermal_zone_device *tz, @@ -419,11 +472,12 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p) for (i = 0; i < tz->trips; i++) { enum thermal_trip_type type; - int temp, hyst; + int temp, hyst = 0; tz->ops->get_trip_type(tz, i, &type); tz->ops->get_trip_temp(tz, i, &temp); - tz->ops->get_trip_hyst(tz, i, &hyst); + if (tz->ops->get_trip_hyst) + tz->ops->get_trip_hyst(tz, i, &hyst); if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) || nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, type) || diff --git a/drivers/thermal/thermal_netlink.h b/drivers/thermal/thermal_netlink.h index e554f76291f4..1052f523188d 100644 --- a/drivers/thermal/thermal_netlink.h +++ b/drivers/thermal/thermal_netlink.h @@ -4,6 +4,12 @@ * Author: Daniel Lezcano <daniel.lezcano@linaro.org> */ +struct thermal_genl_cpu_caps { + int cpu; + int performance; + int efficiency; +}; + /* Netlink notification function */ #ifdef CONFIG_THERMAL_NETLINK int __init thermal_netlink_init(void); @@ -23,6 +29,8 @@ int thermal_notify_cdev_add(int cdev_id, const char *name, int max_state); int thermal_notify_cdev_delete(int cdev_id); int thermal_notify_tz_gov_change(int tz_id, const char *name); int thermal_genl_sampling_temp(int id, int temp); +int thermal_genl_cpu_capability_event(int count, + struct thermal_genl_cpu_caps *caps); #else static inline int thermal_netlink_init(void) { @@ -101,4 +109,10 @@ static inline int thermal_genl_sampling_temp(int id, int temp) { return 0; } + +static inline int thermal_genl_cpu_capability_event(int count, struct thermal_genl_cpu_caps *caps) +{ + return 0; +} + 
#endif /* CONFIG_THERMAL_NETLINK */ diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index f84375865c97..703039d8b937 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -21,6 +21,7 @@ #include "ti-thermal.h" #include "ti-bandgap.h" +#include "../thermal_hwmon.h" /* common data structures */ struct ti_thermal_data { @@ -106,14 +107,6 @@ static inline int __ti_thermal_get_temp(void *devdata, int *temp) return ret; } -static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal, - int *temp) -{ - struct ti_thermal_data *data = thermal->devdata; - - return __ti_thermal_get_temp(data, temp); -} - static int __ti_thermal_get_trend(void *p, int trip, enum thermal_trend *trend) { struct ti_thermal_data *data = p; @@ -189,6 +182,9 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, ti_bandgap_set_sensor_data(bgp, id, data); ti_bandgap_write_update_interval(bgp, data->sensor_id, interval); + if (devm_thermal_add_hwmon_sysfs(data->ti_thermal)) + dev_warn(bgp->dev, "failed to add hwmon sysfs attributes\n"); + return 0; } diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 0b1808e3a912..fa92f727fdf8 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -439,7 +439,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) modembits |= MDM_RTR; if (dlci->modem_tx & TIOCM_RI) modembits |= MDM_IC; - if (dlci->modem_tx & TIOCM_CD) + if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator) modembits |= MDM_DV; return modembits; } @@ -448,7 +448,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) * gsm_print_packet - display a frame for debug * @hdr: header to print before decode * @addr: address EA from the frame - * @cr: C/R bit from the frame + * @cr: C/R bit seen as initiator * @control: control including PF bit * @data: following data bytes * @dlen: length of data @@ -548,7 +548,7 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len) * gsm_send - send a control frame * @gsm: our GSM mux * @addr: address for control frame - * @cr: command/response bit + * @cr: command/response bit seen as initiator * @control: control byte including PF bit * * Format up and transmit a control frame. These do not go via the @@ -563,11 +563,15 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) int len; u8 cbuf[10]; u8 ibuf[3]; + int ocr; + + /* toggle C/R coding if not initiator */ + ocr = cr ^ (gsm->initiator ? 
0 : 1); switch (gsm->encoding) { case 0: cbuf[0] = GSM0_SOF; - cbuf[1] = (addr << 2) | (cr << 1) | EA; + cbuf[1] = (addr << 2) | (ocr << 1) | EA; cbuf[2] = control; cbuf[3] = EA; /* Length of data = 0 */ cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3); @@ -577,7 +581,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) case 1: case 2: /* Control frame + packing (but not frame stuffing) in mode 1 */ - ibuf[0] = (addr << 2) | (cr << 1) | EA; + ibuf[0] = (addr << 2) | (ocr << 1) | EA; ibuf[1] = control; ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2); /* Stuffing may double the size worst case */ @@ -611,7 +615,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) static inline void gsm_response(struct gsm_mux *gsm, int addr, int control) { - gsm_send(gsm, addr, 1, control); + gsm_send(gsm, addr, 0, control); } /** @@ -1017,25 +1021,25 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data, * @tty: virtual tty bound to the DLCI * @dlci: DLCI to affect * @modem: modem bits (full EA) - * @clen: command length + * @slen: number of signal octets * * Used when a modem control message or line state inline in adaption * layer 2 is processed. Sort out the local modem state and throttles */ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci, - u32 modem, int clen) + u32 modem, int slen) { int mlines = 0; u8 brk = 0; int fc; - /* The modem status command can either contain one octet (v.24 signals) - or two octets (v.24 signals + break signals). The length field will - either be 2 or 3 respectively. This is specified in section - 5.4.6.3.7 of the 27.010 mux spec. */ + /* The modem status command can either contain one octet (V.24 signals) + * or two octets (V.24 signals + break signals). This is specified in + * section 5.4.6.3.7 of the 07.10 mux spec. + */ - if (clen == 2) + if (slen == 1) modem = modem & 0x7f; else { brk = modem & 0x7f; @@ -1092,6 +1096,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) unsigned int brk = 0; struct gsm_dlci *dlci; int len = clen; + int slen; const u8 *dp = data; struct tty_struct *tty; @@ -1111,6 +1116,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) return; dlci = gsm->dlci[addr]; + slen = len; while (gsm_read_ea(&modem, *dp++) == 0) { len--; if (len == 0) @@ -1127,7 +1133,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) modem |= (brk & 0x7f); } tty = tty_port_tty_get(&dlci->port); - gsm_process_modem(tty, dlci, modem, clen); + gsm_process_modem(tty, dlci, modem, slen); if (tty) { tty_wakeup(tty); tty_kref_put(tty); @@ -1451,6 +1457,9 @@ static void gsm_dlci_close(struct gsm_dlci *dlci) if (dlci->addr != 0) { tty_port_tty_hangup(&dlci->port, false); kfifo_reset(&dlci->fifo); + /* Ensure that gsmtty_open() can return. 
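
The n_gsm changes above normalize the C/R bit so that, inside the driver, 1 always means command; the coding is flipped on the wire when the mux is not the initiator. A standalone sketch of that convention as this patch reflects it (the initiator sends commands with C/R = 1, the responder with C/R = 0, responses inverted):

#include <stdio.h>

static int wire_cr(int cr, int initiator)
{
	/* cr: 1 = command, 0 = response, in the driver's internal coding. */
	return cr ^ (initiator ? 0 : 1);
}

int main(void)
{
	printf("initiator command:  %d\n", wire_cr(1, 1));	/* 1 */
	printf("initiator response: %d\n", wire_cr(0, 1));	/* 0 */
	printf("responder command:  %d\n", wire_cr(1, 0));	/* 0 */
	printf("responder response: %d\n", wire_cr(0, 0));	/* 1 */
	return 0;
}
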
*/ + tty_port_set_initialized(&dlci->port, 0); + wake_up_interruptible(&dlci->port.open_wait); } else dlci->gsm->dead = true; /* Unregister gsmtty driver,report gsmtty dev remove uevent for user */ @@ -1514,7 +1523,7 @@ static void gsm_dlci_t1(struct timer_list *t) dlci->mode = DLCI_MODE_ADM; gsm_dlci_open(dlci); } else { - gsm_dlci_close(dlci); + gsm_dlci_begin_close(dlci); /* prevent half open link */ } break; @@ -1593,6 +1602,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen) struct tty_struct *tty; unsigned int modem = 0; int len = clen; + int slen = 0; if (debug & 16) pr_debug("%d bytes for tty\n", len); @@ -1605,12 +1615,14 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen) case 2: /* Asynchronous serial with line state in each frame */ while (gsm_read_ea(&modem, *data++) == 0) { len--; + slen++; if (len == 0) return; } + slen++; tty = tty_port_tty_get(port); if (tty) { - gsm_process_modem(tty, dlci, modem, clen); + gsm_process_modem(tty, dlci, modem, slen); tty_kref_put(tty); } fallthrough; @@ -1748,7 +1760,12 @@ static void gsm_dlci_release(struct gsm_dlci *dlci) gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); - tty_hangup(tty); + /* We cannot use tty_hangup() because in tty_kref_put() the tty + * driver assumes that the hangup queue is free and reuses it to + * queue release_one_tty() -> NULL pointer panic in + * process_one_work(). + */ + tty_vhangup(tty); tty_port_tty_set(&dlci->port, NULL); tty_kref_put(tty); @@ -1800,10 +1817,10 @@ static void gsm_queue(struct gsm_mux *gsm) goto invalid; cr = gsm->address & 1; /* C/R bit */ + cr ^= gsm->initiator ? 0 : 1; /* Flip so 1 always means command */ gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len); - cr ^= 1 - gsm->initiator; /* Flip so 1 always means command */ dlci = gsm->dlci[address]; switch (gsm->control) { @@ -3234,9 +3251,9 @@ static void gsmtty_throttle(struct tty_struct *tty) if (dlci->state == DLCI_CLOSED) return; if (C_CRTSCTS(tty)) - dlci->modem_tx &= ~TIOCM_DTR; + dlci->modem_tx &= ~TIOCM_RTS; dlci->throttled = true; - /* Send an MSC with DTR cleared */ + /* Send an MSC with RTS cleared */ gsmtty_modem_update(dlci, 0); } @@ -3246,9 +3263,9 @@ static void gsmtty_unthrottle(struct tty_struct *tty) if (dlci->state == DLCI_CLOSED) return; if (C_CRTSCTS(tty)) - dlci->modem_tx |= TIOCM_DTR; + dlci->modem_tx |= TIOCM_RTS; dlci->throttled = false; - /* Send an MSC with DTR set */ + /* Send an MSC with RTS set */ gsmtty_modem_update(dlci, 0); } diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 8933ef1f83c0..efc72104c840 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1329,7 +1329,7 @@ handle_newline: put_tty_queue(c, ldata); smp_store_release(&ldata->canon_head, ldata->read_head); kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM); return; } } @@ -1561,7 +1561,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, if (read_cnt(ldata)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM); } } @@ -1926,7 +1926,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty, return false; canon_head = smp_load_acquire(&ldata->canon_head); - n = min(*nr + 1, canon_head - ldata->read_tail); + n = min(*nr, canon_head - ldata->read_tail); tail = ldata->read_tail & 
(N_TTY_BUF_SIZE - 1); size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); @@ -1948,10 +1948,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty, n += N_TTY_BUF_SIZE; c = n + found; - if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) { - c = min(*nr, c); + if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) n = c; - } n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n", __func__, eol, found, n, c, tail, more); diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c index 673cda3d011d..948d0a1c6ae8 100644 --- a/drivers/tty/serial/8250/8250_gsc.c +++ b/drivers/tty/serial/8250/8250_gsc.c @@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev) unsigned long address; int err; -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC) if (!dev->irq && (dev->id.sversion == 0xad)) dev->irq = iosapic_serial_irq(dev); #endif diff --git a/drivers/tty/serial/8250/8250_pericom.c b/drivers/tty/serial/8250/8250_pericom.c index 025b055363c3..95ff10f25d58 100644 --- a/drivers/tty/serial/8250/8250_pericom.c +++ b/drivers/tty/serial/8250/8250_pericom.c @@ -117,7 +117,7 @@ static int pericom8250_probe(struct pci_dev *pdev, const struct pci_device_id *i uart.port.private_data = pericom; uart.port.iotype = UPIO_PORT; uart.port.uartclk = 921600 * 16; - uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ | UPF_MAGIC_MULTIPLIER; + uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ; uart.port.set_divisor = pericom_do_set_divisor; for (i = 0; i < nr && i < maxnr; i++) { unsigned int offset = (i == 3 && nr == 4) ? 0x38 : i * 0x8; diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 64e7e6c8145f..38d1c0748533 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -734,12 +734,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id) static void sc16is7xx_tx_proc(struct kthread_work *ws) { struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port); + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); if ((port->rs485.flags & SER_RS485_ENABLED) && (port->rs485.delay_rts_before_send > 0)) msleep(port->rs485.delay_rts_before_send); + mutex_lock(&s->efr_lock); sc16is7xx_handle_tx(port); + mutex_unlock(&s->efr_lock); } static void sc16is7xx_reconf_rs485(struct uart_port *port) diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 3639bb6dc372..58013698635f 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -599,8 +599,8 @@ static int vt_setactivate(struct vt_setactivate __user *sa) if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) return -ENXIO; - vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES + 1); vsa.console--; + vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES); console_lock(); ret = vc_allocate(vsa.console); if (ret) { @@ -845,6 +845,7 @@ int vt_ioctl(struct tty_struct *tty, return -ENXIO; arg--; + arg = array_index_nospec(arg, MAX_NR_CONSOLES); console_lock(); ret = vc_allocate(arg); console_unlock(); diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 73f419adce61..4bb6d304eb4b 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -1919,6 +1919,7 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, struct usbtmc_ctrlrequest request; u8 *buffer = NULL; int rv; + unsigned int is_in, pipe; unsigned long res; res = copy_from_user(&request, arg, sizeof(struct usbtmc_ctrlrequest)); @@ 
-1928,12 +1929,14 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, if (request.req.wLength > USBTMC_BUFSIZE) return -EMSGSIZE; + is_in = request.req.bRequestType & USB_DIR_IN; + if (request.req.wLength) { buffer = kmalloc(request.req.wLength, GFP_KERNEL); if (!buffer) return -ENOMEM; - if ((request.req.bRequestType & USB_DIR_IN) == 0) { + if (!is_in) { /* Send control data to device */ res = copy_from_user(buffer, request.data, request.req.wLength); @@ -1944,8 +1947,12 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, } } + if (is_in) + pipe = usb_rcvctrlpipe(data->usb_dev, 0); + else + pipe = usb_sndctrlpipe(data->usb_dev, 0); rv = usb_control_msg(data->usb_dev, - usb_rcvctrlpipe(data->usb_dev, 0), + pipe, request.req.bRequest, request.req.bRequestType, request.req.wValue, @@ -1957,7 +1964,7 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, goto exit; } - if (rv && (request.req.bRequestType & USB_DIR_IN)) { + if (rv && is_in) { /* Read control data from device */ res = copy_to_user(request.data, buffer, rv); if (res) diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index 8f8405b0d608..5509d3847af4 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -130,6 +130,7 @@ static const struct attribute_group *ulpi_dev_attr_groups[] = { static void ulpi_dev_release(struct device *dev) { + of_node_put(dev->of_node); kfree(to_ulpi_dev(dev)); } @@ -247,12 +248,16 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi) return ret; ret = ulpi_read_id(ulpi); - if (ret) + if (ret) { + of_node_put(ulpi->dev.of_node); return ret; + } ret = device_register(&ulpi->dev); - if (ret) + if (ret) { + put_device(&ulpi->dev); return ret; + } dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n", ulpi->id.vendor, ulpi->id.product); @@ -299,7 +304,6 @@ EXPORT_SYMBOL_GPL(ulpi_register_interface); */ void ulpi_unregister_interface(struct ulpi *ulpi) { - of_node_put(ulpi->dev.of_node); device_unregister(&ulpi->dev); } EXPORT_SYMBOL_GPL(ulpi_unregister_interface); diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index d630cccd2e6e..dd44e37a454a 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -446,7 +446,7 @@ static int suspend_common(struct device *dev, bool do_wakeup) HCD_WAKEUP_PENDING(hcd->shared_hcd)) return -EBUSY; retval = hcd->driver->pci_suspend(hcd, do_wakeup); - suspend_report_result(hcd->driver->pci_suspend, retval); + suspend_report_result(dev, hcd->driver->pci_suspend, retval); /* Check again in case wakeup raced with pci_suspend */ if ((retval == 0 && do_wakeup && HCD_WAKEUP_PENDING(hcd)) || @@ -556,7 +556,7 @@ static int hcd_pci_suspend_noirq(struct device *dev) dev_dbg(dev, "--> PCI %s\n", pci_power_name(pci_dev->current_state)); } else { - suspend_report_result(pci_prepare_to_sleep, retval); + suspend_report_result(dev, pci_prepare_to_sleep, retval); return retval; } diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index c2bbf97a79be..d5bc36ca5b1f 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c @@ -602,11 +602,14 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1) return retval; } - find_and_link_peer(hub, port1); - retval = component_add(&port_dev->dev, &connector_ops); - if (retval) + if (retval) { dev_warn(&port_dev->dev, "failed to add component\n"); + device_unregister(&port_dev->dev); + return retval; + } + + find_and_link_peer(hub, port1); /* * Enable runtime pm and hold a reference
that hub_configure() diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 8a63da3ab39d..88c337bf564f 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -1418,6 +1418,7 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg); void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2); int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode); #define dwc2_is_device_connected(hsotg) (hsotg->connected) +#define dwc2_is_device_enabled(hsotg) (hsotg->enabled) int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg); int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup); int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg); @@ -1454,6 +1455,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode) { return 0; } #define dwc2_is_device_connected(hsotg) (0) +#define dwc2_is_device_enabled(hsotg) (0) static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) { return 0; } static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c index 1b39c4776369..d8d6493bc457 100644 --- a/drivers/usb/dwc2/drd.c +++ b/drivers/usb/dwc2/drd.c @@ -130,8 +130,10 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role) already = dwc2_ovr_avalid(hsotg, true); } else if (role == USB_ROLE_DEVICE) { already = dwc2_ovr_bvalid(hsotg, true); - /* This clear DCTL.SFTDISCON bit */ - dwc2_hsotg_core_connect(hsotg); + if (dwc2_is_device_enabled(hsotg)) { + /* This clears the DCTL.SFTDISCON bit */ + dwc2_hsotg_core_connect(hsotg); + } } else { if (dwc2_is_device_mode(hsotg)) { if (!dwc2_ovr_bvalid(hsotg, false)) diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 7ff8fc8f79a9..06d0e88ec8af 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -43,6 +43,7 @@ #define PCI_DEVICE_ID_INTEL_ADLP 0x51ee #define PCI_DEVICE_ID_INTEL_ADLM 0x54ee #define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1 +#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61 #define PCI_DEVICE_ID_INTEL_TGL 0x9a15 #define PCI_DEVICE_ID_AMD_MR 0x163a @@ -85,8 +86,8 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = { static struct gpiod_lookup_table platform_bytcr_gpios = { .dev_id = "0000:00:16.0", .table = { - GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("INT33FC:02", 14, "cs", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH), {} }, }; @@ -119,6 +120,13 @@ static const struct property_entry dwc3_pci_intel_properties[] = { {} }; +static const struct property_entry dwc3_pci_intel_byt_properties[] = { + PROPERTY_ENTRY_STRING("dr_mode", "peripheral"), + PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"), + PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), + {} +}; + static const struct property_entry dwc3_pci_mrfld_properties[] = { PROPERTY_ENTRY_STRING("dr_mode", "otg"), PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"), @@ -161,6 +169,10 @@ static const struct software_node dwc3_pci_intel_swnode = { .properties = dwc3_pci_intel_properties, }; +static const struct software_node dwc3_pci_intel_byt_swnode = { + .properties = dwc3_pci_intel_byt_properties, +}; + static const struct software_node dwc3_pci_intel_mrfld_swnode = { .properties = dwc3_pci_mrfld_properties, }; @@ -344,7 +356,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { (kernel_ulong_t) &dwc3_pci_intel_swnode, }, {
PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT), - (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + (kernel_ulong_t) &dwc3_pci_intel_byt_swnode, }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), (kernel_ulong_t) &dwc3_pci_intel_mrfld_swnode, }, @@ -409,6 +421,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS), (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS), + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL), (kernel_ulong_t) &dwc3_pci_intel_swnode, }, diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c index e14ac15e24c3..a6f3a9b38789 100644 --- a/drivers/usb/dwc3/dwc3-xilinx.c +++ b/drivers/usb/dwc3/dwc3-xilinx.c @@ -99,7 +99,7 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data) struct device *dev = priv_data->dev; struct reset_control *crst, *hibrst, *apbrst; struct phy *usb3_phy; - int ret; + int ret = 0; u32 reg; usb3_phy = devm_phy_optional_get(dev, "usb3-phy"); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 520031ba38aa..a0c883f19a41 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1291,6 +1291,19 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id); + /* + * As per the data book, section 4.2.3.2 (TRB Control Bit Rules): + * + * The controller autonomously checks the HWO field of a TRB to determine if the + * entire TRB is valid. Therefore, software must ensure that the rest of the TRB + * is valid before setting the HWO field to '1'. In most systems, this means that + * software must update the fourth DWORD of a TRB last. + * + * However, there is a possibility of CPU re-ordering here which can cause + * the controller to observe the HWO bit set prematurely. + * Add a write memory barrier to prevent CPU re-ordering.
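[Editor's note: the barrier rule above, reduced to a minimal sketch. It assumes the usual struct dwc3_trb layout (bpl/bph/size/ctrl) and the DWC3_TRB_* macros from dwc3's core.h; the helper name is hypothetical and this is not the driver's literal code.]

static void sketch_arm_trb(struct dwc3_trb *trb, dma_addr_t dma,
			   u32 length, u32 ctrl)
{
	trb->bpl = lower_32_bits(dma);            /* DWORDs 0-2 first */
	trb->bph = upper_32_bits(dma);
	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->ctrl = ctrl;                         /* every control bit except HWO */

	wmb();  /* make the field writes visible before publishing HWO */

	trb->ctrl |= DWC3_TRB_CTRL_HWO;           /* controller may now consume the TRB */
}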
+ */ + wmb(); trb->ctrl |= DWC3_TRB_CTRL_HWO; dwc3_ep_inc_enq(dep); @@ -4147,9 +4160,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) unsigned long flags; irqreturn_t ret = IRQ_NONE; + local_bh_disable(); spin_lock_irqsave(&dwc->lock, flags); ret = dwc3_process_event_buf(evt); spin_unlock_irqrestore(&dwc->lock, flags); + local_bh_enable(); return ret; } diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 16f9e3423c9f..9315313108c9 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1988,6 +1988,9 @@ unknown: if (w_index != 0x5 || (w_value >> 8)) break; interface = w_value & 0xFF; + if (interface >= MAX_CONFIG_INTERFACES || + !os_desc_cfg->interface[interface]) + break; buf[6] = w_index; count = count_ext_prop(os_desc_cfg, interface); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 25ad1e97a458..1922fd02043c 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1711,16 +1711,24 @@ static void ffs_data_put(struct ffs_data *ffs) static void ffs_data_closed(struct ffs_data *ffs) { + struct ffs_epfile *epfiles; + unsigned long flags; + ENTER(); if (atomic_dec_and_test(&ffs->opened)) { if (ffs->no_disconnect) { ffs->state = FFS_DEACTIVATED; - if (ffs->epfiles) { - ffs_epfiles_destroy(ffs->epfiles, - ffs->eps_count); - ffs->epfiles = NULL; - } + spin_lock_irqsave(&ffs->eps_lock, flags); + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irqrestore(&ffs->eps_lock, + flags); + + if (epfiles) + ffs_epfiles_destroy(epfiles, + ffs->eps_count); + if (ffs->setup_state == FFS_SETUP_PENDING) __ffs_ep0_stall(ffs); } else { @@ -1767,14 +1775,27 @@ static struct ffs_data *ffs_data_new(const char *dev_name) static void ffs_data_clear(struct ffs_data *ffs) { + struct ffs_epfile *epfiles; + unsigned long flags; + ENTER(); ffs_closed(ffs); BUG_ON(ffs->gadget); - if (ffs->epfiles) { - ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count); + spin_lock_irqsave(&ffs->eps_lock, flags); + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irqrestore(&ffs->eps_lock, flags); + + /* + * A race is possible between ffs_func_eps_disable() + * and ffs_epfile_release(); maintaining a local + * copy of epfiles saves us from a use-after-free.
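[Editor's note: the detach-then-destroy pattern applied by the f_fs.c hunks above, shown in isolation. The wrapper function is hypothetical; the names inside it come from the hunks. The point is that a concurrent reader of ffs->epfiles sees either NULL or a still-valid pointer, never a freed one.]

static void sketch_clear_epfiles(struct ffs_data *ffs)
{
	struct ffs_epfile *epfiles;
	unsigned long flags;

	spin_lock_irqsave(&ffs->eps_lock, flags);
	epfiles = ffs->epfiles;   /* move the pointer into a local copy */
	ffs->epfiles = NULL;      /* readers under the lock now see NULL */
	spin_unlock_irqrestore(&ffs->eps_lock, flags);

	if (epfiles)              /* destroy outside the lock, exactly once */
		ffs_epfiles_destroy(epfiles, ffs->eps_count);
}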
+ */ + if (epfiles) { + ffs_epfiles_destroy(epfiles, ffs->eps_count); ffs->epfiles = NULL; } @@ -1922,12 +1943,15 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count) static void ffs_func_eps_disable(struct ffs_function *func) { - struct ffs_ep *ep = func->eps; - struct ffs_epfile *epfile = func->ffs->epfiles; - unsigned count = func->ffs->eps_count; + struct ffs_ep *ep; + struct ffs_epfile *epfile; + unsigned short count; unsigned long flags; spin_lock_irqsave(&func->ffs->eps_lock, flags); + count = func->ffs->eps_count; + epfile = func->ffs->epfiles; + ep = func->eps; while (count--) { /* pending requests get nuked */ if (ep->ep) @@ -1945,14 +1969,18 @@ static void ffs_func_eps_disable(struct ffs_function *func) static int ffs_func_eps_enable(struct ffs_function *func) { - struct ffs_data *ffs = func->ffs; - struct ffs_ep *ep = func->eps; - struct ffs_epfile *epfile = ffs->epfiles; - unsigned count = ffs->eps_count; + struct ffs_data *ffs; + struct ffs_ep *ep; + struct ffs_epfile *epfile; + unsigned short count; unsigned long flags; int ret = 0; spin_lock_irqsave(&func->ffs->eps_lock, flags); + ffs = func->ffs; + ep = func->eps; + epfile = ffs->epfiles; + count = ffs->eps_count; while(count--) { ep->ep->driver_data = ep; diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 36fa6ef0581b..097a709549d6 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -203,7 +203,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = { .bDescriptorSubtype = UAC_INPUT_TERMINAL, /* .bTerminalID = DYNAMIC */ - .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED), + .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE), .bAssocTerminal = 0, /* .bCSourceID = DYNAMIC */ .iChannelNames = 0, @@ -231,7 +231,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = { .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, /* .bTerminalID = DYNAMIC */ - .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED), + .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER), .bAssocTerminal = 0, /* .bSourceID = DYNAMIC */ /* .bCSourceID = DYNAMIC */ diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 431d5a7d737e..713efd9aefde 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -637,14 +637,18 @@ static int rndis_set_response(struct rndis_params *params, rndis_set_cmplt_type *resp; rndis_resp_t *r; + BufLength = le32_to_cpu(buf->InformationBufferLength); + BufOffset = le32_to_cpu(buf->InformationBufferOffset); + if ((BufLength > RNDIS_MAX_TOTAL_SIZE) || + (BufOffset > RNDIS_MAX_TOTAL_SIZE) || + (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE)) + return -EINVAL; + r = rndis_add_response(params, sizeof(rndis_set_cmplt_type)); if (!r) return -ENOMEM; resp = (rndis_set_cmplt_type *)r->buf; - BufLength = le32_to_cpu(buf->InformationBufferLength); - BufOffset = le32_to_cpu(buf->InformationBufferOffset); - #ifdef VERBOSE_DEBUG pr_debug("%s: Length: %d\n", __func__, BufLength); pr_debug("%s: Offset: %d\n", __func__, BufOffset); @@ -919,6 +923,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v) params->resp_avail = resp_avail; params->v = v; INIT_LIST_HEAD(&params->resp_queue); + spin_lock_init(&params->resp_lock); pr_debug("%s: configNr = %d\n", __func__, i); return params; @@ -1012,12 +1017,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf) { rndis_resp_t *r, *n; +
spin_lock(¶ms->resp_lock); list_for_each_entry_safe(r, n, ¶ms->resp_queue, list) { if (r->buf == buf) { list_del(&r->list); kfree(r); } } + spin_unlock(¶ms->resp_lock); } EXPORT_SYMBOL_GPL(rndis_free_response); @@ -1027,14 +1034,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length) if (!length) return NULL; + spin_lock(¶ms->resp_lock); list_for_each_entry_safe(r, n, ¶ms->resp_queue, list) { if (!r->send) { r->send = 1; *length = r->length; + spin_unlock(¶ms->resp_lock); return r->buf; } } + spin_unlock(¶ms->resp_lock); return NULL; } EXPORT_SYMBOL_GPL(rndis_get_next_response); @@ -1051,7 +1061,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length) r->length = length; r->send = 0; + spin_lock(¶ms->resp_lock); list_add_tail(&r->list, ¶ms->resp_queue); + spin_unlock(¶ms->resp_lock); return r; } diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h index f6167f7fea82..6206b8b7490f 100644 --- a/drivers/usb/gadget/function/rndis.h +++ b/drivers/usb/gadget/function/rndis.h @@ -174,6 +174,7 @@ typedef struct rndis_params { void (*resp_avail)(void *v); void *v; struct list_head resp_queue; + spinlock_t resp_lock; } rndis_params; /* RNDIS Message parser and other useless functions */ diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index c5a2c734234a..d86c3a36441e 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -1004,7 +1004,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io, ret = -EBUSY; goto out_unlock; } - if ((in && !ep->ep->caps.dir_in) || (!in && ep->ep->caps.dir_in)) { + if (in != usb_endpoint_dir_in(ep->ep->desc)) { dev_dbg(&dev->gadget->dev, "fail, wrong direction\n"); ret = -EINVAL; goto out_unlock; diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 568534a0d17c..c109b069f511 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1436,7 +1436,6 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) usb_gadget_udc_stop(udc); udc->driver = NULL; - udc->dev.driver = NULL; udc->gadget->dev.driver = NULL; } @@ -1498,7 +1497,6 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri driver->function); udc->driver = driver; - udc->dev.driver = &driver->driver; udc->gadget->dev.driver = &driver->driver; usb_gadget_udc_set_speed(udc, driver->max_speed); @@ -1521,7 +1519,6 @@ err1: dev_err(&udc->dev, "failed to start %s: %d\n", udc->driver->function, ret); udc->driver = NULL; - udc->dev.driver = NULL; udc->gadget->dev.driver = NULL; return ret; } diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 57d417a7c3e0..601829a6b4ba 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -2378,6 +2378,8 @@ static void handle_ext_role_switch_states(struct device *dev, switch (role) { case USB_ROLE_NONE: usb3->connection_state = USB_ROLE_NONE; + if (cur_role == USB_ROLE_HOST) + device_release_driver(host); if (usb3->driver) usb3_disconnect(usb3); usb3_vbus_out(usb3, false); diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index 6ce886fb7bfe..2907fad04e2c 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -1615,6 +1615,8 @@ static void xudc_getstatus(struct xusb_udc *udc) break; case USB_RECIP_ENDPOINT: epnum = udc->setup.wIndex & 
USB_ENDPOINT_NUMBER_MASK; + if (epnum >= XUSB_MAX_ENDPOINTS) + goto stall; target_ep = &udc->ep[epnum]; epcfgreg = udc->read_fn(udc->addr + target_ep->offset); halt = epcfgreg & XUSB_EP_CFG_STALL_MASK; @@ -1682,6 +1684,10 @@ static void xudc_set_clear_feature(struct xusb_udc *udc) case USB_RECIP_ENDPOINT: if (!udc->setup.wValue) { endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; + if (endpoint >= XUSB_MAX_ENDPOINTS) { + xudc_ep0_stall(udc); + return; + } target_ep = &udc->ep[endpoint]; outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK; outinbit = outinbit >> 7; diff --git a/drivers/usb/host/xen-hcd.c b/drivers/usb/host/xen-hcd.c index be09fd9bac58..19b8c7ed74cb 100644 --- a/drivers/usb/host/xen-hcd.c +++ b/drivers/usb/host/xen-hcd.c @@ -716,8 +716,9 @@ static int xenhcd_map_urb_for_request(struct xenhcd_info *info, struct urb *urb, return 0; } -static void xenhcd_gnttab_done(struct usb_shadow *shadow) +static void xenhcd_gnttab_done(struct xenhcd_info *info, unsigned int id) { + struct usb_shadow *shadow = info->shadow + id; int nr_segs = 0; int i; @@ -726,8 +727,10 @@ static void xenhcd_gnttab_done(struct usb_shadow *shadow) if (xenusb_pipeisoc(shadow->req.pipe)) nr_segs += shadow->req.u.isoc.nr_frame_desc_segs; - for (i = 0; i < nr_segs; i++) - gnttab_end_foreign_access(shadow->req.seg[i].gref, 0, 0UL); + for (i = 0; i < nr_segs; i++) { + if (!gnttab_try_end_foreign_access(shadow->req.seg[i].gref)) + xenhcd_set_error(info, "backend didn't release grant"); + } shadow->req.nr_buffer_segs = 0; shadow->req.u.isoc.nr_frame_desc_segs = 0; @@ -841,7 +844,9 @@ static void xenhcd_cancel_all_enqueued_urbs(struct xenhcd_info *info) list_for_each_entry_safe(urbp, tmp, &info->in_progress_list, list) { req_id = urbp->req_id; if (!urbp->unlinked) { - xenhcd_gnttab_done(&info->shadow[req_id]); + xenhcd_gnttab_done(info, req_id); + if (info->error) + return; if (urbp->urb->status == -EINPROGRESS) /* not dequeued */ xenhcd_giveback_urb(info, urbp->urb, @@ -942,8 +947,7 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info) rp = info->urb_ring.sring->rsp_prod; if (RING_RESPONSE_PROD_OVERFLOW(&info->urb_ring, rp)) { xenhcd_set_error(info, "Illegal index on urb-ring"); - spin_unlock_irqrestore(&info->lock, flags); - return 0; + goto err; } rmb(); /* ensure we see queued responses up to "rp" */ @@ -952,11 +956,13 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info) id = res.id; if (id >= XENUSB_URB_RING_SIZE) { xenhcd_set_error(info, "Illegal data on urb-ring"); - continue; + goto err; } if (likely(xenusb_pipesubmit(info->shadow[id].req.pipe))) { - xenhcd_gnttab_done(&info->shadow[id]); + xenhcd_gnttab_done(info, id); + if (info->error) + goto err; urb = info->shadow[id].urb; if (likely(urb)) { urb->actual_length = res.actual_length; @@ -978,6 +984,10 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info) spin_unlock_irqrestore(&info->lock, flags); return more_to_do; + + err: + spin_unlock_irqrestore(&info->lock, flags); + return 0; } static int xenhcd_conn_notify(struct xenhcd_info *info) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index dc357cabb265..2d378543bc3a 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1091,6 +1091,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) int retval = 0; bool comp_timer_running = false; bool pending_portevent = false; + bool reinit_xhc = false; if (!hcd->state) return 0; @@ -1107,10 +1108,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) set_bit(HCD_FLAG_HW_ACCESSIBLE, 
&xhci->shared_hcd->flags); spin_lock_irq(&xhci->lock); - if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend) - hibernated = true; - if (!hibernated) { + if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend) + reinit_xhc = true; + + if (!reinit_xhc) { /* * Some controllers might lose power during suspend, so wait * for controller not ready bit to clear, just as in xHC init. @@ -1143,12 +1145,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; } - temp = readl(&xhci->op_regs->status); } - /* If restore operation fails, re-initialize the HC during resume */ - if ((temp & STS_SRE) || hibernated) { + temp = readl(&xhci->op_regs->status); + /* re-initialize the HC on Restore Error, or Host Controller Error */ + if (temp & (STS_SRE | STS_HCE)) { + reinit_xhc = true; + xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); + } + + if (reinit_xhc) { if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !(xhci_all_ports_seen_u0(xhci))) { del_timer_sync(&xhci->comp_mode_recovery_timer); @@ -1604,9 +1611,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag struct urb_priv *urb_priv; int num_tds; - if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, - true, true, __func__) <= 0) + if (!urb) return -EINVAL; + ret = xhci_check_args(hcd, urb->dev, urb->ep, + true, true, __func__); + if (ret <= 0) + return ret ? ret : -EINVAL; slot_id = urb->dev->slot_id; ep_index = xhci_get_endpoint_index(&urb->ep->desc); @@ -3323,7 +3333,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, return -EINVAL; ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); if (ret <= 0) - return -EINVAL; + return ret ? ret : -EINVAL; if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" " descriptor for ep 0x%x does not support streams\n", diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index 507deef1f709..04c4e3fed094 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c @@ -543,6 +543,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1)) hub->lang_id = USB251XB_DEF_LANGUAGE_ID; + if (of_property_read_u8(np, "boost-up", &hub->boost_up)) + hub->boost_up = USB251XB_DEF_BOOST_UP; + cproperty_char = of_get_property(np, "manufacturer", NULL); strlcpy(str, cproperty_char ? : USB251XB_DEF_MANUFACTURER_STRING, sizeof(str)); @@ -584,7 +587,6 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, * may be as soon as needed. 
*/ hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE; - hub->boost_up = USB251XB_DEF_BOOST_UP; hub->boost_57 = USB251XB_DEF_BOOST_57; hub->boost_14 = USB251XB_DEF_BOOST_14; hub->port_map12 = USB251XB_DEF_PORT_MAP_12; diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 7d4d0713f4f0..d2b7e613eb34 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -327,7 +327,6 @@ static int omap2430_probe(struct platform_device *pdev) musb->dev.parent = &pdev->dev; musb->dev.dma_mask = &omap2430_dmamask; musb->dev.coherent_dma_mask = omap2430_dmamask; - device_set_of_node_from_dev(&musb->dev, &pdev->dev); glue->dev = &pdev->dev; glue->musb = musb; diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 29f4b87a9e74..2798fca71261 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -81,10 +81,10 @@ #define CH341_QUIRK_SIMULATE_BREAK BIT(1) static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x1a86, 0x5512) }, { USB_DEVICE(0x1a86, 0x5523) }, { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, + { USB_DEVICE(0x2184, 0x0057) }, { USB_DEVICE(0x4348, 0x5523) }, { USB_DEVICE(0x9986, 0x7523) }, { }, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 8a60c0d56863..a27f7efcec6a 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -51,6 +51,7 @@ static void cp210x_enable_event_mode(struct usb_serial_port *port); static void cp210x_disable_event_mode(struct usb_serial_port *port); static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x0404, 0x034C) }, /* NCR Retail IO Box */ { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */ { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ @@ -68,6 +69,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */ + { USB_DEVICE(0x106F, 0x0003) }, /* CPI / Money Controls Bulk Coin Recycler */ { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 4edebd14ef29..49c08f07c969 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -969,6 +969,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_159_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) }, @@ -977,12 +978,14 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_235_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, 
BRAINBOXES_US_279_3_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_320_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 755858ca20ba..d1a9564697a4 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1506,6 +1506,9 @@ #define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */ #define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */ #define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */ +#define BRAINBOXES_US_159_PID 0x1021 /* US-159 1xRS232 */ +#define BRAINBOXES_US_235_PID 0x1017 /* US-235 1xRS232 */ +#define BRAINBOXES_US_320_PID 0x1019 /* US-320 1xRS422/485 */ #define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */ #define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */ #define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 42420bfc983c..e7755d9cfc61 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -198,6 +198,8 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5821E 0x81d7 #define DELL_PRODUCT_5821E_ESIM 0x81e0 +#define DELL_PRODUCT_5829E_ESIM 0x81e4 +#define DELL_PRODUCT_5829E 0x81e6 #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da @@ -1063,6 +1065,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM), .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E), + .driver_info = RSVD(0) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM), + .driver_info = RSVD(0) | RSVD(6) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -1273,10 +1279,16 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */ .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */ + .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */ + .driver_info = NCTRL(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */ .driver_info = NCTRL(0) | ZLP }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */ + .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, @@ -1649,6 +1661,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */ { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff), /* ZTE MF286D */ + .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, diff --git a/drivers/usb/typec/port-mapper.c b/drivers/usb/typec/port-mapper.c index a7d507802509..a929e000d0e2 100644 --- a/drivers/usb/typec/port-mapper.c +++ b/drivers/usb/typec/port-mapper.c @@ -59,7 +59,7 @@ int typec_link_ports(struct typec_port *con) if (!has_acpi_companion(&con->dev)) return 0; - bus_for_each_dev(&acpi_bus_type, NULL, &arg, typec_port_match); + acpi_bus_for_each_dev(typec_port_match, &arg); if (!arg.match) return 0; diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index 6d27a5b5e3ca..7ffcda94d323 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -761,12 +761,12 @@ static int tps6598x_probe(struct i2c_client *client) ret = tps6598x_read32(tps, TPS_REG_STATUS, &status); if (ret < 0) - return ret; + goto err_clear_mask; trace_tps6598x_status(status); ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf); if (ret < 0) - return ret; + goto err_clear_mask; /* * This fwnode has a "compatible" property, but is never populated as a @@ -855,7 +855,8 @@ err_role_put: usb_role_switch_put(tps->role_sw); err_fwnode_put: fwnode_handle_put(fwnode); - +err_clear_mask: + tps6598x_write64(tps, TPS_REG_INT_MASK1, 0); return ret; } diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index f648f1c54a0f..d0f91078600e 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1563,11 +1563,27 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd) switch (cmd) { case VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET: + /* This mq feature check aligns with the pre-existing userspace + * implementation. + * + * Without it, an untrusted driver could fake a multiqueue config + * request down to a non-mq device, which may cause the kernel to + * panic due to uninitialized resources for the extra vqs. Even with + * a well-behaving guest driver, it is not expected to allow + * changing the number of vqs on a non-mq device. + */ + if (!MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) + break; + read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq)); if (read != sizeof(mq)) break; + newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs); + if (newqps < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || + newqps > mlx5_vdpa_max_qps(mvdev->max_vqs)) + break; + if (ndev->cur_num_vqs == 2 * newqps) { status = VIRTIO_NET_OK; break; @@ -1897,11 +1913,25 @@ static u64 mlx5_vdpa_get_device_features(struct vdpa_device *vdev) return ndev->mvdev.mlx_features; } -static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features) +static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features) { + /* Minimum features to expect */ if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM))) return -EOPNOTSUPP; + /* Double-check the feature combination sent down by the driver. + * Fail invalid features due to absence of the feature they depend on. + * + * Per VIRTIO v1.1 specification, section 5.1.3.1 Feature bit + * requirements: "VIRTIO_NET_F_MQ Requires VIRTIO_NET_F_CTRL_VQ".
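[Editor's note: the spec rule quoted above collapses to a single bitmask test; this helper is a hypothetical restatement of the check added below, not part of the patch.]

static bool sketch_mq_without_ctrl_vq(u64 features)
{
	u64 both = BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ);

	/* invalid iff MQ is set while CTRL_VQ is clear */
	return (features & both) == BIT_ULL(VIRTIO_NET_F_MQ);
}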
+ By failing the invalid features sent down by untrusted drivers, + * we're assured that the assumptions made by is_index_valid() and + * is_ctrl_vq_idx() will not be compromised. + */ + if ((features & (BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) == + BIT_ULL(VIRTIO_NET_F_MQ)) + return -EINVAL; + return 0; } @@ -1977,7 +2007,7 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features) print_features(mvdev, features, true); - err = verify_min_features(mvdev, features); + err = verify_driver_features(mvdev, features); if (err) return err; diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index 9846c9de4bfa..1ea525433a5c 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -393,7 +393,7 @@ static void vdpa_get_config_unlocked(struct vdpa_device *vdev, * If it does happen we assume a legacy guest. */ if (!vdev->features_valid) - vdpa_set_features(vdev, 0, true); + vdpa_set_features_unlocked(vdev, 0); ops->get_config(vdev, offset, buf, len); } diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c index 2b1143f11d8f..0a4d93edc4c0 100644 --- a/drivers/vdpa/vdpa_user/iova_domain.c +++ b/drivers/vdpa/vdpa_user/iova_domain.c @@ -294,7 +294,7 @@ vduse_domain_alloc_iova(struct iova_domain *iovad, iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true); - return iova_pfn << shift; + return (dma_addr_t)iova_pfn << shift; } static void vduse_domain_free_iova(struct iova_domain *iovad, diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c index a57e381e830b..cce101e6a940 100644 --- a/drivers/vdpa/virtio_pci/vp_vdpa.c +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c @@ -533,8 +533,8 @@ static void vp_vdpa_remove(struct pci_dev *pdev) { struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev); - vdpa_unregister_device(&vp_vdpa->vdpa); vp_modern_remove(&vp_vdpa->mdev); + vdpa_unregister_device(&vp_vdpa->vdpa); } static struct pci_driver vp_vdpa_driver = { diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c index 670d56c879e5..40b098320b2a 100644 --- a/drivers/vhost/iotlb.c +++ b/drivers/vhost/iotlb.c @@ -57,6 +57,17 @@ int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb, if (last < start) return -EFAULT; + /* If the range being mapped is [0, ULONG_MAX], split it into two entries; + * otherwise its size would overflow u64.
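[Editor's note: the arithmetic behind the split. With start == 0 and last == ULONG_MAX, the natural size last - start + 1 equals 2^64 and wraps to 0 in a u64, so the map is recorded as two halves. The helper below is a hypothetical restatement, assuming last >= start as checked just above.]

static inline bool sketch_size_would_wrap(u64 start, u64 last)
{
	/* given last >= start, only the full range wraps last - start + 1 to 0 */
	return start == 0 && last == ULONG_MAX;
}

/* Splitting at mid = ULONG_MAX / 2 gives [0, mid] with size mid + 1 and
 * [mid + 1, ULONG_MAX] with size ULONG_MAX - mid; both fit in u64. */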
+ */ + if (start == 0 && last == ULONG_MAX) { + u64 mid = last / 2; + + vhost_iotlb_add_range_ctx(iotlb, start, mid, addr, perm, opaque); + addr += mid + 1; + start = mid + 1; + } + if (iotlb->limit && iotlb->nmaps == iotlb->limit && iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) { diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 851539807bc9..ec5249e8c32d 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -286,7 +286,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep) if (copy_from_user(&features, featurep, sizeof(features))) return -EFAULT; - if (vdpa_set_features(vdpa, features, false)) + if (vdpa_set_features(vdpa, features)) return -EINVAL; return 0; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 59edb5a1ffe2..1768362115c6 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1170,6 +1170,13 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, goto done; } + if ((msg.type == VHOST_IOTLB_UPDATE || + msg.type == VHOST_IOTLB_INVALIDATE) && + msg.size == 0) { + ret = -EINVAL; + goto done; + } + if (dev->msg_handler) ret = dev->msg_handler(dev, &msg); else @@ -1981,7 +1988,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq) return 0; } -static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) +static int vhost_update_avail_event(struct vhost_virtqueue *vq) { if (vhost_put_avail_event(vq)) return -EFAULT; @@ -2527,7 +2534,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) return false; } } else { - r = vhost_update_avail_event(vq, vq->avail_idx); + r = vhost_update_avail_event(vq); if (r) { vq_err(vq, "Failed to update avail event index at %p: %d\n", vhost_avail_event(vq), r); diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index d6ca1c7ad513..e6c9d41db1de 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -629,16 +629,18 @@ err: return ret; } -static int vhost_vsock_stop(struct vhost_vsock *vsock) +static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner) { size_t i; - int ret; + int ret = 0; mutex_lock(&vsock->dev.mutex); - ret = vhost_dev_check_owner(&vsock->dev); - if (ret) - goto err; + if (check_owner) { + ret = vhost_dev_check_owner(&vsock->dev); + if (ret) + goto err; + } for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { struct vhost_virtqueue *vq = &vsock->vqs[i]; @@ -751,9 +753,15 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file) /* Iterating over all connections for all CIDs to find orphans is * inefficient. Room for improvement here. */ - vsock_for_each_connected_socket(vhost_vsock_reset_orphans); + vsock_for_each_connected_socket(&vhost_transport.transport, + vhost_vsock_reset_orphans); - vhost_vsock_stop(vsock); + /* Don't check the owner, because we are in the release path, so we + * need to stop the vsock device in any case. + * vhost_vsock_stop() can not fail in this case, so we don't need to + * check the return code. 
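[Editor's note: a compressed view of the two vhost_vsock_stop() call sites after this change, as assumed from the hunks around this point; only the ioctl path is driven directly by userspace, so only it keeps the owner check.]

/* VHOST_VSOCK_SET_RUNNING ioctl: arbitrary caller, so validate ownership */
ret = vhost_vsock_stop(vsock, true);

/* vhost_vsock_dev_release(): teardown must always proceed, cannot fail */
vhost_vsock_stop(vsock, false);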
*/ + vhost_vsock_stop(vsock, false); vhost_vsock_flush(vsock); vhost_dev_stop(&vsock->dev); @@ -868,7 +876,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl, if (start) return vhost_vsock_start(vsock); else - return vhost_vsock_stop(vsock); + return vhost_vsock_stop(vsock, true); case VHOST_GET_FEATURES: features = VHOST_VSOCK_FEATURES; if (copy_to_user(argp, &features, sizeof(features))) diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 840d9813b0bc..fcc46380e7c9 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -78,6 +78,26 @@ config FRAMEBUFFER_CONSOLE help Low-level framebuffer-based console driver. +config FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + bool "Enable legacy fbcon hardware acceleration code" + depends on FRAMEBUFFER_CONSOLE + default y if PARISC + default n + help + This option enables the fbcon (framebuffer text-based) hardware + acceleration for graphics drivers which were written for the fbdev + graphics interface. + + On modern, mainstream machines (like x86-64), or when using a modern + Linux distribution, those fbdev drivers usually aren't used, so enabling + this option has no effect; you will want to disable it on such machines. + + If you compile this kernel for older machines which still require the + fbdev drivers, you may want to say Y. + + If unsure, say N. + config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY bool "Map the console to the primary display device" depends on FRAMEBUFFER_CONSOLE diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index 01fae2c96965..f98e8f298bc1 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -43,6 +43,21 @@ static void update_attr(u8 *dst, u8 *src, int attribute, } } +static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fb_copyarea area; + + area.sx = sx * vc->vc_font.width; + area.sy = sy * vc->vc_font.height; + area.dx = dx * vc->vc_font.width; + area.dy = dy * vc->vc_font.height; + area.height = height * vc->vc_font.height; + area.width = width * vc->vc_font.width; + + info->fbops->fb_copyarea(info, &area); +} + static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { @@ -378,6 +393,7 @@ static int bit_update_start(struct fb_info *info) void fbcon_set_bitops(struct fbcon_ops *ops) { + ops->bmove = bit_bmove; ops->clear = bit_clear; ops->putcs = bit_putcs; ops->clear_margins = bit_clear_margins; diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 99ecd9a6d844..2fc1b80a26ad 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -173,6 +173,8 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s, int count, int ypos, int xpos); static void fbcon_clear_margins(struct vc_data *vc, int bottom_only); static void fbcon_cursor(struct vc_data *vc, int mode); +static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, + int height, int width); static int fbcon_switch(struct vc_data *vc); static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch); static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table); @@ -180,8 +182,16 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table); /* * Internal routines */ +static __inline__ void ywrap_up(struct vc_data *vc,
int count); +static __inline__ void ywrap_down(struct vc_data *vc, int count); +static __inline__ void ypan_up(struct vc_data *vc, int count); +static __inline__ void ypan_down(struct vc_data *vc, int count); +static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx, + int dy, int dx, int height, int width, u_int y_break); static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, int unit); +static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, + int line, int count, int dy); static void fbcon_modechanged(struct fb_info *info); static void fbcon_set_all_vcs(struct fb_info *info); static void fbcon_start(void); @@ -1125,6 +1135,14 @@ static void fbcon_init(struct vc_data *vc, int init) ops->graphics = 0; +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + if ((info->flags & FBINFO_HWACCEL_COPYAREA) && + !(info->flags & FBINFO_HWACCEL_DISABLED)) + p->scrollmode = SCROLL_MOVE; + else /* default to something safe */ + p->scrollmode = SCROLL_REDRAW; +#endif + /* * ++guenther: console.c:vc_allocate() relies on initializing * vc_{cols,rows}, but we must not set those if we are only @@ -1211,13 +1229,14 @@ finished: * This system is now divided into two levels because of complications * caused by hardware scrolling. Top level functions: * - * fbcon_clear(), fbcon_putc(), fbcon_clear_margins() + * fbcon_bmove(), fbcon_clear(), fbcon_putc(), fbcon_clear_margins() * * handles y values in range [0, scr_height-1] that correspond to real * screen positions. y_wrap shift means that first line of bitmap may be * anywhere on this display. These functions convert lineoffsets to * bitmap offsets and deal with the wrap-around case by splitting blits. * + * fbcon_bmove_physical_8() -- These functions are fast implementations * fbcon_clear_physical_8() -- of the original fbcon_XXX fns.
* fbcon_putc_physical_8() -- (font width != 8) may be added later * @@ -1390,6 +1409,224 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, } } +static __inline__ void ywrap_up(struct vc_data *vc, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + p->yscroll += count; + if (p->yscroll >= p->vrows) /* Deal with wrap */ + p->yscroll -= p->vrows; + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode |= FB_VMODE_YWRAP; + ops->update_start(info); + scrollback_max += count; + if (scrollback_max > scrollback_phys_max) + scrollback_max = scrollback_phys_max; + scrollback_current = 0; +} + +static __inline__ void ywrap_down(struct vc_data *vc, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + p->yscroll -= count; + if (p->yscroll < 0) /* Deal with wrap */ + p->yscroll += p->vrows; + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode |= FB_VMODE_YWRAP; + ops->update_start(info); + scrollback_max -= count; + if (scrollback_max < 0) + scrollback_max = 0; + scrollback_current = 0; +} + +static __inline__ void ypan_up(struct vc_data *vc, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_display *p = &fb_display[vc->vc_num]; + struct fbcon_ops *ops = info->fbcon_par; + + p->yscroll += count; + if (p->yscroll > p->vrows - vc->vc_rows) { + ops->bmove(vc, info, p->vrows - vc->vc_rows, + 0, 0, 0, vc->vc_rows, vc->vc_cols); + p->yscroll -= p->vrows - vc->vc_rows; + } + + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode &= ~FB_VMODE_YWRAP; + ops->update_start(info); + fbcon_clear_margins(vc, 1); + scrollback_max += count; + if (scrollback_max > scrollback_phys_max) + scrollback_max = scrollback_phys_max; + scrollback_current = 0; +} + +static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + p->yscroll += count; + + if (p->yscroll > p->vrows - vc->vc_rows) { + p->yscroll -= p->vrows - vc->vc_rows; + fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t); + } + + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode &= ~FB_VMODE_YWRAP; + ops->update_start(info); + fbcon_clear_margins(vc, 1); + scrollback_max += count; + if (scrollback_max > scrollback_phys_max) + scrollback_max = scrollback_phys_max; + scrollback_current = 0; +} + +static __inline__ void ypan_down(struct vc_data *vc, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_display *p = &fb_display[vc->vc_num]; + struct fbcon_ops *ops = info->fbcon_par; + + p->yscroll -= count; + if (p->yscroll < 0) { + ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows, + 0, vc->vc_rows, vc->vc_cols); + p->yscroll += p->vrows - vc->vc_rows; + } + + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode &= ~FB_VMODE_YWRAP; + ops->update_start(info); + fbcon_clear_margins(vc, 1); + scrollback_max -= count; + if (scrollback_max < 0) + scrollback_max = 0; + scrollback_current = 0; +} + +static __inline__ void 
ypan_down_redraw(struct vc_data *vc, int t, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + p->yscroll -= count; + + if (p->yscroll < 0) { + p->yscroll += p->vrows - vc->vc_rows; + fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count); + } + + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode &= ~FB_VMODE_YWRAP; + ops->update_start(info); + fbcon_clear_margins(vc, 1); + scrollback_max -= count; + if (scrollback_max < 0) + scrollback_max = 0; + scrollback_current = 0; +} + +static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, + int line, int count, int dy) +{ + unsigned short *s = (unsigned short *) + (vc->vc_origin + vc->vc_size_row * line); + + while (count--) { + unsigned short *start = s; + unsigned short *le = advance_row(s, 1); + unsigned short c; + int x = 0; + unsigned short attr = 1; + + do { + c = scr_readw(s); + if (attr != (c & 0xff00)) { + attr = c & 0xff00; + if (s > start) { + fbcon_putcs(vc, start, s - start, + dy, x); + x += s - start; + start = s; + } + } + console_conditional_schedule(); + s++; + } while (s < le); + if (s > start) + fbcon_putcs(vc, start, s - start, dy, x); + console_conditional_schedule(); + dy++; + } +} + +static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info, + struct fbcon_display *p, int line, int count, int ycount) +{ + int offset = ycount * vc->vc_cols; + unsigned short *d = (unsigned short *) + (vc->vc_origin + vc->vc_size_row * line); + unsigned short *s = d + offset; + struct fbcon_ops *ops = info->fbcon_par; + + while (count--) { + unsigned short *start = s; + unsigned short *le = advance_row(s, 1); + unsigned short c; + int x = 0; + + do { + c = scr_readw(s); + + if (c == scr_readw(d)) { + if (s > start) { + ops->bmove(vc, info, line + ycount, x, + line, x, 1, s-start); + x += s - start + 1; + start = s + 1; + } else { + x++; + start++; + } + } + + scr_writew(c, d); + console_conditional_schedule(); + s++; + d++; + } while (s < le); + if (s > start) + ops->bmove(vc, info, line + ycount, x, line, x, 1, + s-start); + console_conditional_schedule(); + if (ycount > 0) + line++; + else { + line--; + /* NOTE: We subtract two lines from these pointers */ + s -= vc->vc_size_row; + d -= vc->vc_size_row; + } + } +} + static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p, int line, int count, int offset) { @@ -1450,6 +1687,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_display *p = &fb_display[vc->vc_num]; + int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK; if (fbcon_is_inactive(vc, info)) return true; @@ -1466,32 +1704,291 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, case SM_UP: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; - fbcon_redraw(vc, p, t, b - t - count, - count * vc->vc_cols); - fbcon_clear(vc, b - count, 0, count, vc->vc_cols); - scr_memsetw((unsigned short *) (vc->vc_origin + - vc->vc_size_row * - (b - count)), - vc->vc_video_erase_char, - vc->vc_size_row * count); - return true; + if (logo_shown >= 0) + goto redraw_up; + switch (fb_scrollmode(p)) { + case SCROLL_MOVE: + fbcon_redraw_blit(vc, info, p, t, b - t - count, + count); + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + 
vc->vc_size_row * + (b - count)), + vc->vc_video_erase_char, + vc->vc_size_row * count); + return true; + + case SCROLL_WRAP_MOVE: + if (b - t - count > 3 * vc->vc_rows >> 2) { + if (t > 0) + fbcon_bmove(vc, 0, 0, count, 0, t, + vc->vc_cols); + ywrap_up(vc, count); + if (vc->vc_rows - b > 0) + fbcon_bmove(vc, b - count, 0, b, 0, + vc->vc_rows - b, + vc->vc_cols); + } else if (info->flags & FBINFO_READS_FAST) + fbcon_bmove(vc, t + count, 0, t, 0, + b - t - count, vc->vc_cols); + else + goto redraw_up; + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_REDRAW: + if ((p->yscroll + count <= + 2 * (p->vrows - vc->vc_rows)) + && ((!scroll_partial && (b - t == vc->vc_rows)) + || (scroll_partial + && (b - t - count > + 3 * vc->vc_rows >> 2)))) { + if (t > 0) + fbcon_redraw_move(vc, p, 0, t, count); + ypan_up_redraw(vc, t, count); + if (vc->vc_rows - b > 0) + fbcon_redraw_move(vc, p, b, + vc->vc_rows - b, b); + } else + fbcon_redraw_move(vc, p, t + count, b - t - count, t); + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_MOVE: + if ((p->yscroll + count <= + 2 * (p->vrows - vc->vc_rows)) + && ((!scroll_partial && (b - t == vc->vc_rows)) + || (scroll_partial + && (b - t - count > + 3 * vc->vc_rows >> 2)))) { + if (t > 0) + fbcon_bmove(vc, 0, 0, count, 0, t, + vc->vc_cols); + ypan_up(vc, count); + if (vc->vc_rows - b > 0) + fbcon_bmove(vc, b - count, 0, b, 0, + vc->vc_rows - b, + vc->vc_cols); + } else if (info->flags & FBINFO_READS_FAST) + fbcon_bmove(vc, t + count, 0, t, 0, + b - t - count, vc->vc_cols); + else + goto redraw_up; + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + break; + + case SCROLL_REDRAW: + redraw_up: + fbcon_redraw(vc, p, t, b - t - count, + count * vc->vc_cols); + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + (b - count)), + vc->vc_video_erase_char, + vc->vc_size_row * count); + return true; + } + break; case SM_DOWN: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; - fbcon_redraw(vc, p, b - 1, b - t - count, - -count * vc->vc_cols); - fbcon_clear(vc, t, 0, count, vc->vc_cols); - scr_memsetw((unsigned short *) (vc->vc_origin + - vc->vc_size_row * - t), - vc->vc_video_erase_char, - vc->vc_size_row * count); - return true; + if (logo_shown >= 0) + goto redraw_down; + switch (fb_scrollmode(p)) { + case SCROLL_MOVE: + fbcon_redraw_blit(vc, info, p, b - 1, b - t - count, + -count); + fbcon_clear(vc, t, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + t), + vc->vc_video_erase_char, + vc->vc_size_row * count); + return true; + + case SCROLL_WRAP_MOVE: + if (b - t - count > 3 * vc->vc_rows >> 2) { + if (vc->vc_rows - b > 0) + fbcon_bmove(vc, b, 0, b - count, 0, + vc->vc_rows - b, + vc->vc_cols); + ywrap_down(vc, count); + if (t > 0) + fbcon_bmove(vc, count, 0, 0, 0, t, + vc->vc_cols); + } else if (info->flags & FBINFO_READS_FAST) + fbcon_bmove(vc, t, 0, t + count, 0, + b - t - count, vc->vc_cols); + else + goto redraw_down; + fbcon_clear(vc, t, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_MOVE: + if ((count - p->yscroll <= p->vrows - vc->vc_rows) + && ((!scroll_partial && (b - t == vc->vc_rows)) + || (scroll_partial + && (b - t - count > + 3 * vc->vc_rows >> 2)))) { + if (vc->vc_rows - b > 0) + fbcon_bmove(vc, b, 0, b - count, 0, + vc->vc_rows - b, + vc->vc_cols); + ypan_down(vc, count); + if (t > 0) + fbcon_bmove(vc, count, 0, 0, 0, t, + vc->vc_cols); + } else 
if (info->flags & FBINFO_READS_FAST) + fbcon_bmove(vc, t, 0, t + count, 0, + b - t - count, vc->vc_cols); + else + goto redraw_down; + fbcon_clear(vc, t, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_REDRAW: + if ((count - p->yscroll <= p->vrows - vc->vc_rows) + && ((!scroll_partial && (b - t == vc->vc_rows)) + || (scroll_partial + && (b - t - count > + 3 * vc->vc_rows >> 2)))) { + if (vc->vc_rows - b > 0) + fbcon_redraw_move(vc, p, b, vc->vc_rows - b, + b - count); + ypan_down_redraw(vc, t, count); + if (t > 0) + fbcon_redraw_move(vc, p, count, t, 0); + } else + fbcon_redraw_move(vc, p, t, b - t - count, t + count); + fbcon_clear(vc, t, 0, count, vc->vc_cols); + break; + + case SCROLL_REDRAW: + redraw_down: + fbcon_redraw(vc, p, b - 1, b - t - count, + -count * vc->vc_cols); + fbcon_clear(vc, t, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + t), + vc->vc_video_erase_char, + vc->vc_size_row * count); + return true; + } } return false; } + +static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, + int height, int width) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + if (fbcon_is_inactive(vc, info)) + return; + + if (!width || !height) + return; + + /* Split blits that cross physical y_wrap case. + * Pathological case involves 4 blits, better to use recursive + * code rather than unrolled case + * + * Recursive invocations don't need to erase the cursor over and + * over again, so we use fbcon_bmove_rec() + */ + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width, + p->vrows - p->yscroll); +} + +static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx, + int dy, int dx, int height, int width, u_int y_break) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + u_int b; + + if (sy < y_break && sy + height > y_break) { + b = y_break - sy; + if (dy < sy) { /* Avoid trashing self */ + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + } else { + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + } + return; + } + + if (dy < y_break && dy + height > y_break) { + b = y_break - dy; + if (dy < sy) { /* Avoid trashing self */ + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + } else { + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + } + return; + } + ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx, + height, width); +} + +static void updatescrollmode_accel(struct fbcon_display *p, + struct fb_info *info, + struct vc_data *vc) +{ +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + struct fbcon_ops *ops = info->fbcon_par; + int cap = info->flags; + u16 t = 0; + int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep, + info->fix.xpanstep); + int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t); + int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); + int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual, + info->var.xres_virtual); + int good_pan = (cap & FBINFO_HWACCEL_YPAN) && + divides(ypan, vc->vc_font.height) && vyres > yres; + int good_wrap = (cap & 
FBINFO_HWACCEL_YWRAP) && + divides(ywrap, vc->vc_font.height) && + divides(vc->vc_font.height, vyres) && + divides(vc->vc_font.height, yres); + int reading_fast = cap & FBINFO_READS_FAST; + int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) && + !(cap & FBINFO_HWACCEL_DISABLED); + int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) && + !(cap & FBINFO_HWACCEL_DISABLED); + + if (good_wrap || good_pan) { + if (reading_fast || fast_copyarea) + p->scrollmode = good_wrap ? + SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE; + else + p->scrollmode = good_wrap ? SCROLL_REDRAW : + SCROLL_PAN_REDRAW; + } else { + if (reading_fast || (fast_copyarea && !fast_imageblit)) + p->scrollmode = SCROLL_MOVE; + else + p->scrollmode = SCROLL_REDRAW; + } +#endif +} + static void updatescrollmode(struct fbcon_display *p, struct fb_info *info, struct vc_data *vc) @@ -1507,6 +2004,9 @@ static void updatescrollmode(struct fbcon_display *p, p->vrows -= (yres - (fh * vc->vc_rows)) / fh; if ((yres % fh) && (vyres % fh < yres % fh)) p->vrows--; + + /* update scrollmode in case hardware acceleration is used */ + updatescrollmode_accel(p, info, vc); } #define PITCH(w) (((w) + 7) >> 3) @@ -1664,7 +2164,21 @@ static int fbcon_switch(struct vc_data *vc) updatescrollmode(p, info, vc); - scrollback_phys_max = 0; + switch (fb_scrollmode(p)) { + case SCROLL_WRAP_MOVE: + scrollback_phys_max = p->vrows - vc->vc_rows; + break; + case SCROLL_PAN_MOVE: + case SCROLL_PAN_REDRAW: + scrollback_phys_max = p->vrows - 2 * vc->vc_rows; + if (scrollback_phys_max < 0) + scrollback_phys_max = 0; + break; + default: + scrollback_phys_max = 0; + break; + } + scrollback_max = 0; scrollback_current = 0; diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index a00603b4451a..969d41ecede5 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -29,6 +29,9 @@ struct fbcon_display { /* Filled in by the low-level console driver */ const u_char *fontdata; int userfont; /* != 0 if fontdata kmalloc()ed */ +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + u_short scrollmode; /* Scroll Method, use fb_scrollmode() */ +#endif u_short inverse; /* != 0 text black on white as default */ short yscroll; /* Hardware scrolling */ int vrows; /* number of virtual rows */ @@ -51,6 +54,8 @@ struct fbcon_display { }; struct fbcon_ops { + void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width); void (*clear)(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width); void (*putcs)(struct vc_data *vc, struct fb_info *info, @@ -149,6 +154,73 @@ static inline int attr_col_ec(int shift, struct vc_data *vc, #define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0) #define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1) + /* + * Scroll Method + */ + +/* There are several methods fbcon can use to move text around the screen: + * + * Operation Pan Wrap + *--------------------------------------------- + * SCROLL_MOVE copyarea No No + * SCROLL_PAN_MOVE copyarea Yes No + * SCROLL_WRAP_MOVE copyarea No Yes + * SCROLL_REDRAW imageblit No No + * SCROLL_PAN_REDRAW imageblit Yes No + * SCROLL_WRAP_REDRAW imageblit No Yes + * + * (SCROLL_WRAP_REDRAW is not implemented yet) + * + * In general, fbcon will choose the best scrolling + * method based on the rule below: + * + * Pan/Wrap > accel imageblit > accel copyarea > + * soft imageblit > (soft copyarea) + * + * Exception to the rule: Pan + accel copyarea is + * 
preferred over Pan + accel imageblit. + * + * The above is typical for PCI/AGP cards. Unless + * overridden, fbcon will never use soft copyarea. + * + * If you need to override the above rule, set the + * appropriate flags in fb_info->flags. For example, + * to prefer copyarea over imageblit, set + * FBINFO_READS_FAST. + * + * Other notes: + * + use the hardware engine to move the text + * (hw-accelerated copyarea() and fillrect()) + * + use hardware-supported panning on a large virtual screen + * + amifb can not only pan, but also wrap the display by N lines + * (i.e. visible line i = physical line (i+N) % yres). + * + read what's already rendered on the screen and + * write it in a different place (this is cfb_copyarea()) + * + re-render the text to the screen + * + * Whether to use wrapping or panning can only be figured out at + * runtime (when we know whether our font height is a multiple + * of the pan/wrap step) + * + */ + +#define SCROLL_MOVE 0x001 +#define SCROLL_PAN_MOVE 0x002 +#define SCROLL_WRAP_MOVE 0x003 +#define SCROLL_REDRAW 0x004 +#define SCROLL_PAN_REDRAW 0x005 + +static inline u_short fb_scrollmode(struct fbcon_display *fb) +{ +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + return fb->scrollmode; +#else + /* hardcoded to SCROLL_REDRAW if acceleration was disabled. */ + return SCROLL_REDRAW; +#endif +} + + #ifdef CONFIG_FB_TILEBLITTING extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info); #endif diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index ffa78936eaab..2789ace79634 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -59,12 +59,31 @@ static void ccw_update_attr(u8 *dst, u8 *src, int attribute, } } + +static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fbcon_ops *ops = info->fbcon_par; + struct fb_copyarea area; + u32 vyres = GETVYRES(ops->p, info); + + area.sx = sy * vc->vc_font.height; + area.sy = vyres - ((sx + width) * vc->vc_font.width); + area.dx = dy * vc->vc_font.height; + area.dy = vyres - ((dx + width) * vc->vc_font.width); + area.width = height * vc->vc_font.height; + area.height = width * vc->vc_font.width; + + info->fbops->fb_copyarea(info, &area); +} + static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { + struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; - u32 vyres = info->var.yres; + u32 vyres = GETVYRES(ops->p, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = sy * vc->vc_font.height; @@ -121,7 +140,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = info->var.yres; + u32 vyres = GETVYRES(ops->p, info); if (!ops->fontbuffer) return; @@ -210,7 +229,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = info->var.yres; + u32 vyres = GETVYRES(ops->p, info); if (!ops->fontbuffer) return; @@ -368,7 +387,7 @@ static int ccw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; u32 yoffset; - u32 vyres = info->var.yres; + u32 vyres = GETVYRES(ops->p, info); int err; yoffset = (vyres - info->var.yres) - ops->var.xoffset; @@ -383,6 +402,7 @@ static int ccw_update_start(struct fb_info *info) void fbcon_rotate_ccw(struct fbcon_ops *ops) { + ops->bmove = ccw_bmove; ops->clear = ccw_clear; ops->putcs = ccw_putcs; ops->clear_margins = ccw_clear_margins; diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index 92e5b7fb51ee..86a254c1b2b7 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -44,12 +44,31 @@ static void cw_update_attr(u8 *dst, u8 *src, int attribute, } } + +static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fbcon_ops *ops = info->fbcon_par; + struct fb_copyarea area; + u32 vxres = GETVXRES(ops->p, info); + + area.sx = vxres - ((sy + height) * vc->vc_font.height); + area.sy = sx * vc->vc_font.width; + area.dx = vxres - ((dy + height) * vc->vc_font.height); + area.dy = dx * vc->vc_font.width; + area.width = height * vc->vc_font.height; + area.height = width * vc->vc_font.width; + + info->fbops->fb_copyarea(info, &area); +} + static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { + struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; - u32 vxres = info->var.xres; + u32 vxres = GETVXRES(ops->p, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = vxres - ((sy + height) * vc->vc_font.height); @@ -106,7 +125,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vxres = info->var.xres; + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -193,7 +212,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vxres = info->var.xres; + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -350,7 +369,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, static int cw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; - u32 vxres = info->var.xres; + u32 vxres = GETVXRES(ops->p, info); u32 xoffset; int err; @@ -366,6 +385,7 @@ static int cw_update_start(struct fb_info *info) void fbcon_rotate_cw(struct fbcon_ops *ops) { + ops->bmove = cw_bmove; ops->clear = cw_clear; ops->putcs = cw_putcs; ops->clear_margins = cw_clear_margins; diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h index b528b2e54283..01cbe303b8a2 100644 --- a/drivers/video/fbdev/core/fbcon_rotate.h +++ b/drivers/video/fbdev/core/fbcon_rotate.h @@ -11,6 +11,15 @@ #ifndef _FBCON_ROTATE_H #define _FBCON_ROTATE_H +#define GETVYRES(s,i) ({ \ + (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE) ? \ + (i)->var.yres : (i)->var.yres_virtual; }) + +#define GETVXRES(s,i) ({ \ + (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE || !(i)->fix.xpanstep) ? \ + (i)->var.xres : (i)->var.xres_virtual; }) + + static inline int pattern_test_bit(u32 x, u32 y, u32 pitch, const char *pat) { u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8; diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 09619bd8e021..23bc045769d0 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -44,13 +44,33 @@ static void ud_update_attr(u8 *dst, u8 *src, int attribute, } } + +static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fbcon_ops *ops = info->fbcon_par; + struct fb_copyarea area; + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); + + area.sy = vyres - ((sy + height) * vc->vc_font.height); + area.sx = vxres - ((sx + width) * vc->vc_font.width); + area.dy = vyres - ((dy + height) * vc->vc_font.height); + area.dx = vxres - ((dx + width) * vc->vc_font.width); + area.height = height * vc->vc_font.height; + area.width = width * vc->vc_font.width; + + info->fbops->fb_copyarea(info, &area); +} + static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { + struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; - u32 vyres = info->var.yres; - u32 vxres = info->var.xres; + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dy = vyres - ((sy + height) * vc->vc_font.height); @@ -142,8 +162,8 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info, u32 mod = vc->vc_font.width % 8, cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = info->var.yres; - u32 vxres = info->var.xres; + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -239,8 +259,8 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = info->var.yres; - u32 vxres = info->var.xres; + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -390,8 +410,8 @@ static int ud_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; int xoffset, yoffset; - u32 vyres = info->var.yres; - u32 vxres = info->var.xres; + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); int err; xoffset = vxres - info->var.xres - ops->var.xoffset; @@ -409,6 +429,7 @@ static int ud_update_start(struct fb_info *info) void fbcon_rotate_ud(struct fbcon_ops *ops) { + ops->bmove = ud_bmove; ops->clear = ud_clear; ops->putcs = ud_putcs; ops->clear_margins = ud_clear_margins; diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 0fa7ede94fa6..13083ad8d751 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1160,6 +1160,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, ret = fbcon_set_con2fb_map_ioctl(argp); break; case FBIOBLANK: + if (arg > FB_BLANK_POWERDOWN) + return -EINVAL; console_lock(); lock_fb_info(info); ret = fb_blank(info, arg); diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c index 72af95053bcb..2768eff247ba 100644 --- a/drivers/video/fbdev/core/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -16,6 +16,21 @@ #include <asm/types.h> #include "fbcon.h" +static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fb_tilearea area; + + area.sx = sx; + area.sy = sy; + area.dx = dx; + area.dy = dy; + area.height = height; + area.width = width; + + info->tileops->fb_tilecopy(info, &area); +} + static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { @@ -118,6 +133,7 @@ void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info) struct fb_tilemap map; struct fbcon_ops *ops = info->fbcon_par; + ops->bmove = tile_bmove; ops->clear = tile_clear; ops->putcs = tile_putcs; ops->clear_margins = tile_clear_margins; diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c index 0fe922f726e9..bcacfb6934fa 100644 --- a/drivers/video/fbdev/skeletonfb.c +++ b/drivers/video/fbdev/skeletonfb.c @@ -505,15 +505,15 @@ void xxxfb_fillrect(struct fb_info *p, const struct fb_fillrect *region) } /** - * xxxfb_copyarea - OBSOLETE function. + * xxxfb_copyarea - REQUIRED function. Can use generic routines if + the hardware is non-accelerated and packed-pixel based. * Copies one area of the screen to another area. 
- * Will be deleted in a future version * * @info: frame buffer structure that represents a single frame buffer * @area: Structure providing the data to copy the framebuffer contents * from one region to another. * - * This drawing operation copied a rectangular area from one area of the + * This drawing operation copies a rectangular area from one area of the * screen to another area. */ void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area) @@ -645,9 +645,9 @@ static const struct fb_ops xxxfb_ops = { .fb_setcolreg = xxxfb_setcolreg, .fb_blank = xxxfb_blank, .fb_pan_display = xxxfb_pan_display, - .fb_fillrect = xxxfb_fillrect, /* Needed !!! */ - .fb_copyarea = xxxfb_copyarea, /* Obsolete */ - .fb_imageblit = xxxfb_imageblit, /* Needed !!! */ + .fb_fillrect = xxxfb_fillrect, /* Needed !!! */ + .fb_copyarea = xxxfb_copyarea, /* Needed !!! */ + .fb_imageblit = xxxfb_imageblit, /* Needed !!! */ .fb_cursor = xxxfb_cursor, /* Optional !!! */ .fb_sync = xxxfb_sync, .fb_ioctl = xxxfb_ioctl, diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index 265865610edc..bebb2eea6448 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c @@ -1041,6 +1041,47 @@ stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area) SETUP_FB(fb); } +#define ARTIST_VRAM_SIZE 0x000804 +#define ARTIST_VRAM_SRC 0x000808 +#define ARTIST_VRAM_SIZE_TRIGGER_WINFILL 0x000a04 +#define ARTIST_VRAM_DEST_TRIGGER_BLOCKMOVE 0x000b00 +#define ARTIST_SRC_BM_ACCESS 0x018008 +#define ARTIST_FGCOLOR 0x018010 +#define ARTIST_BGCOLOR 0x018014 +#define ARTIST_BITMAP_OP 0x01801c + +static void +stifb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct stifb_info *fb = container_of(info, struct stifb_info, info); + + if (rect->rop != ROP_COPY) + return cfb_fillrect(info, rect); + + SETUP_HW(fb); + + if (fb->info.var.bits_per_pixel == 32) { + WRITE_WORD(0xBBA0A000, fb, REG_10); + + NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff); + } else { + WRITE_WORD(fb->id == S9000_ID_HCRX ? 0x13a02000 : 0x13a01000, fb, REG_10); + + NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xff); + } + + WRITE_WORD(0x03000300, fb, ARTIST_BITMAP_OP); + WRITE_WORD(0x2ea01000, fb, ARTIST_SRC_BM_ACCESS); + NGLE_QUICK_SET_DST_BM_ACCESS(fb, 0x2ea01000); + NGLE_REALLY_SET_IMAGE_FG_COLOR(fb, rect->color); + WRITE_WORD(0, fb, ARTIST_BGCOLOR); + + NGLE_SET_DSTXY(fb, (rect->dx << 16) | (rect->dy)); + SET_LENXY_START_RECFILL(fb, (rect->width << 16) | (rect->height)); + + SETUP_FB(fb); +} + static void __init stifb_init_display(struct stifb_info *fb) { @@ -1105,7 +1146,7 @@ static const struct fb_ops stifb_ops = { .owner = THIS_MODULE, .fb_setcolreg = stifb_setcolreg, .fb_blank = stifb_blank, - .fb_fillrect = cfb_fillrect, + .fb_fillrect = stifb_fillrect, .fb_copyarea = stifb_copyarea, .fb_imageblit = cfb_imageblit, }; @@ -1297,7 +1338,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) goto out_err0; } info->screen_size = fix->smem_len; - info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA; + info->flags = FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; info->pseudo_palette = &fb->pseudo_palette; /* This has to be done !!! 
*/ diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig index 8061e8ef449f..121b9293c737 100644 --- a/drivers/virt/Kconfig +++ b/drivers/virt/Kconfig @@ -13,6 +13,17 @@ menuconfig VIRT_DRIVERS if VIRT_DRIVERS +config VMGENID + tristate "Virtual Machine Generation ID driver" + default y + depends on ACPI + help + Say Y here to use the hypervisor-provided Virtual Machine Generation ID + to reseed the RNG when the VM is cloned. This is highly recommended if + you intend to do any rollback / cloning / snapshotting of VMs. + + Prefer Y to M so that this protection is activated very early. + config FSL_HV_MANAGER tristate "Freescale hypervisor management driver" depends on FSL_SOC diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile index 3e272ea60cd9..108d0ffcc9aa 100644 --- a/drivers/virt/Makefile +++ b/drivers/virt/Makefile @@ -4,6 +4,7 @@ # obj-$(CONFIG_FSL_HV_MANAGER) += fsl_hypervisor.o +obj-$(CONFIG_VMGENID) += vmgenid.o obj-y += vboxguest/ obj-$(CONFIG_NITRO_ENCLAVES) += nitro_enclaves/ diff --git a/drivers/virt/vmgenid.c b/drivers/virt/vmgenid.c new file mode 100644 index 000000000000..0ae1a39f2e28 --- /dev/null +++ b/drivers/virt/vmgenid.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + * + * The "Virtual Machine Generation ID" is exposed via ACPI and changes when a + * virtual machine forks or is cloned. This driver exists for shepherding that + * information to random.c. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/acpi.h> +#include <linux/random.h> + +ACPI_MODULE_NAME("vmgenid"); + +enum { VMGENID_SIZE = 16 }; + +struct vmgenid_state { + u8 *next_id; + u8 this_id[VMGENID_SIZE]; +}; + +static int vmgenid_add(struct acpi_device *device) +{ + struct acpi_buffer parsed = { ACPI_ALLOCATE_BUFFER }; + struct vmgenid_state *state; + union acpi_object *obj; + phys_addr_t phys_addr; + acpi_status status; + int ret = 0; + + state = devm_kmalloc(&device->dev, sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + status = acpi_evaluate_object(device->handle, "ADDR", NULL, &parsed); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, "Evaluating ADDR")); + return -ENODEV; + } + obj = parsed.pointer; + if (!obj || obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 2 || + obj->package.elements[0].type != ACPI_TYPE_INTEGER || + obj->package.elements[1].type != ACPI_TYPE_INTEGER) { + ret = -EINVAL; + goto out; + } + + phys_addr = (obj->package.elements[0].integer.value << 0) | + (obj->package.elements[1].integer.value << 32); + state->next_id = devm_memremap(&device->dev, phys_addr, VMGENID_SIZE, MEMREMAP_WB); + if (IS_ERR(state->next_id)) { + ret = PTR_ERR(state->next_id); + goto out; + } + + memcpy(state->this_id, state->next_id, sizeof(state->this_id)); + add_device_randomness(state->this_id, sizeof(state->this_id)); + + device->driver_data = state; + +out: + ACPI_FREE(parsed.pointer); + return ret; +} + +static void vmgenid_notify(struct acpi_device *device, u32 event) +{ + struct vmgenid_state *state = acpi_driver_data(device); + u8 old_id[VMGENID_SIZE]; + + memcpy(old_id, state->this_id, sizeof(old_id)); + memcpy(state->this_id, state->next_id, sizeof(state->this_id)); + if (!memcmp(old_id, state->this_id, sizeof(old_id))) + return; + add_vmfork_randomness(state->this_id, sizeof(state->this_id)); +} + +static const struct acpi_device_id vmgenid_ids[] = { + { "VM_GEN_COUNTER", 0 }, + { } +}; + +static struct acpi_driver 
vmgenid_driver = { + .name = "vmgenid", + .ids = vmgenid_ids, + .owner = THIS_MODULE, + .ops = { + .add = vmgenid_add, + .notify = vmgenid_notify + } +}; + +module_acpi_driver(vmgenid_driver); + +MODULE_DEVICE_TABLE(acpi, vmgenid_ids); +MODULE_DESCRIPTION("Virtual Machine Generation ID"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 34f80b7a8a64..492fc26f0b65 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -105,7 +105,6 @@ config VIRTIO_BALLOON config VIRTIO_MEM tristate "Virtio mem driver" - default m depends on X86_64 depends on VIRTIO depends on MEMORY_HOTPLUG diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 00ac9db792a4..22f15f444f75 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -166,14 +166,13 @@ void virtio_add_status(struct virtio_device *dev, unsigned int status) } EXPORT_SYMBOL_GPL(virtio_add_status); -int virtio_finalize_features(struct virtio_device *dev) +/* Do some validation, then set FEATURES_OK */ +static int virtio_features_ok(struct virtio_device *dev) { - int ret = dev->config->finalize_features(dev); unsigned status; + int ret; might_sleep(); - if (ret) - return ret; ret = arch_has_restricted_virtio_memory_access(); if (ret) { @@ -202,8 +201,23 @@ int virtio_finalize_features(struct virtio_device *dev) } return 0; } -EXPORT_SYMBOL_GPL(virtio_finalize_features); +/** + * virtio_reset_device - quiesce device for removal + * @dev: the device to reset + * + * Prevents device from sending interrupts and accessing memory. + * + * Generally used for cleanup during driver / device removal. + * + * Once this has been invoked, caller must ensure that + * virtqueue_notify / virtqueue_kick are not in progress. + * + * Note: this guarantees that vq callbacks are not in progress; however, the caller + * is responsible for preventing access from other contexts, such as a system + * call/workqueue/bh. Invoking virtio_break_device then flushing any such + contexts is one way to handle that. + */ void virtio_reset_device(struct virtio_device *dev) { dev->config->reset(dev); @@ -245,17 +259,6 @@ static int virtio_dev_probe(struct device *_d) driver_features_legacy = driver_features; } - /* - * Some devices detect legacy solely via F_VERSION_1. Write - * F_VERSION_1 to force LE config space accesses before FEATURES_OK for - * these when needed. - */ - if (drv->validate && !virtio_legacy_is_little_endian() - && device_features & BIT_ULL(VIRTIO_F_VERSION_1)) { - dev->features = BIT_ULL(VIRTIO_F_VERSION_1); - dev->config->finalize_features(dev); - } - if (device_features & (1ULL << VIRTIO_F_VERSION_1)) dev->features = driver_features & device_features; else @@ -266,13 +269,26 @@ static int virtio_dev_probe(struct device *_d) if (device_features & (1ULL << i)) __virtio_set_bit(dev, i); + err = dev->config->finalize_features(dev); + if (err) + goto err; + if (drv->validate) { + u64 features = dev->features; + err = drv->validate(dev); if (err) goto err; + + /* Did validation change any features? Then write them again. */ + if (features != dev->features) { + err = dev->config->finalize_features(dev); + if (err) + goto err; + } } - err = virtio_finalize_features(dev); + err = virtio_features_ok(dev); if (err) goto err; @@ -496,7 +512,11 @@ int virtio_device_restore(struct virtio_device *dev) /* We have a driver! 
*/ virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); - ret = virtio_finalize_features(dev); + ret = dev->config->finalize_features(dev); + if (ret) + goto err; + + ret = virtio_features_ok(dev); if (ret) goto err; diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c index 7767a7f0119b..76504559bc25 100644 --- a/drivers/virtio/virtio_vdpa.c +++ b/drivers/virtio/virtio_vdpa.c @@ -317,7 +317,7 @@ static int virtio_vdpa_finalize_features(struct virtio_device *vdev) /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); - return vdpa_set_features(vdpa, vdev->features, false); + return vdpa_set_features(vdpa, vdev->features); } static const char *virtio_vdpa_bus_name(struct virtio_device *vdev) diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c index 3fa40c723e8e..edb0acd0b832 100644 --- a/drivers/xen/gntalloc.c +++ b/drivers/xen/gntalloc.c @@ -169,20 +169,14 @@ undo: __del_gref(gref); } - /* It's possible for the target domain to map the just-allocated grant - * references by blindly guessing their IDs; if this is done, then - * __del_gref will leave them in the queue_gref list. They need to be - * added to the global list so that we can free them when they are no - * longer referenced. - */ - if (unlikely(!list_empty(&queue_gref))) - list_splice_tail(&queue_gref, &gref_list); mutex_unlock(&gref_mutex); return rc; } static void __del_gref(struct gntalloc_gref *gref) { + unsigned long addr; + if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { uint8_t *tmp = kmap(gref->page); tmp[gref->notify.pgoff] = 0; @@ -196,21 +190,16 @@ static void __del_gref(struct gntalloc_gref *gref) gref->notify.flags = 0; if (gref->gref_id) { - if (gnttab_query_foreign_access(gref->gref_id)) - return; - - if (!gnttab_end_foreign_access_ref(gref->gref_id, 0)) - return; - - gnttab_free_grant_reference(gref->gref_id); + if (gref->page) { + addr = (unsigned long)page_to_virt(gref->page); + gnttab_end_foreign_access(gref->gref_id, 0, addr); + } else + gnttab_free_grant_reference(gref->gref_id); } gref_size--; list_del(&gref->next_gref); - if (gref->page) - __free_page(gref->page); - kfree(gref); } diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 3729bea0c989..5c83d41766c8 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -134,12 +134,9 @@ struct gnttab_ops { */ unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref); /* - * Query the status of a grant entry. Ref parameter is reference of - * queried grant entry, return value is the status of queried entry. - * Detailed status(writing/reading) can be gotten from the return value - * by bit operations. + * Read the frame number related to a given grant reference. 
*/ - int (*query_foreign_access)(grant_ref_t ref); + unsigned long (*read_frame)(grant_ref_t ref); }; struct unmap_refs_callback_data { @@ -284,22 +281,6 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); -static int gnttab_query_foreign_access_v1(grant_ref_t ref) -{ - return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing); -} - -static int gnttab_query_foreign_access_v2(grant_ref_t ref) -{ - return grstatus[ref] & (GTF_reading|GTF_writing); -} - -int gnttab_query_foreign_access(grant_ref_t ref) -{ - return gnttab_interface->query_foreign_access(ref); -} -EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); - static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly) { u16 flags, nflags; @@ -353,6 +334,16 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); +static unsigned long gnttab_read_frame_v1(grant_ref_t ref) +{ + return gnttab_shared.v1[ref].frame; +} + +static unsigned long gnttab_read_frame_v2(grant_ref_t ref) +{ + return gnttab_shared.v2[ref].full_page.frame; +} + struct deferred_entry { struct list_head list; grant_ref_t ref; @@ -382,12 +373,9 @@ static void gnttab_handle_deferred(struct timer_list *unused) spin_unlock_irqrestore(&gnttab_list_lock, flags); if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) { put_free_entry(entry->ref); - if (entry->page) { - pr_debug("freeing g.e. %#x (pfn %#lx)\n", - entry->ref, page_to_pfn(entry->page)); - put_page(entry->page); - } else - pr_info("freeing g.e. %#x\n", entry->ref); + pr_debug("freeing g.e. %#x (pfn %#lx)\n", + entry->ref, page_to_pfn(entry->page)); + put_page(entry->page); kfree(entry); entry = NULL; } else { @@ -412,9 +400,18 @@ static void gnttab_handle_deferred(struct timer_list *unused) static void gnttab_add_deferred(grant_ref_t ref, bool readonly, struct page *page) { - struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + struct deferred_entry *entry; + gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL; const char *what = KERN_WARNING "leaking"; + entry = kmalloc(sizeof(*entry), gfp); + if (!page) { + unsigned long gfn = gnttab_interface->read_frame(ref); + + page = pfn_to_page(gfn_to_pfn(gfn)); + get_page(page); + } + if (entry) { unsigned long flags; @@ -435,11 +432,21 @@ static void gnttab_add_deferred(grant_ref_t ref, bool readonly, what, ref, page ? 
page_to_pfn(page) : -1); } +int gnttab_try_end_foreign_access(grant_ref_t ref) +{ + int ret = _gnttab_end_foreign_access_ref(ref, 0); + + if (ret) + put_free_entry(ref); + + return ret; +} +EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access); + void gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page) { - if (gnttab_end_foreign_access_ref(ref, readonly)) { - put_free_entry(ref); + if (gnttab_try_end_foreign_access(ref)) { if (page != 0) put_page(virt_to_page(page)); } else @@ -1417,7 +1424,7 @@ static const struct gnttab_ops gnttab_v1_ops = { .update_entry = gnttab_update_entry_v1, .end_foreign_access_ref = gnttab_end_foreign_access_ref_v1, .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1, - .query_foreign_access = gnttab_query_foreign_access_v1, + .read_frame = gnttab_read_frame_v1, }; static const struct gnttab_ops gnttab_v2_ops = { @@ -1429,7 +1436,7 @@ static const struct gnttab_ops gnttab_v2_ops = { .update_entry = gnttab_update_entry_v2, .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2, .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2, - .query_foreign_access = gnttab_query_foreign_access_v2, + .read_frame = gnttab_read_frame_v2, }; static bool gnttab_need_v2(void) diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c index 2c890f4f2cbc..72d4e3f193af 100644 --- a/drivers/xen/pci.c +++ b/drivers/xen/pci.c @@ -264,7 +264,7 @@ struct xen_device_domain_owner { }; static DEFINE_SPINLOCK(dev_domain_list_spinlock); -static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list); +static LIST_HEAD(dev_domain_list); static struct xen_device_domain_owner *find_device(struct pci_dev *dev) { diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 3c9ae156b597..0ca351f30a6d 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c @@ -337,8 +337,8 @@ static void free_active_ring(struct sock_mapping *map) if (!map->active.ring) return; - free_pages((unsigned long)map->active.data.in, - map->active.ring->ring_order); + free_pages_exact(map->active.data.in, + PAGE_SIZE << map->active.ring->ring_order); free_page((unsigned long)map->active.ring); } @@ -352,8 +352,8 @@ static int alloc_active_ring(struct sock_mapping *map) goto out; map->active.ring->ring_order = PVCALLS_RING_ORDER; - bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - PVCALLS_RING_ORDER); + bytes = alloc_pages_exact(PAGE_SIZE << PVCALLS_RING_ORDER, + GFP_KERNEL | __GFP_ZERO); if (!bytes) goto out; diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index e8bed1cb76ba..df6890681231 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -379,7 +379,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, unsigned int nr_pages, grant_ref_t *grefs) { int err; - int i, j; + unsigned int i; + grant_ref_t gref_head; + + err = gnttab_alloc_grant_references(nr_pages, &gref_head); + if (err) { + xenbus_dev_fatal(dev, err, "granting access to ring page"); + return err; + } for (i = 0; i < nr_pages; i++) { unsigned long gfn; @@ -389,23 +396,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, else gfn = virt_to_gfn(vaddr); - err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0); - if (err < 0) { - xenbus_dev_fatal(dev, err, - "granting access to ring page"); - goto fail; - } - grefs[i] = err; + grefs[i] = gnttab_claim_grant_reference(&gref_head); + gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id, + gfn, 0); vaddr = vaddr + 
XEN_PAGE_SIZE; } return 0; - -fail: - for (j = 0; j < i; j++) - gnttab_end_foreign_access_ref(grefs[j], 0); - return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring);
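
The restored fbcon_bmove()/fbcon_bmove_rec() pair in the fbcon.c hunks above has one job: split any blit whose source or destination row range straddles the wrap point (y_break) and order the two halves so an overlapping move never overwrites rows it still has to read. Below is a minimal userspace sketch of that splitting rule, reduced to one dimension, with a printf standing in for ops->bmove(); the function names and the test numbers are illustrative, not kernel API:

#include <stdio.h>

/* Illustrative stand-in for ops->bmove(): just report the final blit. */
static void do_blit(int sy, int dy, int height)
{
        printf("blit %d rows: src y=%d -> dst y=%d\n", height, sy, dy);
}

/* Same splitting rule as fbcon_bmove_rec(): if the source or the
 * destination crosses y_break, recurse on the two halves; when moving
 * upwards (dy < sy), copy the lower half first to avoid trashing rows
 * that still have to be read. */
static void bmove_rec(int sy, int dy, int height, int y_break)
{
        int b;

        if (sy < y_break && sy + height > y_break) {
                b = y_break - sy;
                if (dy < sy) {
                        bmove_rec(sy, dy, b, y_break);
                        bmove_rec(sy + b, dy + b, height - b, y_break);
                } else {
                        bmove_rec(sy + b, dy + b, height - b, y_break);
                        bmove_rec(sy, dy, b, y_break);
                }
                return;
        }
        if (dy < y_break && dy + height > y_break) {
                b = y_break - dy;
                if (dy < sy) {
                        bmove_rec(sy, dy, b, y_break);
                        bmove_rec(sy + b, dy + b, height - b, y_break);
                } else {
                        bmove_rec(sy + b, dy + b, height - b, y_break);
                        bmove_rec(sy, dy, b, y_break);
                }
                return;
        }
        do_blit(sy, dy, height);
}

int main(void)
{
        /* A 10-row upward move whose source crosses a wrap at row 16:
         * it splits into a 4-row and a 6-row blit. */
        bmove_rec(12, 4, 10, 16);
        return 0;
}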
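
The scrollback_phys_max switch added to fbcon_switch() above also reduces to simple arithmetic on the virtual rows: wrap mode can use every spare virtual row, while the pan modes must keep a full screen of headroom. A toy recomputation under assumed geometry (100 virtual rows, 37 visible), reusing the SCROLL_* values from the fbcon.h hunk:

#include <stdio.h>

/* Same values as the SCROLL_* constants added to fbcon.h above. */
enum {
        SCROLL_MOVE       = 0x001,
        SCROLL_PAN_MOVE   = 0x002,
        SCROLL_WRAP_MOVE  = 0x003,
        SCROLL_REDRAW     = 0x004,
        SCROLL_PAN_REDRAW = 0x005,
};

/* Mirrors the scrollback_phys_max switch in fbcon_switch(). */
static int scrollback_rows(int mode, int vrows, int rows)
{
        int max;

        switch (mode) {
        case SCROLL_WRAP_MOVE:
                return vrows - rows;    /* all spare virtual rows */
        case SCROLL_PAN_MOVE:
        case SCROLL_PAN_REDRAW:
                max = vrows - 2 * rows; /* keep one screen of pan headroom */
                return max < 0 ? 0 : max;
        default:
                return 0;               /* MOVE/REDRAW: no hardware scrollback */
        }
}

int main(void)
{
        printf("wrap: %d rows\n", scrollback_rows(SCROLL_WRAP_MOVE, 100, 37));
        printf("pan:  %d rows\n", scrollback_rows(SCROLL_PAN_MOVE, 100, 37));
        printf("move: %d rows\n", scrollback_rows(SCROLL_MOVE, 100, 37));
        return 0;
}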
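
Two details of the new vmgenid driver above are worth isolating: the ACPI ADDR package carries the generation-ID buffer's physical address as low and high 32-bit halves, and the notify handler treats only a genuinely different 16-byte ID as a VM fork. A standalone sketch of both steps, with invented values standing in for the ACPI data:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VMGENID_SIZE 16

/* Rebuild the 64-bit physical address exactly as vmgenid_add() does:
 * package element 0 holds the low 32 bits, element 1 the high 32 bits. */
static uint64_t addr_from_package(uint64_t lo, uint64_t hi)
{
        return (lo << 0) | (hi << 32);
}

/* Mirrors vmgenid_notify(): refresh the cached copy and report whether
 * the ID actually changed (spurious notifications are ignored). */
static int id_changed(uint8_t this_id[VMGENID_SIZE],
                      const uint8_t next_id[VMGENID_SIZE])
{
        uint8_t old_id[VMGENID_SIZE];

        memcpy(old_id, this_id, sizeof(old_id));
        memcpy(this_id, next_id, VMGENID_SIZE);
        return memcmp(old_id, this_id, sizeof(old_id)) != 0;
}

int main(void)
{
        uint8_t cur[VMGENID_SIZE] = { 0 };
        const uint8_t after_clone[VMGENID_SIZE] = { [0] = 0xaa, [15] = 0x55 };

        /* Hypothetical package values: low word 0xfe001000, high word 1. */
        printf("phys addr: %#llx\n",
               (unsigned long long)addr_from_package(0xfe001000, 0x1));
        printf("changed: %d\n", id_changed(cur, after_clone)); /* 1: fork */
        printf("changed: %d\n", id_changed(cur, after_clone)); /* 0: spurious */
        return 0;
}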
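
The virtio_dev_probe() rework above fixes the negotiation order: write the features (finalize_features), run the driver's validate() callback, write the features a second time only if validate() cleared any, and only then perform the FEATURES_OK checks in virtio_features_ok(). A toy model of that control flow follows; the stub device, the callbacks, and the feature bits are all invented for the demo:

#include <stdint.h>
#include <stdio.h>

struct toy_dev {
        uint64_t features;
        int finalize_calls;
};

/* Stand-in for dev->config->finalize_features(): write the feature
 * bits to the (imaginary) transport. */
static int finalize_features(struct toy_dev *d)
{
        d->finalize_calls++;
        return 0;       /* a real transport could fail here */
}

/* Stand-in for drv->validate(): some drivers clear a feature they
 * cannot support; a nonzero return would abort the probe. */
static int validate(struct toy_dev *d)
{
        d->features &= ~(1ULL << 5);
        return 0;
}

int main(void)
{
        struct toy_dev d = { .features = (1ULL << 5) | (1ULL << 32) };
        uint64_t features;
        int err;

        err = finalize_features(&d);
        if (err)
                return 1;

        features = d.features;
        err = validate(&d);
        if (err)
                return 1;

        /* Did validation change any features? Then write them again. */
        if (features != d.features) {
                err = finalize_features(&d);
                if (err)
                        return 1;
        }

        /* ...at this point virtio_features_ok() would set FEATURES_OK. */
        printf("finalize_features ran %d times, features=%#llx\n",
               d.finalize_calls, (unsigned long long)d.features);
        return 0;
}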