Diffstat (limited to 'drivers')
93 files changed, 2522 insertions, 1327 deletions
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index b0ea767c8696..93d160661f4c 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -54,55 +54,58 @@ ACPI_MODULE_NAME("acpi_lpss"); #define LPSS_PRV_REG_COUNT 9 -struct lpss_shared_clock { - const char *name; - unsigned long rate; - struct clk *clk; -}; +/* LPSS Flags */ +#define LPSS_CLK BIT(0) +#define LPSS_CLK_GATE BIT(1) +#define LPSS_CLK_DIVIDER BIT(2) +#define LPSS_LTR BIT(3) +#define LPSS_SAVE_CTX BIT(4) struct lpss_private_data; struct lpss_device_desc { - bool clk_required; - const char *clkdev_name; - bool ltr_required; + unsigned int flags; unsigned int prv_offset; size_t prv_size_override; - bool clk_divider; - bool clk_gate; - bool save_ctx; - struct lpss_shared_clock *shared_clock; void (*setup)(struct lpss_private_data *pdata); }; static struct lpss_device_desc lpss_dma_desc = { - .clk_required = true, - .clkdev_name = "hclk", + .flags = LPSS_CLK, }; struct lpss_private_data { void __iomem *mmio_base; resource_size_t mmio_size; + unsigned int fixed_clk_rate; struct clk *clk; const struct lpss_device_desc *dev_desc; u32 prv_reg_ctx[LPSS_PRV_REG_COUNT]; }; +/* UART Component Parameter Register */ +#define LPSS_UART_CPR 0xF4 +#define LPSS_UART_CPR_AFCE BIT(4) + static void lpss_uart_setup(struct lpss_private_data *pdata) { unsigned int offset; - u32 reg; + u32 val; offset = pdata->dev_desc->prv_offset + LPSS_TX_INT; - reg = readl(pdata->mmio_base + offset); - writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + offset); - - offset = pdata->dev_desc->prv_offset + LPSS_GENERAL; - reg = readl(pdata->mmio_base + offset); - writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset); + val = readl(pdata->mmio_base + offset); + writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset); + + val = readl(pdata->mmio_base + LPSS_UART_CPR); + if (!(val & LPSS_UART_CPR_AFCE)) { + offset = pdata->dev_desc->prv_offset + LPSS_GENERAL; + val = readl(pdata->mmio_base + offset); + val |= LPSS_GENERAL_UART_RTS_OVRD; + writel(val, pdata->mmio_base + offset); + } } -static void lpss_i2c_setup(struct lpss_private_data *pdata) +static void byt_i2c_setup(struct lpss_private_data *pdata) { unsigned int offset; u32 val; @@ -111,100 +114,56 @@ static void lpss_i2c_setup(struct lpss_private_data *pdata) val = readl(pdata->mmio_base + offset); val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC; writel(val, pdata->mmio_base + offset); -} -static struct lpss_device_desc wpt_dev_desc = { - .clk_required = true, - .prv_offset = 0x800, - .ltr_required = true, - .clk_divider = true, - .clk_gate = true, -}; + if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset)) + pdata->fixed_clk_rate = 133000000; +} static struct lpss_device_desc lpt_dev_desc = { - .clk_required = true, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, .prv_offset = 0x800, - .ltr_required = true, - .clk_divider = true, - .clk_gate = true, }; static struct lpss_device_desc lpt_i2c_dev_desc = { - .clk_required = true, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR, .prv_offset = 0x800, - .ltr_required = true, - .clk_gate = true, }; static struct lpss_device_desc lpt_uart_dev_desc = { - .clk_required = true, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, .prv_offset = 0x800, - .ltr_required = true, - .clk_divider = true, - .clk_gate = true, .setup = lpss_uart_setup, }; static struct lpss_device_desc lpt_sdio_dev_desc = { + .flags = LPSS_LTR, .prv_offset = 0x1000, .prv_size_override = 0x1018, - .ltr_required = 
true, -}; - -static struct lpss_shared_clock pwm_clock = { - .name = "pwm_clk", - .rate = 25000000, }; static struct lpss_device_desc byt_pwm_dev_desc = { - .clk_required = true, - .save_ctx = true, - .shared_clock = &pwm_clock, + .flags = LPSS_SAVE_CTX, }; static struct lpss_device_desc byt_uart_dev_desc = { - .clk_required = true, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, .prv_offset = 0x800, - .clk_divider = true, - .clk_gate = true, - .save_ctx = true, .setup = lpss_uart_setup, }; static struct lpss_device_desc byt_spi_dev_desc = { - .clk_required = true, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, .prv_offset = 0x400, - .clk_divider = true, - .clk_gate = true, - .save_ctx = true, }; static struct lpss_device_desc byt_sdio_dev_desc = { - .clk_required = true, -}; - -static struct lpss_shared_clock i2c_clock = { - .name = "i2c_clk", - .rate = 100000000, + .flags = LPSS_CLK, }; static struct lpss_device_desc byt_i2c_dev_desc = { - .clk_required = true, + .flags = LPSS_CLK | LPSS_SAVE_CTX, .prv_offset = 0x800, - .save_ctx = true, - .shared_clock = &i2c_clock, - .setup = lpss_i2c_setup, -}; - -static struct lpss_shared_clock bsw_pwm_clock = { - .name = "pwm_clk", - .rate = 19200000, -}; - -static struct lpss_device_desc bsw_pwm_dev_desc = { - .clk_required = true, - .save_ctx = true, - .shared_clock = &bsw_pwm_clock, + .setup = byt_i2c_setup, }; #else @@ -237,7 +196,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "INT33FC", }, /* Braswell LPSS devices */ - { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) }, + { "80862288", LPSS_ADDR(byt_pwm_dev_desc) }, { "8086228A", LPSS_ADDR(byt_uart_dev_desc) }, { "8086228E", LPSS_ADDR(byt_spi_dev_desc) }, { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) }, @@ -251,7 +210,8 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) }, { "INT3437", }, - { "INT3438", LPSS_ADDR(wpt_dev_desc) }, + /* Wildcat Point LPSS devices */ + { "INT3438", LPSS_ADDR(lpt_dev_desc) }, { } }; @@ -276,7 +236,6 @@ static int register_device_clock(struct acpi_device *adev, struct lpss_private_data *pdata) { const struct lpss_device_desc *dev_desc = pdata->dev_desc; - struct lpss_shared_clock *shared_clock = dev_desc->shared_clock; const char *devname = dev_name(&adev->dev); struct clk *clk = ERR_PTR(-ENODEV); struct lpss_clk_data *clk_data; @@ -289,12 +248,7 @@ static int register_device_clock(struct acpi_device *adev, clk_data = platform_get_drvdata(lpss_clk_dev); if (!clk_data) return -ENODEV; - - if (dev_desc->clkdev_name) { - clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name, - devname); - return 0; - } + clk = clk_data->clk; if (!pdata->mmio_base || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE) @@ -303,24 +257,19 @@ static int register_device_clock(struct acpi_device *adev, parent = clk_data->name; prv_base = pdata->mmio_base + dev_desc->prv_offset; - if (shared_clock) { - clk = shared_clock->clk; - if (!clk) { - clk = clk_register_fixed_rate(NULL, shared_clock->name, - "lpss_clk", 0, - shared_clock->rate); - shared_clock->clk = clk; - } - parent = shared_clock->name; + if (pdata->fixed_clk_rate) { + clk = clk_register_fixed_rate(NULL, devname, parent, 0, + pdata->fixed_clk_rate); + goto out; } - if (dev_desc->clk_gate) { + if (dev_desc->flags & LPSS_CLK_GATE) { clk = clk_register_gate(NULL, devname, parent, 0, prv_base, 0, 0, NULL); parent = devname; } - if (dev_desc->clk_divider) { + if (dev_desc->flags & LPSS_CLK_DIVIDER) { /* Prevent 
division by zero */ if (!readl(prv_base)) writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base); @@ -344,7 +293,7 @@ static int register_device_clock(struct acpi_device *adev, kfree(parent); kfree(clk_name); } - +out: if (IS_ERR(clk)) return PTR_ERR(clk); @@ -392,7 +341,10 @@ static int acpi_lpss_create_device(struct acpi_device *adev, pdata->dev_desc = dev_desc; - if (dev_desc->clk_required) { + if (dev_desc->setup) + dev_desc->setup(pdata); + + if (dev_desc->flags & LPSS_CLK) { ret = register_device_clock(adev, pdata); if (ret) { /* Skip the device, but continue the namespace scan. */ @@ -413,9 +365,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev, goto err_out; } - if (dev_desc->setup) - dev_desc->setup(pdata); - adev->driver_data = pdata; pdev = acpi_create_platform_device(adev); if (!IS_ERR_OR_NULL(pdev)) { @@ -692,19 +641,19 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb, switch (action) { case BUS_NOTIFY_BOUND_DRIVER: - if (pdata->dev_desc->save_ctx) + if (pdata->dev_desc->flags & LPSS_SAVE_CTX) pdev->dev.pm_domain = &acpi_lpss_pm_domain; break; case BUS_NOTIFY_UNBOUND_DRIVER: - if (pdata->dev_desc->save_ctx) + if (pdata->dev_desc->flags & LPSS_SAVE_CTX) pdev->dev.pm_domain = NULL; break; case BUS_NOTIFY_ADD_DEVICE: - if (pdata->dev_desc->ltr_required) + if (pdata->dev_desc->flags & LPSS_LTR) return sysfs_create_group(&pdev->dev.kobj, &lpss_attr_group); case BUS_NOTIFY_DEL_DEVICE: - if (pdata->dev_desc->ltr_required) + if (pdata->dev_desc->flags & LPSS_LTR) sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); default: break; @@ -721,7 +670,7 @@ static void acpi_lpss_bind(struct device *dev) { struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); - if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required) + if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR)) return; if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c index 996fa1959eea..f30c40796856 100644 --- a/drivers/acpi/acpi_pnp.c +++ b/drivers/acpi/acpi_pnp.c @@ -132,10 +132,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = { {"PNP0401"}, /* ECP Printer Port */ /* apple-gmux */ {"APP000B"}, - /* fujitsu-laptop.c */ - {"FUJ02bf"}, - {"FUJ02B1"}, - {"FUJ02E3"}, /* system */ {"PNP0c02"}, /* General ID for reserving resources */ {"PNP0c01"}, /* memory controller */ diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 0cf159cc6e6d..56710a03c9b0 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -596,6 +596,38 @@ acpi_status acpi_enable_all_runtime_gpes(void) ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes) +/****************************************************************************** + * + * FUNCTION: acpi_enable_all_wakeup_gpes + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Enable all "wakeup" GPEs and disable all of the other GPEs, in + * all GPE blocks. 
+ * + ******************************************************************************/ + +acpi_status acpi_enable_all_wakeup_gpes(void) +{ + acpi_status status; + + ACPI_FUNCTION_TRACE(acpi_enable_all_wakeup_gpes); + + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + status = acpi_hw_enable_all_wakeup_gpes(); + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); + + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes) + /******************************************************************************* * * FUNCTION: acpi_install_gpe_block diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 2e6caabba07a..ea62d40fd161 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -396,11 +396,11 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, /* Examine each GPE Register within the block */ for (i = 0; i < gpe_block->register_count; i++) { - if (!gpe_block->register_info[i].enable_for_wake) { - continue; - } - /* Enable all "wake" GPEs in this register */ + /* + * Enable all "wake" GPEs in this register and disable the + * remaining ones. + */ status = acpi_hw_write(gpe_block->register_info[i].enable_for_wake, diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index 14cb6c0c8be2..5cd017c7ac0e 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c @@ -87,7 +87,9 @@ const char *acpi_gbl_io_decode[] = { const char *acpi_gbl_ll_decode[] = { "ActiveHigh", - "ActiveLow" + "ActiveLow", + "ActiveBoth", + "Reserved" }; const char *acpi_gbl_max_decode[] = { diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 5fdfe65fe165..8ec8a89a20ab 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -695,7 +695,7 @@ static void acpi_battery_quirks(struct acpi_battery *battery) if (battery->power_unit && dmi_name_in_vendors("LENOVO")) { const char *s; s = dmi_get_system_info(DMI_PRODUCT_VERSION); - if (s && !strnicmp(s, "ThinkPad", 8)) { + if (s && !strncasecmp(s, "ThinkPad", 8)) { dmi_walk(find_battery, battery); if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) && diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index 36eb42e3b0bb..ed122e17636e 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -247,8 +247,8 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { }, /* - * These machines will power on immediately after shutdown when - * reporting the Windows 2012 OSI. 
+ * The wireless hotkey does not work on those machines when + * returning true for _OSI("Windows 2012") */ { .callback = dmi_disable_osi_win8, @@ -258,6 +258,38 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"), }, }, + { + .callback = dmi_disable_osi_win8, + .ident = "Dell Inspiron 7537", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Dell Inspiron 5437", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5437"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Dell Inspiron 3437", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3437"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Dell Vostro 3446", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"), + }, + }, /* * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 67075f800e34..bea6896be122 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -1041,6 +1041,40 @@ static struct dev_pm_domain acpi_general_pm_domain = { }; /** + * acpi_dev_pm_detach - Remove ACPI power management from the device. + * @dev: Device to take care of. + * @power_off: Whether or not to try to remove power from the device. + * + * Remove the device from the general ACPI PM domain and remove its wakeup + * notifier. If @power_off is set, additionally remove power from the device if + * possible. + * + * Callers must ensure proper synchronization of this function with power + * management callbacks. + */ +static void acpi_dev_pm_detach(struct device *dev, bool power_off) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + + if (adev && dev->pm_domain == &acpi_general_pm_domain) { + dev->pm_domain = NULL; + acpi_remove_pm_notifier(adev); + if (power_off) { + /* + * If the device's PM QoS resume latency limit or flags + * have been exposed to user space, they have to be + * hidden at this point, so that they don't affect the + * choice of the low-power state to put the device into. + */ + dev_pm_qos_hide_latency_limit(dev); + dev_pm_qos_hide_flags(dev); + acpi_device_wakeup(adev, ACPI_STATE_S0, false); + acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); + } + } +} + +/** * acpi_dev_pm_attach - Prepare device for ACPI power management. * @dev: Device to prepare. * @power_on: Whether or not to power on the device. @@ -1072,42 +1106,9 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on) acpi_dev_pm_full_power(adev); acpi_device_wakeup(adev, ACPI_STATE_S0, false); } + + dev->pm_domain->detach = acpi_dev_pm_detach; return 0; } EXPORT_SYMBOL_GPL(acpi_dev_pm_attach); - -/** - * acpi_dev_pm_detach - Remove ACPI power management from the device. - * @dev: Device to take care of. - * @power_off: Whether or not to try to remove power from the device. - * - * Remove the device from the general ACPI PM domain and remove its wakeup - * notifier. If @power_off is set, additionally remove power from the device if - * possible. - * - * Callers must ensure proper synchronization of this function with power - * management callbacks. 
- */ -void acpi_dev_pm_detach(struct device *dev, bool power_off) -{ - struct acpi_device *adev = ACPI_COMPANION(dev); - - if (adev && dev->pm_domain == &acpi_general_pm_domain) { - dev->pm_domain = NULL; - acpi_remove_pm_notifier(adev); - if (power_off) { - /* - * If the device's PM QoS resume latency limit or flags - * have been exposed to user space, they have to be - * hidden at this point, so that they don't affect the - * choice of the low-power state to put the device into. - */ - dev_pm_qos_hide_latency_limit(dev); - dev_pm_qos_hide_flags(dev); - acpi_device_wakeup(adev, ACPI_STATE_S0, false); - acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); - } - } -} -EXPORT_SYMBOL_GPL(acpi_dev_pm_detach); #endif /* CONFIG_PM */ diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 8acf53e62966..5328b1090e08 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -27,12 +27,10 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <linux/thermal.h> #include <linux/acpi.h> -#define PREFIX "ACPI: " - #define ACPI_FAN_CLASS "fan" #define ACPI_FAN_FILE_STATE "state" @@ -127,8 +125,9 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = { }; /* -------------------------------------------------------------------------- - Driver Interface - -------------------------------------------------------------------------- */ + * Driver Interface + * -------------------------------------------------------------------------- +*/ static int acpi_fan_add(struct acpi_device *device) { @@ -143,7 +142,7 @@ static int acpi_fan_add(struct acpi_device *device) result = acpi_bus_update_power(device->handle, NULL); if (result) { - printk(KERN_ERR PREFIX "Setting initial power state\n"); + dev_err(&device->dev, "Setting initial power state\n"); goto end; } @@ -168,10 +167,9 @@ static int acpi_fan_add(struct acpi_device *device) &device->dev.kobj, "device"); if (result) - dev_err(&device->dev, "Failed to create sysfs link " - "'device'\n"); + dev_err(&device->dev, "Failed to create sysfs link 'device'\n"); - printk(KERN_INFO PREFIX "%s [%s] (%s)\n", + dev_info(&device->dev, "ACPI: %s [%s] (%s)\n", acpi_device_name(device), acpi_device_bid(device), !device->power.state ? "on" : "off"); @@ -217,7 +215,7 @@ static int acpi_fan_resume(struct device *dev) result = acpi_bus_update_power(to_acpi_device(dev)->handle, NULL); if (result) - printk(KERN_ERR PREFIX "Error updating fan power state\n"); + dev_err(dev, "Error updating fan power state\n"); return result; } diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 3abe9b223ba7..9964f70be98d 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -152,6 +152,16 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported) osi_linux.dmi ? " via DMI" : ""); } + if (!strcmp("Darwin", interface)) { + /* + * Apple firmware will behave poorly if it receives positive + * answers to "Darwin" and any other OS. Respond positively + * to Darwin and then disable all other vendor strings. 
+ */ + acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS); + supported = ACPI_UINT32_MAX; + } + return supported; } @@ -825,7 +835,7 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler, acpi_irq_handler = handler; acpi_irq_context = context; - if (request_irq(irq, acpi_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "acpi", acpi_irq)) { + if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) { printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); acpi_irq_handler = NULL; return AE_NOT_ACQUIRED; diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index e6ae603ed1a1..cd4de7e038ea 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -35,6 +35,7 @@ #include <linux/pci-aspm.h> #include <linux/acpi.h> #include <linux/slab.h> +#include <linux/dmi.h> #include <acpi/apei.h> /* for acpi_hest_init() */ #include "internal.h" @@ -430,6 +431,19 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm, acpi_handle handle = device->handle; /* + * Apple always return failure on _OSC calls when _OSI("Darwin") has + * been called successfully. We know the feature set supported by the + * platform, so avoid calling _OSC at all + */ + + if (dmi_match(DMI_SYS_VENDOR, "Apple Inc.")) { + root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL; + decode_osc_control(root, "OS assumes control of", + root->osc_control_set); + return; + } + + /* * All supported architectures that use ACPI have support for * PCI domains, so we indicate this in _OSC support capabilities. */ diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index e32321ce9a5c..ef58f46c8442 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -16,7 +16,7 @@ static int map_lapic_id(struct acpi_subtable_header *entry, u32 acpi_id, int *apic_id) { struct acpi_madt_local_apic *lapic = - (struct acpi_madt_local_apic *)entry; + container_of(entry, struct acpi_madt_local_apic, header); if (!(lapic->lapic_flags & ACPI_MADT_ENABLED)) return -ENODEV; @@ -32,7 +32,7 @@ static int map_x2apic_id(struct acpi_subtable_header *entry, int device_declaration, u32 acpi_id, int *apic_id) { struct acpi_madt_local_x2apic *apic = - (struct acpi_madt_local_x2apic *)entry; + container_of(entry, struct acpi_madt_local_x2apic, header); if (!(apic->lapic_flags & ACPI_MADT_ENABLED)) return -ENODEV; @@ -49,7 +49,7 @@ static int map_lsapic_id(struct acpi_subtable_header *entry, int device_declaration, u32 acpi_id, int *apic_id) { struct acpi_madt_local_sapic *lsapic = - (struct acpi_madt_local_sapic *)entry; + container_of(entry, struct acpi_madt_local_sapic, header); if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED)) return -ENODEV; diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index 366ca40a6f70..a7a3edd28beb 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -35,6 +35,7 @@ #include <linux/jiffies.h> #include <linux/delay.h> #include <linux/power_supply.h> +#include <linux/dmi.h> #include "sbshc.h" #include "battery.h" @@ -61,6 +62,8 @@ static unsigned int cache_time = 1000; module_param(cache_time, uint, 0644); MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); +static bool sbs_manager_broken; + #define MAX_SBS_BAT 4 #define ACPI_SBS_BLOCK_MAX 32 @@ -109,6 +112,7 @@ struct acpi_sbs { u8 batteries_supported:4; u8 manager_present:1; u8 charger_present:1; + u8 charger_exists:1; }; #define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger) @@ -429,9 +433,19 @@ static int acpi_ac_get_present(struct acpi_sbs *sbs) result = 
acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_CHARGER, 0x13, (u8 *) & status); - if (!result) - sbs->charger_present = (status >> 15) & 0x1; - return result; + + if (result) + return result; + + /* + * The spec requires that bit 4 always be 1. If it's not set, assume + * that the implementation doesn't support an SBS charger + */ + if (!((status >> 4) & 0x1)) + return -ENODEV; + + sbs->charger_present = (status >> 15) & 0x1; + return 0; } static ssize_t acpi_battery_alarm_show(struct device *dev, @@ -483,16 +497,21 @@ static int acpi_battery_read(struct acpi_battery *battery) ACPI_SBS_MANAGER, 0x01, (u8 *)&state, 2); } else if (battery->id == 0) battery->present = 1; + if (result || !battery->present) return result; if (saved_present != battery->present) { battery->update_time = 0; result = acpi_battery_get_info(battery); - if (result) + if (result) { + battery->present = 0; return result; + } } result = acpi_battery_get_state(battery); + if (result) + battery->present = 0; return result; } @@ -524,6 +543,7 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id) result = power_supply_register(&sbs->device->dev, &battery->bat); if (result) goto end; + result = device_create_file(battery->bat.dev, &alarm_attr); if (result) goto end; @@ -554,6 +574,7 @@ static int acpi_charger_add(struct acpi_sbs *sbs) if (result) goto end; + sbs->charger_exists = 1; sbs->charger.name = "sbs-charger"; sbs->charger.type = POWER_SUPPLY_TYPE_MAINS; sbs->charger.properties = sbs_ac_props; @@ -580,9 +601,12 @@ static void acpi_sbs_callback(void *context) struct acpi_battery *bat; u8 saved_charger_state = sbs->charger_present; u8 saved_battery_state; - acpi_ac_get_present(sbs); - if (sbs->charger_present != saved_charger_state) - kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE); + + if (sbs->charger_exists) { + acpi_ac_get_present(sbs); + if (sbs->charger_present != saved_charger_state) + kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE); + } if (sbs->manager_present) { for (id = 0; id < MAX_SBS_BAT; ++id) { @@ -598,12 +622,31 @@ static void acpi_sbs_callback(void *context) } } +static int disable_sbs_manager(const struct dmi_system_id *d) +{ + sbs_manager_broken = true; + return 0; +} + +static struct dmi_system_id acpi_sbs_dmi_table[] = { + { + .callback = disable_sbs_manager, + .ident = "Apple", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc.") + }, + }, + { }, +}; + static int acpi_sbs_add(struct acpi_device *device) { struct acpi_sbs *sbs; int result = 0; int id; + dmi_check_system(acpi_sbs_dmi_table); + sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL); if (!sbs) { result = -ENOMEM; @@ -619,17 +662,24 @@ static int acpi_sbs_add(struct acpi_device *device) device->driver_data = sbs; result = acpi_charger_add(sbs); - if (result) + if (result && result != -ENODEV) goto end; - result = acpi_manager_get_info(sbs); - if (!result) { - sbs->manager_present = 1; - for (id = 0; id < MAX_SBS_BAT; ++id) - if ((sbs->batteries_supported & (1 << id))) - acpi_battery_add(sbs, id); - } else + result = 0; + + if (!sbs_manager_broken) { + result = acpi_manager_get_info(sbs); + if (!result) { + sbs->manager_present = 0; + for (id = 0; id < MAX_SBS_BAT; ++id) + if ((sbs->batteries_supported & (1 << id))) + acpi_battery_add(sbs, id); + } + } + + if (!sbs->manager_present) acpi_battery_add(sbs, 0); + acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs); end: if (result) diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 54da4a3fe65e..05a31b573fc3 100644 --- a/drivers/acpi/sleep.c +++ 
b/drivers/acpi/sleep.c @@ -14,6 +14,7 @@ #include <linux/irq.h> #include <linux/dmi.h> #include <linux/device.h> +#include <linux/interrupt.h> #include <linux/suspend.h> #include <linux/reboot.h> #include <linux/acpi.h> @@ -626,6 +627,19 @@ static int acpi_freeze_begin(void) return 0; } +static int acpi_freeze_prepare(void) +{ + acpi_enable_all_wakeup_gpes(); + enable_irq_wake(acpi_gbl_FADT.sci_interrupt); + return 0; +} + +static void acpi_freeze_restore(void) +{ + disable_irq_wake(acpi_gbl_FADT.sci_interrupt); + acpi_enable_all_runtime_gpes(); +} + static void acpi_freeze_end(void) { acpi_scan_lock_release(); @@ -633,6 +647,8 @@ static void acpi_freeze_end(void) static const struct platform_freeze_ops acpi_freeze_ops = { .begin = acpi_freeze_begin, + .prepare = acpi_freeze_prepare, + .restore = acpi_freeze_restore, .end = acpi_freeze_end, }; diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 07c8c5a5ee95..834f35c4bf8d 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -661,7 +661,6 @@ EXPORT_SYMBOL(acpi_evaluate_dsm); * @uuid: UUID of requested functions, should be 16 bytes at least * @rev: revision number of requested functions * @funcs: bitmap of requested functions - * @exclude: excluding special value, used to support i915 and nouveau * * Evaluate device's _DSM method to check whether it supports requested * functions. Currently only support 64 functions at maximum, should be diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 8e7e18567ae6..807a88a0f394 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -411,12 +411,6 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d) return 0; } -static int __init video_set_use_native_backlight(const struct dmi_system_id *d) -{ - use_native_backlight_dmi = true; - return 0; -} - static int __init video_disable_native_backlight(const struct dmi_system_id *d) { use_native_backlight_dmi = false; @@ -467,265 +461,6 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), }, }, - { - .callback = video_set_use_native_backlight, - .ident = "ThinkPad X230", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "ThinkPad T430 and T430s", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "ThinkPad T430", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "ThinkPad T431s", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "ThinkPad Edge E530", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "ThinkPad Edge E530", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "ThinkPad Edge E530", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "ThinkPad W530", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - 
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "ThinkPad X1 Carbon", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "Lenovo Yoga 13", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "Lenovo Yoga 2 11", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2 11"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "Thinkpad Helix", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "Dell Inspiron 7520", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "Acer Aspire 5733Z", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "Acer Aspire 5742G", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "Acer Aspire V5-171", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "V5-171"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "Acer Aspire V5-431", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-431"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "Acer Aspire V5-471G", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-471G"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "Acer TravelMate B113", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate B113"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "Acer Aspire V5-572G", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"), - DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "Acer Aspire V5-573G", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"), - DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "ASUS Zenbook Prime UX31A", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "UX31A"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "HP ProBook 4340s", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4340s"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "HP ProBook 4540s", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "HP ProBook 2013 models", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "), - DMI_MATCH(DMI_PRODUCT_NAME, " G1"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident 
= "HP EliteBook 2013 models", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "), - DMI_MATCH(DMI_PRODUCT_NAME, " G1"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "HP EliteBook 2014 models", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "), - DMI_MATCH(DMI_PRODUCT_NAME, " G2"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "HP ZBook 14", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "HP ZBook 15", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "HP ZBook 17", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "HP EliteBook 8470p", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8470p"), - }, - }, - { - .callback = video_set_use_native_backlight, - .ident = "HP EliteBook 8780w", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"), - }, - }, /* * These models have a working acpi_video backlight control, and using @@ -1419,6 +1154,23 @@ acpi_video_device_bind(struct acpi_video_bus *video, } } +static bool acpi_video_device_in_dod(struct acpi_video_device *device) +{ + struct acpi_video_bus *video = device->video; + int i; + + /* If we have a broken _DOD, no need to test */ + if (!video->attached_count) + return true; + + for (i = 0; i < video->attached_count; i++) { + if (video->attached_array[i].bind_info == device) + return true; + } + + return false; +} + /* * Arg: * video : video bus device @@ -1858,6 +1610,15 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device) static int count; char *name; + /* + * Do not create backlight device for video output + * device that is not in the enumerated list. 
+ */ + if (!acpi_video_device_in_dod(device)) { + dev_dbg(&device->dev->dev, "not in _DOD list, ignore\n"); + return; + } + result = acpi_video_init_brightness(device); if (result) return; diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index c42feb2bacd0..27c43499977a 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -174,6 +174,14 @@ static struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"), }, }, + { + .callback = video_detect_force_vendor, + .ident = "Lenovo IdeaPad Z570", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"), + }, + }, { }, }; diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 3cf61a127ee5..47bbdc1b5be3 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -15,6 +15,7 @@ #include <linux/io.h> #include <linux/pm.h> #include <linux/pm_runtime.h> +#include <linux/pm_domain.h> #include <linux/amba/bus.h> #include <linux/sizes.h> @@ -182,9 +183,15 @@ static int amba_probe(struct device *dev) int ret; do { + ret = dev_pm_domain_attach(dev, true); + if (ret == -EPROBE_DEFER) + break; + ret = amba_get_enable_pclk(pcdev); - if (ret) + if (ret) { + dev_pm_domain_detach(dev, true); break; + } pm_runtime_get_noresume(dev); pm_runtime_set_active(dev); @@ -199,6 +206,7 @@ static int amba_probe(struct device *dev) pm_runtime_put_noidle(dev); amba_put_disable_pclk(pcdev); + dev_pm_domain_detach(dev, true); } while (0); return ret; @@ -220,6 +228,7 @@ static int amba_remove(struct device *dev) pm_runtime_put_noidle(dev); amba_put_disable_pclk(pcdev); + dev_pm_domain_detach(dev, true); return ret; } diff --git a/drivers/base/platform.c b/drivers/base/platform.c index ab4f4ce02722..b2afc29403f9 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -21,6 +21,7 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/pm_runtime.h> +#include <linux/pm_domain.h> #include <linux/idr.h> #include <linux/acpi.h> #include <linux/clk/clk-conf.h> @@ -506,11 +507,12 @@ static int platform_drv_probe(struct device *_dev) if (ret < 0) return ret; - acpi_dev_pm_attach(_dev, true); - - ret = drv->probe(dev); - if (ret) - acpi_dev_pm_detach(_dev, true); + ret = dev_pm_domain_attach(_dev, true); + if (ret != -EPROBE_DEFER) { + ret = drv->probe(dev); + if (ret) + dev_pm_domain_detach(_dev, true); + } if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { dev_warn(_dev, "probe deferral not supported\n"); @@ -532,7 +534,7 @@ static int platform_drv_remove(struct device *_dev) int ret; ret = drv->remove(dev); - acpi_dev_pm_detach(_dev, true); + dev_pm_domain_detach(_dev, true); return ret; } @@ -543,7 +545,7 @@ static void platform_drv_shutdown(struct device *_dev) struct platform_device *dev = to_platform_device(_dev); drv->shutdown(dev); - acpi_dev_pm_detach(_dev, true); + dev_pm_domain_detach(_dev, true); } /** diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index df2e5eeaeb05..b0f138806bbc 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -11,6 +11,8 @@ #include <linux/export.h> #include <linux/slab.h> #include <linux/pm_clock.h> +#include <linux/acpi.h> +#include <linux/pm_domain.h> /** * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device. @@ -82,3 +84,53 @@ int dev_pm_put_subsys_data(struct device *dev) return ret; } EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); + +/** + * dev_pm_domain_attach - Attach a device to its PM domain. 
+ * @dev: Device to attach. + * @power_on: Used to indicate whether we should power on the device. + * + * The @dev may only be attached to a single PM domain. By iterating through + * the available alternatives we try to find a valid PM domain for the device. + * As attachment succeeds, the ->detach() callback in the struct dev_pm_domain + * should be assigned by the corresponding attach function. + * + * This function should typically be invoked from subsystem level code during + * the probe phase. Especially for those that holds devices which requires + * power management through PM domains. + * + * Callers must ensure proper synchronization of this function with power + * management callbacks. + * + * Returns 0 on successfully attached PM domain or negative error code. + */ +int dev_pm_domain_attach(struct device *dev, bool power_on) +{ + int ret; + + ret = acpi_dev_pm_attach(dev, power_on); + if (ret) + ret = genpd_dev_pm_attach(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_domain_attach); + +/** + * dev_pm_domain_detach - Detach a device from its PM domain. + * @dev: Device to attach. + * @power_off: Used to indicate whether we should power off the device. + * + * This functions will reverse the actions from dev_pm_domain_attach() and thus + * try to detach the @dev from its PM domain. Typically it should be invoked + * from subsystem level code during the remove phase. + * + * Callers must ensure proper synchronization of this function with power + * management callbacks. + */ +void dev_pm_domain_detach(struct device *dev, bool power_off) +{ + if (dev->pm_domain && dev->pm_domain->detach) + dev->pm_domain->detach(dev, power_off); +} +EXPORT_SYMBOL_GPL(dev_pm_domain_detach); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index eee55c1e5fde..40bc2f4072cc 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/io.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/pm_domain.h> #include <linux/pm_qos.h> @@ -25,10 +26,6 @@ __routine = genpd->dev_ops.callback; \ if (__routine) { \ __ret = __routine(dev); \ - } else { \ - __routine = dev_gpd_data(dev)->ops.callback; \ - if (__routine) \ - __ret = __routine(dev); \ } \ __ret; \ }) @@ -70,8 +67,6 @@ static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name) return genpd; } -#ifdef CONFIG_PM - struct generic_pm_domain *dev_to_genpd(struct device *dev) { if (IS_ERR_OR_NULL(dev->pm_domain)) @@ -147,13 +142,13 @@ static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) { s64 usecs64; - if (!genpd->cpu_data) + if (!genpd->cpuidle_data) return; usecs64 = genpd->power_on_latency_ns; do_div(usecs64, NSEC_PER_USEC); - usecs64 += genpd->cpu_data->saved_exit_latency; - genpd->cpu_data->idle_state->exit_latency = usecs64; + usecs64 += genpd->cpuidle_data->saved_exit_latency; + genpd->cpuidle_data->idle_state->exit_latency = usecs64; } /** @@ -193,9 +188,9 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) return 0; } - if (genpd->cpu_data) { + if (genpd->cpuidle_data) { cpuidle_pause_and_lock(); - genpd->cpu_data->idle_state->disabled = true; + genpd->cpuidle_data->idle_state->disabled = true; cpuidle_resume_and_unlock(); goto out; } @@ -285,8 +280,6 @@ int pm_genpd_name_poweron(const char *domain_name) return genpd ? 
pm_genpd_poweron(genpd) : -EINVAL; } -#endif /* CONFIG_PM */ - #ifdef CONFIG_PM_RUNTIME static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, @@ -430,7 +423,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) * Queue up the execution of pm_genpd_poweroff() unless it's already been done * before. */ -void genpd_queue_power_off_work(struct generic_pm_domain *genpd) +static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) { queue_work(pm_wq, &genpd->power_off_work); } @@ -520,17 +513,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) } } - if (genpd->cpu_data) { + if (genpd->cpuidle_data) { /* - * If cpu_data is set, cpuidle should turn the domain off when - * the CPU in it is idle. In that case we don't decrement the - * subdomain counts of the master domains, so that power is not - * removed from the current domain prematurely as a result of - * cutting off the masters' power. + * If cpuidle_data is set, cpuidle should turn the domain off + * when the CPU in it is idle. In that case we don't decrement + * the subdomain counts of the master domains, so that power is + * not removed from the current domain prematurely as a result + * of cutting off the masters' power. */ genpd->status = GPD_STATE_POWER_OFF; cpuidle_pause_and_lock(); - genpd->cpu_data->idle_state->disabled = false; + genpd->cpuidle_data->idle_state->disabled = false; cpuidle_resume_and_unlock(); goto out; } @@ -619,8 +612,6 @@ static int pm_genpd_runtime_suspend(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - might_sleep_if(!genpd->dev_irq_safe); - stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; if (stop_ok && !stop_ok(dev)) return -EBUSY; @@ -665,8 +656,6 @@ static int pm_genpd_runtime_resume(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - might_sleep_if(!genpd->dev_irq_safe); - /* If power.irq_safe, the PM domain is never powered off. 
*/ if (dev->power.irq_safe) return genpd_start_dev_no_timing(genpd, dev); @@ -733,6 +722,13 @@ void pm_genpd_poweroff_unused(void) mutex_unlock(&gpd_list_lock); } +static int __init genpd_poweroff_unused(void) +{ + pm_genpd_poweroff_unused(); + return 0; +} +late_initcall(genpd_poweroff_unused); + #else static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb, @@ -741,6 +737,9 @@ static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb, return NOTIFY_DONE; } +static inline void +genpd_queue_power_off_work(struct generic_pm_domain *genpd) {} + static inline void genpd_power_off_work_fn(struct work_struct *work) {} #define pm_genpd_runtime_suspend NULL @@ -774,46 +773,6 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); } -static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, suspend, dev); -} - -static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev); -} - -static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev); -} - -static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, resume, dev); -} - -static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, freeze, dev); -} - -static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev); -} - -static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev); -} - -static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, thaw, dev); -} - /** * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. * @genpd: PM domain to power off, if possible. @@ -995,7 +954,7 @@ static int pm_genpd_suspend(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev); + return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev); } /** @@ -1016,7 +975,7 @@ static int pm_genpd_suspend_late(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev); + return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev); } /** @@ -1103,7 +1062,7 @@ static int pm_genpd_resume_early(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev); + return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev); } /** @@ -1124,7 +1083,7 @@ static int pm_genpd_resume(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev); + return genpd->suspend_power_off ? 0 : pm_generic_resume(dev); } /** @@ -1145,7 +1104,7 @@ static int pm_genpd_freeze(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev); + return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev); } /** @@ -1167,7 +1126,7 @@ static int pm_genpd_freeze_late(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 
0 : genpd_freeze_late(genpd, dev); + return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev); } /** @@ -1231,7 +1190,7 @@ static int pm_genpd_thaw_early(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev); + return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev); } /** @@ -1252,7 +1211,7 @@ static int pm_genpd_thaw(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev); + return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev); } /** @@ -1344,13 +1303,13 @@ static void pm_genpd_complete(struct device *dev) } /** - * pm_genpd_syscore_switch - Switch power during system core suspend or resume. + * genpd_syscore_switch - Switch power during system core suspend or resume. * @dev: Device that normally is marked as "always on" to switch power for. * * This routine may only be called during the system core (syscore) suspend or * resume phase for devices whose "always on" flags are set. */ -void pm_genpd_syscore_switch(struct device *dev, bool suspend) +static void genpd_syscore_switch(struct device *dev, bool suspend) { struct generic_pm_domain *genpd; @@ -1366,7 +1325,18 @@ void pm_genpd_syscore_switch(struct device *dev, bool suspend) genpd->suspended_count--; } } -EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch); + +void pm_genpd_syscore_poweroff(struct device *dev) +{ + genpd_syscore_switch(dev, true); +} +EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff); + +void pm_genpd_syscore_poweron(struct device *dev) +{ + genpd_syscore_switch(dev, false); +} +EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron); #else @@ -1466,6 +1436,9 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, spin_unlock_irq(&dev->power.lock); + if (genpd->attach_dev) + genpd->attach_dev(dev); + mutex_lock(&gpd_data->lock); gpd_data->base.dev = dev; list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); @@ -1484,39 +1457,6 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, } /** - * __pm_genpd_of_add_device - Add a device to an I/O PM domain. - * @genpd_node: Device tree node pointer representing a PM domain to which the - * the device is added to. - * @dev: Device to be added. - * @td: Set of PM QoS timing parameters to attach to the device. - */ -int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev, - struct gpd_timing_data *td) -{ - struct generic_pm_domain *genpd = NULL, *gpd; - - dev_dbg(dev, "%s()\n", __func__); - - if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev)) - return -EINVAL; - - mutex_lock(&gpd_list_lock); - list_for_each_entry(gpd, &gpd_list, gpd_list_node) { - if (gpd->of_node == genpd_node) { - genpd = gpd; - break; - } - } - mutex_unlock(&gpd_list_lock); - - if (!genpd) - return -EINVAL; - - return __pm_genpd_add_device(genpd, dev, td); -} - - -/** * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it. * @domain_name: Name of the PM domain to add the device to. * @dev: Device to be added. @@ -1558,6 +1498,9 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, genpd->device_count--; genpd->max_off_time_changed = true; + if (genpd->detach_dev) + genpd->detach_dev(dev); + spin_lock_irq(&dev->power.lock); dev->pm_domain = NULL; @@ -1744,112 +1687,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, } /** - * pm_genpd_add_callbacks - Add PM domain callbacks to a given device. - * @dev: Device to add the callbacks to. 
- * @ops: Set of callbacks to add. - * @td: Timing data to add to the device along with the callbacks (optional). - * - * Every call to this routine should be balanced with a call to - * __pm_genpd_remove_callbacks() and they must not be nested. - */ -int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, - struct gpd_timing_data *td) -{ - struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL; - int ret = 0; - - if (!(dev && ops)) - return -EINVAL; - - gpd_data_new = __pm_genpd_alloc_dev_data(dev); - if (!gpd_data_new) - return -ENOMEM; - - pm_runtime_disable(dev); - device_pm_lock(); - - ret = dev_pm_get_subsys_data(dev); - if (ret) - goto out; - - spin_lock_irq(&dev->power.lock); - - if (dev->power.subsys_data->domain_data) { - gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); - } else { - gpd_data = gpd_data_new; - dev->power.subsys_data->domain_data = &gpd_data->base; - } - gpd_data->refcount++; - gpd_data->ops = *ops; - if (td) - gpd_data->td = *td; - - spin_unlock_irq(&dev->power.lock); - - out: - device_pm_unlock(); - pm_runtime_enable(dev); - - if (gpd_data != gpd_data_new) - __pm_genpd_free_dev_data(dev, gpd_data_new); - - return ret; -} -EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); - -/** - * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device. - * @dev: Device to remove the callbacks from. - * @clear_td: If set, clear the device's timing data too. - * - * This routine can only be called after pm_genpd_add_callbacks(). - */ -int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) -{ - struct generic_pm_domain_data *gpd_data = NULL; - bool remove = false; - int ret = 0; - - if (!(dev && dev->power.subsys_data)) - return -EINVAL; - - pm_runtime_disable(dev); - device_pm_lock(); - - spin_lock_irq(&dev->power.lock); - - if (dev->power.subsys_data->domain_data) { - gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); - gpd_data->ops = (struct gpd_dev_ops){ NULL }; - if (clear_td) - gpd_data->td = (struct gpd_timing_data){ 0 }; - - if (--gpd_data->refcount == 0) { - dev->power.subsys_data->domain_data = NULL; - remove = true; - } - } else { - ret = -EINVAL; - } - - spin_unlock_irq(&dev->power.lock); - - device_pm_unlock(); - pm_runtime_enable(dev); - - if (ret) - return ret; - - dev_pm_put_subsys_data(dev); - if (remove) - __pm_genpd_free_dev_data(dev, gpd_data); - - return 0; -} -EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); - -/** * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle. * @genpd: PM domain to be connected with cpuidle. * @state: cpuidle state this domain can disable/enable. 
@@ -1861,7 +1698,7 @@ EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) { struct cpuidle_driver *cpuidle_drv; - struct gpd_cpu_data *cpu_data; + struct gpd_cpuidle_data *cpuidle_data; struct cpuidle_state *idle_state; int ret = 0; @@ -1870,12 +1707,12 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) genpd_acquire_lock(genpd); - if (genpd->cpu_data) { + if (genpd->cpuidle_data) { ret = -EEXIST; goto out; } - cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL); - if (!cpu_data) { + cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL); + if (!cpuidle_data) { ret = -ENOMEM; goto out; } @@ -1893,9 +1730,9 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) ret = -EAGAIN; goto err; } - cpu_data->idle_state = idle_state; - cpu_data->saved_exit_latency = idle_state->exit_latency; - genpd->cpu_data = cpu_data; + cpuidle_data->idle_state = idle_state; + cpuidle_data->saved_exit_latency = idle_state->exit_latency; + genpd->cpuidle_data = cpuidle_data; genpd_recalc_cpu_exit_latency(genpd); out: @@ -1906,7 +1743,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) cpuidle_driver_unref(); err_drv: - kfree(cpu_data); + kfree(cpuidle_data); goto out; } @@ -1929,7 +1766,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state) */ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) { - struct gpd_cpu_data *cpu_data; + struct gpd_cpuidle_data *cpuidle_data; struct cpuidle_state *idle_state; int ret = 0; @@ -1938,20 +1775,20 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) genpd_acquire_lock(genpd); - cpu_data = genpd->cpu_data; - if (!cpu_data) { + cpuidle_data = genpd->cpuidle_data; + if (!cpuidle_data) { ret = -ENODEV; goto out; } - idle_state = cpu_data->idle_state; + idle_state = cpuidle_data->idle_state; if (!idle_state->disabled) { ret = -EAGAIN; goto out; } - idle_state->exit_latency = cpu_data->saved_exit_latency; + idle_state->exit_latency = cpuidle_data->saved_exit_latency; cpuidle_driver_unref(); - genpd->cpu_data = NULL; - kfree(cpu_data); + genpd->cpuidle_data = NULL; + kfree(cpuidle_data); out: genpd_release_lock(genpd); @@ -1970,17 +1807,13 @@ int pm_genpd_name_detach_cpuidle(const char *name) /* Default device callbacks for generic PM domains. */ /** - * pm_genpd_default_save_state - Default "save device state" for PM domians. + * pm_genpd_default_save_state - Default "save device state" for PM domains. * @dev: Device to handle. */ static int pm_genpd_default_save_state(struct device *dev) { int (*cb)(struct device *__dev); - cb = dev_gpd_data(dev)->ops.save_state; - if (cb) - return cb(dev); - if (dev->type && dev->type->pm) cb = dev->type->pm->runtime_suspend; else if (dev->class && dev->class->pm) @@ -1997,17 +1830,13 @@ static int pm_genpd_default_save_state(struct device *dev) } /** - * pm_genpd_default_restore_state - Default PM domians "restore device state". + * pm_genpd_default_restore_state - Default PM domains "restore device state". * @dev: Device to handle. */ static int pm_genpd_default_restore_state(struct device *dev) { int (*cb)(struct device *__dev); - cb = dev_gpd_data(dev)->ops.restore_state; - if (cb) - return cb(dev); - if (dev->type && dev->type->pm) cb = dev->type->pm->runtime_resume; else if (dev->class && dev->class->pm) @@ -2023,109 +1852,6 @@ static int pm_genpd_default_restore_state(struct device *dev) return cb ? 
cb(dev) : 0; } -#ifdef CONFIG_PM_SLEEP - -/** - * pm_genpd_default_suspend - Default "device suspend" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_suspend(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend; - - return cb ? cb(dev) : pm_generic_suspend(dev); -} - -/** - * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_suspend_late(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late; - - return cb ? cb(dev) : pm_generic_suspend_late(dev); -} - -/** - * pm_genpd_default_resume_early - Default "early device resume" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_resume_early(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early; - - return cb ? cb(dev) : pm_generic_resume_early(dev); -} - -/** - * pm_genpd_default_resume - Default "device resume" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_resume(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume; - - return cb ? cb(dev) : pm_generic_resume(dev); -} - -/** - * pm_genpd_default_freeze - Default "device freeze" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_freeze(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze; - - return cb ? cb(dev) : pm_generic_freeze(dev); -} - -/** - * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_freeze_late(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late; - - return cb ? cb(dev) : pm_generic_freeze_late(dev); -} - -/** - * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_thaw_early(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early; - - return cb ? cb(dev) : pm_generic_thaw_early(dev); -} - -/** - * pm_genpd_default_thaw - Default "device thaw" for PM domians. - * @dev: Device to handle. - */ -static int pm_genpd_default_thaw(struct device *dev) -{ - int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw; - - return cb ? cb(dev) : pm_generic_thaw(dev); -} - -#else /* !CONFIG_PM_SLEEP */ - -#define pm_genpd_default_suspend NULL -#define pm_genpd_default_suspend_late NULL -#define pm_genpd_default_resume_early NULL -#define pm_genpd_default_resume NULL -#define pm_genpd_default_freeze NULL -#define pm_genpd_default_freeze_late NULL -#define pm_genpd_default_thaw_early NULL -#define pm_genpd_default_thaw NULL - -#endif /* !CONFIG_PM_SLEEP */ - /** * pm_genpd_init - Initialize a generic I/O PM domain object. * @genpd: PM domain object to initialize. 
@@ -2177,15 +1903,452 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->domain.ops.complete = pm_genpd_complete; genpd->dev_ops.save_state = pm_genpd_default_save_state; genpd->dev_ops.restore_state = pm_genpd_default_restore_state; - genpd->dev_ops.suspend = pm_genpd_default_suspend; - genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late; - genpd->dev_ops.resume_early = pm_genpd_default_resume_early; - genpd->dev_ops.resume = pm_genpd_default_resume; - genpd->dev_ops.freeze = pm_genpd_default_freeze; - genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late; - genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early; - genpd->dev_ops.thaw = pm_genpd_default_thaw; mutex_lock(&gpd_list_lock); list_add(&genpd->gpd_list_node, &gpd_list); mutex_unlock(&gpd_list_lock); } + +#ifdef CONFIG_PM_GENERIC_DOMAINS_OF +/* + * Device Tree based PM domain providers. + * + * The code below implements generic device tree based PM domain providers that + * bind device tree nodes with generic PM domains registered in the system. + * + * Any driver that registers generic PM domains and needs to support binding of + * devices to these domains is supposed to register a PM domain provider, which + * maps a PM domain specifier retrieved from the device tree to a PM domain. + * + * Two simple mapping functions have been provided for convenience: + * - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping. + * - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by + * index. + */ + +/** + * struct of_genpd_provider - PM domain provider registration structure + * @link: Entry in global list of PM domain providers + * @node: Pointer to device tree node of PM domain provider + * @xlate: Provider-specific xlate callback mapping a set of specifier cells + * into a PM domain. + * @data: context pointer to be passed into @xlate callback + */ +struct of_genpd_provider { + struct list_head link; + struct device_node *node; + genpd_xlate_t xlate; + void *data; +}; + +/* List of registered PM domain providers. */ +static LIST_HEAD(of_genpd_providers); +/* Mutex to protect the list above. */ +static DEFINE_MUTEX(of_genpd_mutex); + +/** + * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping + * @genpdspec: OF phandle args to map into a PM domain + * @data: xlate function private data - pointer to struct generic_pm_domain + * + * This is a generic xlate function that can be used to model PM domains that + * have their own device tree nodes. The private data of xlate function needs + * to be a valid pointer to struct generic_pm_domain. + */ +struct generic_pm_domain *__of_genpd_xlate_simple( + struct of_phandle_args *genpdspec, + void *data) +{ + if (genpdspec->args_count != 0) + return ERR_PTR(-EINVAL); + return data; +} +EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple); + +/** + * __of_genpd_xlate_onecell() - Xlate function using a single index. + * @genpdspec: OF phandle args to map into a PM domain + * @data: xlate function private data - pointer to struct genpd_onecell_data + * + * This is a generic xlate function that can be used to model simple PM domain + * controllers that have one device tree node and provide multiple PM domains. + * A single cell is used as an index into an array of PM domains specified in + * the genpd_onecell_data struct when registering the provider. 
+ */ +struct generic_pm_domain *__of_genpd_xlate_onecell( + struct of_phandle_args *genpdspec, + void *data) +{ + struct genpd_onecell_data *genpd_data = data; + unsigned int idx = genpdspec->args[0]; + + if (genpdspec->args_count != 1) + return ERR_PTR(-EINVAL); + + if (idx >= genpd_data->num_domains) { + pr_err("%s: invalid domain index %u\n", __func__, idx); + return ERR_PTR(-EINVAL); + } + + if (!genpd_data->domains[idx]) + return ERR_PTR(-ENOENT); + + return genpd_data->domains[idx]; +} +EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell); + +/** + * __of_genpd_add_provider() - Register a PM domain provider for a node + * @np: Device node pointer associated with the PM domain provider. + * @xlate: Callback for decoding PM domain from phandle arguments. + * @data: Context pointer for @xlate callback. + */ +int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, + void *data) +{ + struct of_genpd_provider *cp; + + cp = kzalloc(sizeof(*cp), GFP_KERNEL); + if (!cp) + return -ENOMEM; + + cp->node = of_node_get(np); + cp->data = data; + cp->xlate = xlate; + + mutex_lock(&of_genpd_mutex); + list_add(&cp->link, &of_genpd_providers); + mutex_unlock(&of_genpd_mutex); + pr_debug("Added domain provider from %s\n", np->full_name); + + return 0; +} +EXPORT_SYMBOL_GPL(__of_genpd_add_provider); + +/** + * of_genpd_del_provider() - Remove a previously registered PM domain provider + * @np: Device node pointer associated with the PM domain provider + */ +void of_genpd_del_provider(struct device_node *np) +{ + struct of_genpd_provider *cp; + + mutex_lock(&of_genpd_mutex); + list_for_each_entry(cp, &of_genpd_providers, link) { + if (cp->node == np) { + list_del(&cp->link); + of_node_put(cp->node); + kfree(cp); + break; + } + } + mutex_unlock(&of_genpd_mutex); +} +EXPORT_SYMBOL_GPL(of_genpd_del_provider); + +/** + * of_genpd_get_from_provider() - Look-up PM domain + * @genpdspec: OF phandle args to use for look-up + * + * Looks for a PM domain provider under the node specified by @genpdspec and if + * found, uses xlate function of the provider to map phandle args to a PM + * domain. + * + * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR() + * on failure. + */ +static struct generic_pm_domain *of_genpd_get_from_provider( + struct of_phandle_args *genpdspec) +{ + struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); + struct of_genpd_provider *provider; + + mutex_lock(&of_genpd_mutex); + + /* Check if we have such a provider in our array */ + list_for_each_entry(provider, &of_genpd_providers, link) { + if (provider->node == genpdspec->np) + genpd = provider->xlate(genpdspec, provider->data); + if (!IS_ERR(genpd)) + break; + } + + mutex_unlock(&of_genpd_mutex); + + return genpd; +} + +/** + * genpd_dev_pm_detach - Detach a device from its PM domain. + * @dev: Device to attach. + * @power_off: Currently not used + * + * Try to locate a corresponding generic PM domain, which the device was + * attached to previously. If such is found, the device is detached from it. 
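An illustrative sketch, not part of the patch above, of how a platform driver that owns a single PM domain might expose it to DT consumers through the provider API just added; all names are hypothetical, and the domain is assumed to be set up with pm_genpd_init() elsewhere in the driver:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

/* Hypothetical domain; its power_on/power_off callbacks and the call to
 * pm_genpd_init() are assumed to live elsewhere in this driver. */
static struct generic_pm_domain my_pd = {
	.name = "my_pd",
};

static int my_pd_probe(struct platform_device *pdev)
{
	/*
	 * 1:1 node-to-domain mapping: the provider node is expected to carry
	 * #power-domain-cells = <0>, so __of_genpd_xlate_simple() simply
	 * returns the domain passed in as @data.
	 */
	return __of_genpd_add_provider(pdev->dev.of_node,
				       __of_genpd_xlate_simple, &my_pd);
}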
+ */ +static void genpd_dev_pm_detach(struct device *dev, bool power_off) +{ + struct generic_pm_domain *pd = NULL, *gpd; + int ret = 0; + + if (!dev->pm_domain) + return; + + mutex_lock(&gpd_list_lock); + list_for_each_entry(gpd, &gpd_list, gpd_list_node) { + if (&gpd->domain == dev->pm_domain) { + pd = gpd; + break; + } + } + mutex_unlock(&gpd_list_lock); + + if (!pd) + return; + + dev_dbg(dev, "removing from PM domain %s\n", pd->name); + + while (1) { + ret = pm_genpd_remove_device(pd, dev); + if (ret != -EAGAIN) + break; + cond_resched(); + } + + if (ret < 0) { + dev_err(dev, "failed to remove from PM domain %s: %d", + pd->name, ret); + return; + } + + /* Check if PM domain can be powered off after removing this device. */ + genpd_queue_power_off_work(pd); +} + +/** + * genpd_dev_pm_attach - Attach a device to its PM domain using DT. + * @dev: Device to attach. + * + * Parse device's OF node to find a PM domain specifier. If such is found, + * attaches the device to retrieved pm_domain ops. + * + * Both generic and legacy Samsung-specific DT bindings are supported to keep + * backwards compatibility with existing DTBs. + * + * Returns 0 on successfully attached PM domain or negative error code. + */ +int genpd_dev_pm_attach(struct device *dev) +{ + struct of_phandle_args pd_args; + struct generic_pm_domain *pd; + int ret; + + if (!dev->of_node) + return -ENODEV; + + if (dev->pm_domain) + return -EEXIST; + + ret = of_parse_phandle_with_args(dev->of_node, "power-domains", + "#power-domain-cells", 0, &pd_args); + if (ret < 0) { + if (ret != -ENOENT) + return ret; + + /* + * Try legacy Samsung-specific bindings + * (for backwards compatibility of DT ABI) + */ + pd_args.args_count = 0; + pd_args.np = of_parse_phandle(dev->of_node, + "samsung,power-domain", 0); + if (!pd_args.np) + return -ENOENT; + } + + pd = of_genpd_get_from_provider(&pd_args); + if (IS_ERR(pd)) { + dev_dbg(dev, "%s() failed to find PM domain: %ld\n", + __func__, PTR_ERR(pd)); + of_node_put(dev->of_node); + return PTR_ERR(pd); + } + + dev_dbg(dev, "adding to PM domain %s\n", pd->name); + + while (1) { + ret = pm_genpd_add_device(pd, dev); + if (ret != -EAGAIN) + break; + cond_resched(); + } + + if (ret < 0) { + dev_err(dev, "failed to add to PM domain %s: %d", + pd->name, ret); + of_node_put(dev->of_node); + return ret; + } + + dev->pm_domain->detach = genpd_dev_pm_detach; + + return 0; +} +EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); +#endif + + +/*** debugfs support ***/ + +#ifdef CONFIG_PM_ADVANCED_DEBUG +#include <linux/pm.h> +#include <linux/device.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/init.h> +#include <linux/kobject.h> +static struct dentry *pm_genpd_debugfs_dir; + +/* + * TODO: This function is a slightly modified version of rtpm_status_show + * from sysfs.c, but dependencies between PM_GENERIC_DOMAINS and PM_RUNTIME + * are too loose to generalize it. 
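A minimal sketch, hypothetical rather than taken from the patch, of how a bus core could call genpd_dev_pm_attach() above before probing a device; my_bus_really_probe() is a made-up helper, and the declarations are assumed to be available from <linux/pm_domain.h>:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_domain.h>

static int my_bus_really_probe(struct device *dev);	/* hypothetical */

static int my_bus_probe(struct device *dev)
{
	int ret;

	/*
	 * -ENODEV (no OF node) and -ENOENT (no power-domains property, or no
	 * provider registered yet) are treated here as "no PM domain" rather
	 * than as probe failures. On success, dev->pm_domain->detach points
	 * at genpd_dev_pm_detach() for use when the device goes away.
	 */
	ret = genpd_dev_pm_attach(dev);
	if (ret && ret != -ENODEV && ret != -ENOENT && ret != -EEXIST)
		return ret;

	return my_bus_really_probe(dev);
}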
+ */ +#ifdef CONFIG_PM_RUNTIME +static void rtpm_status_str(struct seq_file *s, struct device *dev) +{ + static const char * const status_lookup[] = { + [RPM_ACTIVE] = "active", + [RPM_RESUMING] = "resuming", + [RPM_SUSPENDED] = "suspended", + [RPM_SUSPENDING] = "suspending" + }; + const char *p = ""; + + if (dev->power.runtime_error) + p = "error"; + else if (dev->power.disable_depth) + p = "unsupported"; + else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup)) + p = status_lookup[dev->power.runtime_status]; + else + WARN_ON(1); + + seq_puts(s, p); +} +#else +static void rtpm_status_str(struct seq_file *s, struct device *dev) +{ + seq_puts(s, "active"); +} +#endif + +static int pm_genpd_summary_one(struct seq_file *s, + struct generic_pm_domain *gpd) +{ + static const char * const status_lookup[] = { + [GPD_STATE_ACTIVE] = "on", + [GPD_STATE_WAIT_MASTER] = "wait-master", + [GPD_STATE_BUSY] = "busy", + [GPD_STATE_REPEAT] = "off-in-progress", + [GPD_STATE_POWER_OFF] = "off" + }; + struct pm_domain_data *pm_data; + const char *kobj_path; + struct gpd_link *link; + int ret; + + ret = mutex_lock_interruptible(&gpd->lock); + if (ret) + return -ERESTARTSYS; + + if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup))) + goto exit; + seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]); + + /* + * Modifications on the list require holding locks on both + * master and slave, so we are safe. + * Also gpd->name is immutable. + */ + list_for_each_entry(link, &gpd->master_links, master_node) { + seq_printf(s, "%s", link->slave->name); + if (!list_is_last(&link->master_node, &gpd->master_links)) + seq_puts(s, ", "); + } + + list_for_each_entry(pm_data, &gpd->dev_list, list_node) { + kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL); + if (kobj_path == NULL) + continue; + + seq_printf(s, "\n %-50s ", kobj_path); + rtpm_status_str(s, pm_data->dev); + kfree(kobj_path); + } + + seq_puts(s, "\n"); +exit: + mutex_unlock(&gpd->lock); + + return 0; +} + +static int pm_genpd_summary_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *gpd; + int ret = 0; + + seq_puts(s, " domain status slaves\n"); + seq_puts(s, " /device runtime status\n"); + seq_puts(s, "----------------------------------------------------------------------\n"); + + ret = mutex_lock_interruptible(&gpd_list_lock); + if (ret) + return -ERESTARTSYS; + + list_for_each_entry(gpd, &gpd_list, gpd_list_node) { + ret = pm_genpd_summary_one(s, gpd); + if (ret) + break; + } + mutex_unlock(&gpd_list_lock); + + return ret; +} + +static int pm_genpd_summary_open(struct inode *inode, struct file *file) +{ + return single_open(file, pm_genpd_summary_show, NULL); +} + +static const struct file_operations pm_genpd_summary_fops = { + .open = pm_genpd_summary_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init pm_genpd_debug_init(void) +{ + struct dentry *d; + + pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); + + if (!pm_genpd_debugfs_dir) + return -ENOMEM; + + d = debugfs_create_file("pm_genpd_summary", S_IRUGO, + pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops); + if (!d) + return -ENOMEM; + + return 0; +} +late_initcall(pm_genpd_debug_init); + +static void __exit pm_genpd_debug_exit(void) +{ + debugfs_remove_recursive(pm_genpd_debugfs_dir); +} +__exitcall(pm_genpd_debug_exit); +#endif /* CONFIG_PM_ADVANCED_DEBUG */ diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index a089e3bcdfbc..d88a62e104d4 100644 --- 
a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -42,7 +42,7 @@ static int dev_update_qos_constraint(struct device *dev, void *data) * default_stop_ok - Default PM domain governor routine for stopping devices. * @dev: Device to check. */ -bool default_stop_ok(struct device *dev) +static bool default_stop_ok(struct device *dev) { struct gpd_timing_data *td = &dev_gpd_data(dev)->td; unsigned long flags; @@ -229,10 +229,7 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain) #else /* !CONFIG_PM_RUNTIME */ -bool default_stop_ok(struct device *dev) -{ - return false; -} +static inline bool default_stop_ok(struct device *dev) { return false; } #define default_power_down_ok NULL #define always_on_power_down_ok NULL diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index b67d9aef9fe4..44973196d3fd 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -540,7 +540,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie) * Call the "noirq" resume handlers for all devices in dpm_noirq_list and * enable device drivers to receive interrupts. */ -static void dpm_resume_noirq(pm_message_t state) +void dpm_resume_noirq(pm_message_t state) { struct device *dev; ktime_t starttime = ktime_get(); @@ -662,7 +662,7 @@ static void async_resume_early(void *data, async_cookie_t cookie) * dpm_resume_early - Execute "early resume" callbacks for all devices. * @state: PM transition of the system being carried out. */ -static void dpm_resume_early(pm_message_t state) +void dpm_resume_early(pm_message_t state) { struct device *dev; ktime_t starttime = ktime_get(); @@ -1093,7 +1093,7 @@ static int device_suspend_noirq(struct device *dev) * Prevent device drivers from receiving interrupts and call the "noirq" suspend * handlers for all non-sysdev devices. */ -static int dpm_suspend_noirq(pm_message_t state) +int dpm_suspend_noirq(pm_message_t state) { ktime_t starttime = ktime_get(); int error = 0; @@ -1232,7 +1232,7 @@ static int device_suspend_late(struct device *dev) * dpm_suspend_late - Execute "late suspend" callbacks for all devices. * @state: PM transition of the system being carried out. */ -static int dpm_suspend_late(pm_message_t state) +int dpm_suspend_late(pm_message_t state) { ktime_t starttime = ktime_get(); int error = 0; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 95b181d1ca6d..a9d26ed11bf4 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -92,9 +92,6 @@ * wakeup_count - Report the number of wakeup events related to the device */ -static const char enabled[] = "enabled"; -static const char disabled[] = "disabled"; - const char power_group_name[] = "power"; EXPORT_SYMBOL_GPL(power_group_name); @@ -336,11 +333,14 @@ static DEVICE_ATTR(pm_qos_remote_wakeup, 0644, #endif /* CONFIG_PM_RUNTIME */ #ifdef CONFIG_PM_SLEEP +static const char _enabled[] = "enabled"; +static const char _disabled[] = "disabled"; + static ssize_t wake_show(struct device * dev, struct device_attribute *attr, char * buf) { return sprintf(buf, "%s\n", device_can_wakeup(dev) - ? (device_may_wakeup(dev) ? enabled : disabled) + ? (device_may_wakeup(dev) ? 
_enabled : _disabled) : ""); } @@ -357,11 +357,11 @@ wake_store(struct device * dev, struct device_attribute *attr, cp = memchr(buf, '\n', n); if (cp) len = cp - buf; - if (len == sizeof enabled - 1 - && strncmp(buf, enabled, sizeof enabled - 1) == 0) + if (len == sizeof _enabled - 1 + && strncmp(buf, _enabled, sizeof _enabled - 1) == 0) device_set_wakeup_enable(dev, 1); - else if (len == sizeof disabled - 1 - && strncmp(buf, disabled, sizeof disabled - 1) == 0) + else if (len == sizeof _disabled - 1 + && strncmp(buf, _disabled, sizeof _disabled - 1) == 0) device_set_wakeup_enable(dev, 0); else return -EINVAL; @@ -570,7 +570,8 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", - device_async_suspend_enabled(dev) ? enabled : disabled); + device_async_suspend_enabled(dev) ? + _enabled : _disabled); } static ssize_t async_store(struct device *dev, struct device_attribute *attr, @@ -582,9 +583,10 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr, cp = memchr(buf, '\n', n); if (cp) len = cp - buf; - if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0) + if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0) device_enable_async_suspend(dev); - else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0) + else if (len == sizeof _disabled - 1 && + strncmp(buf, _disabled, len) == 0) device_disable_async_suspend(dev); else return -EINVAL; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index eb1bd2ecad8b..c2744b30d5d9 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -24,6 +24,9 @@ */ bool events_check_enabled __read_mostly; +/* If set and the system is suspending, terminate the suspend. */ +static bool pm_abort_suspend __read_mostly; + /* * Combined counters of registered wakeup events and wakeup events in progress. * They need to be modified together atomically, so it's better to use one @@ -719,7 +722,18 @@ bool pm_wakeup_pending(void) pm_print_active_wakeup_sources(); } - return ret; + return ret || pm_abort_suspend; +} + +void pm_system_wakeup(void) +{ + pm_abort_suspend = true; + freeze_wake(); +} + +void pm_wakeup_clear(void) +{ + pm_abort_suspend = false; } /** diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c index dbb8350ea8dc..8d98a329f6ea 100644 --- a/drivers/base/syscore.c +++ b/drivers/base/syscore.c @@ -9,7 +9,7 @@ #include <linux/syscore_ops.h> #include <linux/mutex.h> #include <linux/module.h> -#include <linux/interrupt.h> +#include <linux/suspend.h> #include <trace/events/power.h> static LIST_HEAD(syscore_ops_list); @@ -54,9 +54,8 @@ int syscore_suspend(void) pr_debug("Checking wakeup interrupts\n"); /* Return error code if there are any wakeup interrupts pending. */ - ret = check_wakeup_irqs(); - if (ret) - return ret; + if (pm_wakeup_pending()) + return -EBUSY; WARN_ONCE(!irqs_disabled(), "Interrupts enabled before system core suspend.\n"); diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index ffe350f86bca..3489f8f5fada 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -183,14 +183,14 @@ config CPU_FREQ_GOV_CONSERVATIVE If in doubt, say N. 
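Stepping back to the pm_system_wakeup() / pm_wakeup_clear() helpers added to drivers/base/power/wakeup.c above: a hedged sketch (not from the patch; the handler name is invented, and the declarations are assumed to come from <linux/suspend.h>) of how a wakeup interrupt handler could abort a suspend in progress:

#include <linux/interrupt.h>
#include <linux/suspend.h>

/* Hypothetical wakeup IRQ handler: flag a system wakeup so that any suspend
 * in flight is aborted at the next pm_wakeup_pending() check, such as the
 * one syscore_suspend() now performs. */
static irqreturn_t my_wakeup_irq_handler(int irq, void *dev_id)
{
	pm_system_wakeup();
	return IRQ_HANDLED;
}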
-config GENERIC_CPUFREQ_CPU0 - tristate "Generic CPU0 cpufreq driver" +config CPUFREQ_DT + tristate "Generic DT based cpufreq driver" depends on HAVE_CLK && OF - # if CPU_THERMAL is on and THERMAL=m, CPU0 cannot be =y: + # if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y: depends on !CPU_THERMAL || THERMAL select PM_OPP help - This adds a generic cpufreq driver for CPU0 frequency management. + This adds a generic DT based cpufreq driver for frequency management. It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) systems which share clock and voltage across all CPUs. diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 7364a538e056..48ed28b789f7 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -92,7 +92,7 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW config ARM_HIGHBANK_CPUFREQ tristate "Calxeda Highbank-based" - depends on ARCH_HIGHBANK && GENERIC_CPUFREQ_CPU0 && REGULATOR + depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR default m help This adds the CPUFreq driver for Calxeda Highbank SoC diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index db6d9a2fea4d..40c53dc1937e 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -13,7 +13,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o -obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o +obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o ################################################################################## # x86 drivers. diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c deleted file mode 100644 index 0d2172b07765..000000000000 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Copyright (C) 2012 Freescale Semiconductor, Inc. - * - * The OPP code in function cpu0_set_target() is reused from - * drivers/cpufreq/omap-cpufreq.c - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/clk.h> -#include <linux/cpu.h> -#include <linux/cpu_cooling.h> -#include <linux/cpufreq.h> -#include <linux/cpumask.h> -#include <linux/err.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/pm_opp.h> -#include <linux/platform_device.h> -#include <linux/regulator/consumer.h> -#include <linux/slab.h> -#include <linux/thermal.h> - -static unsigned int transition_latency; -static unsigned int voltage_tolerance; /* in percentage */ - -static struct device *cpu_dev; -static struct clk *cpu_clk; -static struct regulator *cpu_reg; -static struct cpufreq_frequency_table *freq_table; -static struct thermal_cooling_device *cdev; - -static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index) -{ - struct dev_pm_opp *opp; - unsigned long volt = 0, volt_old = 0, tol = 0; - unsigned int old_freq, new_freq; - long freq_Hz, freq_exact; - int ret; - - freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); - if (freq_Hz <= 0) - freq_Hz = freq_table[index].frequency * 1000; - - freq_exact = freq_Hz; - new_freq = freq_Hz / 1000; - old_freq = clk_get_rate(cpu_clk) / 1000; - - if (!IS_ERR(cpu_reg)) { - rcu_read_lock(); - opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); - if (IS_ERR(opp)) { - rcu_read_unlock(); - pr_err("failed to find OPP for %ld\n", freq_Hz); - return PTR_ERR(opp); - } - volt = dev_pm_opp_get_voltage(opp); - rcu_read_unlock(); - tol = volt * voltage_tolerance / 100; - volt_old = regulator_get_voltage(cpu_reg); - } - - pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n", - old_freq / 1000, volt_old ? volt_old / 1000 : -1, - new_freq / 1000, volt ? volt / 1000 : -1); - - /* scaling up? scale voltage before frequency */ - if (!IS_ERR(cpu_reg) && new_freq > old_freq) { - ret = regulator_set_voltage_tol(cpu_reg, volt, tol); - if (ret) { - pr_err("failed to scale voltage up: %d\n", ret); - return ret; - } - } - - ret = clk_set_rate(cpu_clk, freq_exact); - if (ret) { - pr_err("failed to set clock rate: %d\n", ret); - if (!IS_ERR(cpu_reg)) - regulator_set_voltage_tol(cpu_reg, volt_old, tol); - return ret; - } - - /* scaling down? scale voltage after frequency */ - if (!IS_ERR(cpu_reg) && new_freq < old_freq) { - ret = regulator_set_voltage_tol(cpu_reg, volt, tol); - if (ret) { - pr_err("failed to scale voltage down: %d\n", ret); - clk_set_rate(cpu_clk, old_freq * 1000); - } - } - - return ret; -} - -static int cpu0_cpufreq_init(struct cpufreq_policy *policy) -{ - policy->clk = cpu_clk; - return cpufreq_generic_init(policy, freq_table, transition_latency); -} - -static struct cpufreq_driver cpu0_cpufreq_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, - .verify = cpufreq_generic_frequency_table_verify, - .target_index = cpu0_set_target, - .get = cpufreq_generic_get, - .init = cpu0_cpufreq_init, - .name = "generic_cpu0", - .attr = cpufreq_generic_attr, -}; - -static int cpu0_cpufreq_probe(struct platform_device *pdev) -{ - struct device_node *np; - int ret; - - cpu_dev = get_cpu_device(0); - if (!cpu_dev) { - pr_err("failed to get cpu0 device\n"); - return -ENODEV; - } - - np = of_node_get(cpu_dev->of_node); - if (!np) { - pr_err("failed to find cpu0 node\n"); - return -ENOENT; - } - - cpu_reg = regulator_get_optional(cpu_dev, "cpu0"); - if (IS_ERR(cpu_reg)) { - /* - * If cpu0 regulator supply node is present, but regulator is - * not yet registered, we should try defering probe. 
- */ - if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) { - dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n"); - ret = -EPROBE_DEFER; - goto out_put_node; - } - pr_warn("failed to get cpu0 regulator: %ld\n", - PTR_ERR(cpu_reg)); - } - - cpu_clk = clk_get(cpu_dev, NULL); - if (IS_ERR(cpu_clk)) { - ret = PTR_ERR(cpu_clk); - pr_err("failed to get cpu0 clock: %d\n", ret); - goto out_put_reg; - } - - /* OPPs might be populated at runtime, don't check for error here */ - of_init_opp_table(cpu_dev); - - ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); - if (ret) { - pr_err("failed to init cpufreq table: %d\n", ret); - goto out_put_clk; - } - - of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance); - - if (of_property_read_u32(np, "clock-latency", &transition_latency)) - transition_latency = CPUFREQ_ETERNAL; - - if (!IS_ERR(cpu_reg)) { - struct dev_pm_opp *opp; - unsigned long min_uV, max_uV; - int i; - - /* - * OPP is maintained in order of increasing frequency, and - * freq_table initialised from OPP is therefore sorted in the - * same order. - */ - for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) - ; - rcu_read_lock(); - opp = dev_pm_opp_find_freq_exact(cpu_dev, - freq_table[0].frequency * 1000, true); - min_uV = dev_pm_opp_get_voltage(opp); - opp = dev_pm_opp_find_freq_exact(cpu_dev, - freq_table[i-1].frequency * 1000, true); - max_uV = dev_pm_opp_get_voltage(opp); - rcu_read_unlock(); - ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); - if (ret > 0) - transition_latency += ret * 1000; - } - - ret = cpufreq_register_driver(&cpu0_cpufreq_driver); - if (ret) { - pr_err("failed register driver: %d\n", ret); - goto out_free_table; - } - - /* - * For now, just loading the cooling device; - * thermal DT code takes care of matching them. - */ - if (of_find_property(np, "#cooling-cells", NULL)) { - cdev = of_cpufreq_cooling_register(np, cpu_present_mask); - if (IS_ERR(cdev)) - pr_err("running cpufreq without cooling device: %ld\n", - PTR_ERR(cdev)); - } - - of_node_put(np); - return 0; - -out_free_table: - dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); -out_put_clk: - if (!IS_ERR(cpu_clk)) - clk_put(cpu_clk); -out_put_reg: - if (!IS_ERR(cpu_reg)) - regulator_put(cpu_reg); -out_put_node: - of_node_put(np); - return ret; -} - -static int cpu0_cpufreq_remove(struct platform_device *pdev) -{ - cpufreq_cooling_unregister(cdev); - cpufreq_unregister_driver(&cpu0_cpufreq_driver); - dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); - - return 0; -} - -static struct platform_driver cpu0_cpufreq_platdrv = { - .driver = { - .name = "cpufreq-cpu0", - .owner = THIS_MODULE, - }, - .probe = cpu0_cpufreq_probe, - .remove = cpu0_cpufreq_remove, -}; -module_platform_driver(cpu0_cpufreq_platdrv); - -MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); -MODULE_DESCRIPTION("Generic CPU0 cpufreq driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c new file mode 100644 index 000000000000..6bbb8b913446 --- /dev/null +++ b/drivers/cpufreq/cpufreq-dt.c @@ -0,0 +1,364 @@ +/* + * Copyright (C) 2012 Freescale Semiconductor, Inc. + * + * Copyright (C) 2014 Linaro. + * Viresh Kumar <viresh.kumar@linaro.org> + * + * The OPP code in function set_target() is reused from + * drivers/cpufreq/omap-cpufreq.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/clk.h> +#include <linux/cpu.h> +#include <linux/cpu_cooling.h> +#include <linux/cpufreq.h> +#include <linux/cpumask.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/pm_opp.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> +#include <linux/thermal.h> + +struct private_data { + struct device *cpu_dev; + struct regulator *cpu_reg; + struct thermal_cooling_device *cdev; + unsigned int voltage_tolerance; /* in percentage */ +}; + +static int set_target(struct cpufreq_policy *policy, unsigned int index) +{ + struct dev_pm_opp *opp; + struct cpufreq_frequency_table *freq_table = policy->freq_table; + struct clk *cpu_clk = policy->clk; + struct private_data *priv = policy->driver_data; + struct device *cpu_dev = priv->cpu_dev; + struct regulator *cpu_reg = priv->cpu_reg; + unsigned long volt = 0, volt_old = 0, tol = 0; + unsigned int old_freq, new_freq; + long freq_Hz, freq_exact; + int ret; + + freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000); + if (freq_Hz <= 0) + freq_Hz = freq_table[index].frequency * 1000; + + freq_exact = freq_Hz; + new_freq = freq_Hz / 1000; + old_freq = clk_get_rate(cpu_clk) / 1000; + + if (!IS_ERR(cpu_reg)) { + rcu_read_lock(); + opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); + if (IS_ERR(opp)) { + rcu_read_unlock(); + dev_err(cpu_dev, "failed to find OPP for %ld\n", + freq_Hz); + return PTR_ERR(opp); + } + volt = dev_pm_opp_get_voltage(opp); + rcu_read_unlock(); + tol = volt * priv->voltage_tolerance / 100; + volt_old = regulator_get_voltage(cpu_reg); + } + + dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", + old_freq / 1000, volt_old ? volt_old / 1000 : -1, + new_freq / 1000, volt ? volt / 1000 : -1); + + /* scaling up? scale voltage before frequency */ + if (!IS_ERR(cpu_reg) && new_freq > old_freq) { + ret = regulator_set_voltage_tol(cpu_reg, volt, tol); + if (ret) { + dev_err(cpu_dev, "failed to scale voltage up: %d\n", + ret); + return ret; + } + } + + ret = clk_set_rate(cpu_clk, freq_exact); + if (ret) { + dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); + if (!IS_ERR(cpu_reg)) + regulator_set_voltage_tol(cpu_reg, volt_old, tol); + return ret; + } + + /* scaling down? scale voltage after frequency */ + if (!IS_ERR(cpu_reg) && new_freq < old_freq) { + ret = regulator_set_voltage_tol(cpu_reg, volt, tol); + if (ret) { + dev_err(cpu_dev, "failed to scale voltage down: %d\n", + ret); + clk_set_rate(cpu_clk, old_freq * 1000); + } + } + + return ret; +} + +static int allocate_resources(int cpu, struct device **cdev, + struct regulator **creg, struct clk **cclk) +{ + struct device *cpu_dev; + struct regulator *cpu_reg; + struct clk *cpu_clk; + int ret = 0; + char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg; + + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) { + pr_err("failed to get cpu%d device\n", cpu); + return -ENODEV; + } + + /* Try "cpu0" for older DTs */ + if (!cpu) + reg = reg_cpu0; + else + reg = reg_cpu; + +try_again: + cpu_reg = regulator_get_optional(cpu_dev, reg); + if (IS_ERR(cpu_reg)) { + /* + * If cpu's regulator supply node is present, but regulator is + * not yet registered, we should try defering probe. 
+ */ + if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) { + dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n", + cpu); + return -EPROBE_DEFER; + } + + /* Try with "cpu-supply" */ + if (reg == reg_cpu0) { + reg = reg_cpu; + goto try_again; + } + + dev_warn(cpu_dev, "failed to get cpu%d regulator: %ld\n", + cpu, PTR_ERR(cpu_reg)); + } + + cpu_clk = clk_get(cpu_dev, NULL); + if (IS_ERR(cpu_clk)) { + /* put regulator */ + if (!IS_ERR(cpu_reg)) + regulator_put(cpu_reg); + + ret = PTR_ERR(cpu_clk); + + /* + * If cpu's clk node is present, but clock is not yet + * registered, we should try defering probe. + */ + if (ret == -EPROBE_DEFER) + dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu); + else + dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", ret, + cpu); + } else { + *cdev = cpu_dev; + *creg = cpu_reg; + *cclk = cpu_clk; + } + + return ret; +} + +static int cpufreq_init(struct cpufreq_policy *policy) +{ + struct cpufreq_frequency_table *freq_table; + struct thermal_cooling_device *cdev; + struct device_node *np; + struct private_data *priv; + struct device *cpu_dev; + struct regulator *cpu_reg; + struct clk *cpu_clk; + unsigned int transition_latency; + int ret; + + ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk); + if (ret) { + pr_err("%s: Failed to allocate resources\n: %d", __func__, ret); + return ret; + } + + np = of_node_get(cpu_dev->of_node); + if (!np) { + dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu); + ret = -ENOENT; + goto out_put_reg_clk; + } + + /* OPPs might be populated at runtime, don't check for error here */ + of_init_opp_table(cpu_dev); + + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); + if (ret) { + dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); + goto out_put_node; + } + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto out_free_table; + } + + of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance); + + if (of_property_read_u32(np, "clock-latency", &transition_latency)) + transition_latency = CPUFREQ_ETERNAL; + + if (!IS_ERR(cpu_reg)) { + struct dev_pm_opp *opp; + unsigned long min_uV, max_uV; + int i; + + /* + * OPP is maintained in order of increasing frequency, and + * freq_table initialised from OPP is therefore sorted in the + * same order. + */ + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) + ; + rcu_read_lock(); + opp = dev_pm_opp_find_freq_exact(cpu_dev, + freq_table[0].frequency * 1000, true); + min_uV = dev_pm_opp_get_voltage(opp); + opp = dev_pm_opp_find_freq_exact(cpu_dev, + freq_table[i-1].frequency * 1000, true); + max_uV = dev_pm_opp_get_voltage(opp); + rcu_read_unlock(); + ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); + if (ret > 0) + transition_latency += ret * 1000; + } + + /* + * For now, just loading the cooling device; + * thermal DT code takes care of matching them. 
+ */ + if (of_find_property(np, "#cooling-cells", NULL)) { + cdev = of_cpufreq_cooling_register(np, cpu_present_mask); + if (IS_ERR(cdev)) + dev_err(cpu_dev, + "running cpufreq without cooling device: %ld\n", + PTR_ERR(cdev)); + else + priv->cdev = cdev; + } + + priv->cpu_dev = cpu_dev; + priv->cpu_reg = cpu_reg; + policy->driver_data = priv; + + policy->clk = cpu_clk; + ret = cpufreq_generic_init(policy, freq_table, transition_latency); + if (ret) + goto out_cooling_unregister; + + of_node_put(np); + + return 0; + +out_cooling_unregister: + cpufreq_cooling_unregister(priv->cdev); + kfree(priv); +out_free_table: + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); +out_put_node: + of_node_put(np); +out_put_reg_clk: + clk_put(cpu_clk); + if (!IS_ERR(cpu_reg)) + regulator_put(cpu_reg); + + return ret; +} + +static int cpufreq_exit(struct cpufreq_policy *policy) +{ + struct private_data *priv = policy->driver_data; + + cpufreq_cooling_unregister(priv->cdev); + dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); + clk_put(policy->clk); + if (!IS_ERR(priv->cpu_reg)) + regulator_put(priv->cpu_reg); + kfree(priv); + + return 0; +} + +static struct cpufreq_driver dt_cpufreq_driver = { + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = set_target, + .get = cpufreq_generic_get, + .init = cpufreq_init, + .exit = cpufreq_exit, + .name = "cpufreq-dt", + .attr = cpufreq_generic_attr, +}; + +static int dt_cpufreq_probe(struct platform_device *pdev) +{ + struct device *cpu_dev; + struct regulator *cpu_reg; + struct clk *cpu_clk; + int ret; + + /* + * All per-cluster (CPUs sharing clock/voltages) initialization is done + * from ->init(). In probe(), we just need to make sure that clk and + * regulators are available. Else defer probe and retry. + * + * FIXME: Is checking this only for CPU0 sufficient ? 
+ */ + ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk); + if (ret) + return ret; + + clk_put(cpu_clk); + if (!IS_ERR(cpu_reg)) + regulator_put(cpu_reg); + + ret = cpufreq_register_driver(&dt_cpufreq_driver); + if (ret) + dev_err(cpu_dev, "failed register driver: %d\n", ret); + + return ret; +} + +static int dt_cpufreq_remove(struct platform_device *pdev) +{ + cpufreq_unregister_driver(&dt_cpufreq_driver); + return 0; +} + +static struct platform_driver dt_cpufreq_platdrv = { + .driver = { + .name = "cpufreq-dt", + .owner = THIS_MODULE, + }, + .probe = dt_cpufreq_probe, + .remove = dt_cpufreq_remove, +}; +module_platform_driver(dt_cpufreq_platdrv); + +MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); +MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); +MODULE_DESCRIPTION("Generic cpufreq driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 6e93e7f98358..24bf76fba141 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -437,7 +437,7 @@ static struct cpufreq_governor *__find_governor(const char *str_governor) struct cpufreq_governor *t; list_for_each_entry(t, &cpufreq_governor_list, governor_list) - if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN)) + if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN)) return t; return NULL; @@ -455,10 +455,10 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, goto out; if (cpufreq_driver->setpolicy) { - if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { + if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { *policy = CPUFREQ_POLICY_PERFORMANCE; err = 0; - } else if (!strnicmp(str_governor, "powersave", + } else if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) { *policy = CPUFREQ_POLICY_POWERSAVE; err = 0; @@ -1382,7 +1382,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, if (!cpufreq_suspended) pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", __func__, new_cpu, cpu); - } else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) { + } else if (cpufreq_driver->stop_cpu) { cpufreq_driver->stop_cpu(policy); } @@ -1658,10 +1658,8 @@ void cpufreq_suspend(void) if (!cpufreq_driver) return; - cpufreq_suspended = true; - if (!has_target()) - return; + goto suspend; pr_debug("%s: Suspending Governors\n", __func__); @@ -1674,6 +1672,9 @@ void cpufreq_suspend(void) pr_err("%s: Failed to suspend driver: %p\n", __func__, policy); } + +suspend: + cpufreq_suspended = true; } /** diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index 61a54310a1b9..843ec824fd91 100644 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c @@ -127,7 +127,7 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) * dependencies on platform headers. It is necessary to enable * Exynos multi-platform support and will be removed together with * this whole driver as soon as Exynos gets migrated to use - * cpufreq-cpu0 driver. + * cpufreq-dt driver. */ np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock"); if (!np) { diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c index 351a2074cfea..9e78a850e29f 100644 --- a/drivers/cpufreq/exynos4x12-cpufreq.c +++ b/drivers/cpufreq/exynos4x12-cpufreq.c @@ -174,7 +174,7 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) * dependencies on platform headers. 
It is necessary to enable * Exynos multi-platform support and will be removed together with * this whole driver as soon as Exynos gets migrated to use - * cpufreq-cpu0 driver. + * cpufreq-dt driver. */ np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock"); if (!np) { diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c index c91ce69dc631..3eafdc7ba787 100644 --- a/drivers/cpufreq/exynos5250-cpufreq.c +++ b/drivers/cpufreq/exynos5250-cpufreq.c @@ -153,7 +153,7 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) * dependencies on platform headers. It is necessary to enable * Exynos multi-platform support and will be removed together with * this whole driver as soon as Exynos gets migrated to use - * cpufreq-cpu0 driver. + * cpufreq-dt driver. */ np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock"); if (!np) { diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c index bf8902a0866d..ec399ad2f059 100644 --- a/drivers/cpufreq/highbank-cpufreq.c +++ b/drivers/cpufreq/highbank-cpufreq.c @@ -6,7 +6,7 @@ * published by the Free Software Foundation. * * This driver provides the clk notifier callbacks that are used when - * the cpufreq-cpu0 driver changes to frequency to alert the highbank + * the cpufreq-dt driver changes to frequency to alert the highbank * EnergyCore Management Engine (ECME) about the need to change * voltage. The ECME interfaces with the actual voltage regulators. */ @@ -60,7 +60,7 @@ static struct notifier_block hb_cpufreq_clk_nb = { static int hb_cpufreq_driver_init(void) { - struct platform_device_info devinfo = { .name = "cpufreq-cpu0", }; + struct platform_device_info devinfo = { .name = "cpufreq-dt", }; struct device *cpu_dev; struct clk *cpu_clk; struct device_node *np; @@ -95,7 +95,7 @@ static int hb_cpufreq_driver_init(void) goto out_put_node; } - /* Instantiate cpufreq-cpu0 */ + /* Instantiate cpufreq-dt */ platform_device_register_full(&devinfo); out_put_node: diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c index c1320528b9d0..6bd69adc3c5e 100644 --- a/drivers/cpufreq/integrator-cpufreq.c +++ b/drivers/cpufreq/integrator-cpufreq.c @@ -213,9 +213,9 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev) return cpufreq_register_driver(&integrator_driver); } -static void __exit integrator_cpufreq_remove(struct platform_device *pdev) +static int __exit integrator_cpufreq_remove(struct platform_device *pdev) { - cpufreq_unregister_driver(&integrator_driver); + return cpufreq_unregister_driver(&integrator_driver); } static const struct of_device_id integrator_cpufreq_match[] = { diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 728a2d879499..4d2c8e861089 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -204,7 +204,6 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, u32 input_buffer; int cpu; - spin_lock(&pcc_lock); cpu = policy->cpu; pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); @@ -216,6 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, freqs.old = policy->cur; freqs.new = target_freq; cpufreq_freq_transition_begin(policy, &freqs); + spin_lock(&pcc_lock); input_buffer = 0x1 | (((target_freq * 100) / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 379c0837f5a9..2dfd4fdb5a52 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ 
b/drivers/cpufreq/powernv-cpufreq.c @@ -26,6 +26,7 @@ #include <linux/cpufreq.h> #include <linux/smp.h> #include <linux/of.h> +#include <linux/reboot.h> #include <asm/cputhreads.h> #include <asm/firmware.h> @@ -35,6 +36,7 @@ #define POWERNV_MAX_PSTATES 256 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; +static bool rebooting; /* * Note: The set of pstates consists of contiguous integers, the @@ -284,6 +286,15 @@ static void set_pstate(void *freq_data) } /* + * get_nominal_index: Returns the index corresponding to the nominal + * pstate in the cpufreq table + */ +static inline unsigned int get_nominal_index(void) +{ + return powernv_pstate_info.max - powernv_pstate_info.nominal; +} + +/* * powernv_cpufreq_target_index: Sets the frequency corresponding to * the cpufreq table entry indexed by new_index on the cpus in the * mask policy->cpus @@ -293,6 +304,9 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy, { struct powernv_smp_call_data freq_data; + if (unlikely(rebooting) && new_index != get_nominal_index()) + return 0; + freq_data.pstate_id = powernv_freqs[new_index].driver_data; /* @@ -317,6 +331,33 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy) return cpufreq_table_validate_and_show(policy, powernv_freqs); } +static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb, + unsigned long action, void *unused) +{ + int cpu; + struct cpufreq_policy cpu_policy; + + rebooting = true; + for_each_online_cpu(cpu) { + cpufreq_get_policy(&cpu_policy, cpu); + powernv_cpufreq_target_index(&cpu_policy, get_nominal_index()); + } + + return NOTIFY_DONE; +} + +static struct notifier_block powernv_cpufreq_reboot_nb = { + .notifier_call = powernv_cpufreq_reboot_notifier, +}; + +static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy) +{ + struct powernv_smp_call_data freq_data; + + freq_data.pstate_id = powernv_pstate_info.min; + smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1); +} + static struct cpufreq_driver powernv_cpufreq_driver = { .name = "powernv-cpufreq", .flags = CPUFREQ_CONST_LOOPS, @@ -324,6 +365,7 @@ static struct cpufreq_driver powernv_cpufreq_driver = { .verify = cpufreq_generic_frequency_table_verify, .target_index = powernv_cpufreq_target_index, .get = powernv_cpufreq_get, + .stop_cpu = powernv_cpufreq_stop_cpu, .attr = powernv_cpu_freq_attr, }; @@ -342,12 +384,14 @@ static int __init powernv_cpufreq_init(void) return rc; } + register_reboot_notifier(&powernv_cpufreq_reboot_nb); return cpufreq_register_driver(&powernv_cpufreq_driver); } module_init(powernv_cpufreq_init); static void __exit powernv_cpufreq_exit(void) { + unregister_reboot_notifier(&powernv_cpufreq_reboot_nb); cpufreq_unregister_driver(&powernv_cpufreq_driver); } module_exit(powernv_cpufreq_exit); diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c index 3607070797af..bee5df7794d3 100644 --- a/drivers/cpufreq/ppc-corenet-cpufreq.c +++ b/drivers/cpufreq/ppc-corenet-cpufreq.c @@ -199,7 +199,6 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) } data->table = table; - per_cpu(cpu_data, cpu) = data; /* update ->cpus if we have cluster, no harm if not */ cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu)); diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 3f9791f07b8e..567caa6313ff 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -597,7 +597,7 @@ static int s5pv210_cpufreq_probe(struct 
platform_device *pdev) * and dependencies on platform headers. It is necessary to enable * S5PV210 multi-platform support and will be removed together with * this whole driver as soon as S5PV210 gets migrated to use - * cpufreq-cpu0 driver. + * cpufreq-dt driver. */ np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock"); if (!np) { diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig index 32748c36c477..c5029c1209b4 100644 --- a/drivers/cpuidle/Kconfig +++ b/drivers/cpuidle/Kconfig @@ -25,11 +25,19 @@ config CPU_IDLE_GOV_MENU bool "Menu governor (for tickless system)" default y +config DT_IDLE_STATES + bool + menu "ARM CPU Idle Drivers" depends on ARM source "drivers/cpuidle/Kconfig.arm" endmenu +menu "ARM64 CPU Idle Drivers" +depends on ARM64 +source "drivers/cpuidle/Kconfig.arm64" +endmenu + menu "MIPS CPU Idle Drivers" depends on MIPS source "drivers/cpuidle/Kconfig.mips" diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index 38cff69ffe06..e339c7f2c2b7 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm @@ -7,6 +7,7 @@ config ARM_BIG_LITTLE_CPUIDLE depends on MCPM select ARM_CPU_SUSPEND select CPU_IDLE_MULTIPLE_DRIVERS + select DT_IDLE_STATES help Select this option to enable CPU idle driver for big.LITTLE based ARM systems. Driver manages CPUs coordination through MCPM and diff --git a/drivers/cpuidle/Kconfig.arm64 b/drivers/cpuidle/Kconfig.arm64 new file mode 100644 index 000000000000..d0a08ed1b2ee --- /dev/null +++ b/drivers/cpuidle/Kconfig.arm64 @@ -0,0 +1,14 @@ +# +# ARM64 CPU Idle drivers +# + +config ARM64_CPUIDLE + bool "Generic ARM64 CPU idle Driver" + select ARM64_CPU_SUSPEND + select DT_IDLE_STATES + help + Select this to enable generic cpuidle driver for ARM64. + It provides a generic idle driver whose idle states are configured + at run-time through DT nodes. The CPUidle suspend backend is + initialized by calling the CPU operations init idle hook + provided by architecture code. diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index 11edb31c55e9..4d177b916f75 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile @@ -4,6 +4,7 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o +obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o ################################################################################## # ARM SoC drivers @@ -22,6 +23,10 @@ obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o ############################################################################### +# ARM64 drivers +obj-$(CONFIG_ARM64_CPUIDLE) += cpuidle-arm64.o + +############################################################################### # POWERPC drivers obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o diff --git a/drivers/cpuidle/cpuidle-arm64.c b/drivers/cpuidle/cpuidle-arm64.c new file mode 100644 index 000000000000..50997ea942fc --- /dev/null +++ b/drivers/cpuidle/cpuidle-arm64.c @@ -0,0 +1,133 @@ +/* + * ARM64 generic CPU idle driver. + * + * Copyright (C) 2014 ARM Ltd. + * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) "CPUidle arm64: " fmt + +#include <linux/cpuidle.h> +#include <linux/cpumask.h> +#include <linux/cpu_pm.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> + +#include <asm/cpuidle.h> +#include <asm/suspend.h> + +#include "dt_idle_states.h" + +/* + * arm64_enter_idle_state - Programs CPU to enter the specified state + * + * dev: cpuidle device + * drv: cpuidle driver + * idx: state index + * + * Called from the CPUidle framework to program the device to the + * specified target state selected by the governor. + */ +static int arm64_enter_idle_state(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int idx) +{ + int ret; + + if (!idx) { + cpu_do_idle(); + return idx; + } + + ret = cpu_pm_enter(); + if (!ret) { + /* + * Pass idle state index to cpu_suspend which in turn will + * call the CPU ops suspend protocol with idle index as a + * parameter. + */ + ret = cpu_suspend(idx); + + cpu_pm_exit(); + } + + return ret ? -1 : idx; +} + +static struct cpuidle_driver arm64_idle_driver = { + .name = "arm64_idle", + .owner = THIS_MODULE, + /* + * State at index 0 is standby wfi and considered standard + * on all ARM platforms. If in some platforms simple wfi + * can't be used as "state 0", DT bindings must be implemented + * to work around this issue and allow installing a special + * handler for idle state index 0. + */ + .states[0] = { + .enter = arm64_enter_idle_state, + .exit_latency = 1, + .target_residency = 1, + .power_usage = UINT_MAX, + .flags = CPUIDLE_FLAG_TIME_VALID, + .name = "WFI", + .desc = "ARM64 WFI", + } +}; + +static const struct of_device_id arm64_idle_state_match[] __initconst = { + { .compatible = "arm,idle-state", + .data = arm64_enter_idle_state }, + { }, +}; + +/* + * arm64_idle_init + * + * Registers the arm64 specific cpuidle driver with the cpuidle + * framework. It relies on core code to parse the idle states + * and initialize them using driver data structures accordingly. + */ +static int __init arm64_idle_init(void) +{ + int cpu, ret; + struct cpuidle_driver *drv = &arm64_idle_driver; + + /* + * Initialize idle states data, starting at index 1. + * This driver is DT only, if no DT idle states are detected (ret == 0) + * let the driver initialization fail accordingly since there is no + * reason to initialize the idle driver if only wfi is supported. + */ + ret = dt_init_idle_driver(drv, arm64_idle_state_match, 1); + if (ret <= 0) { + if (ret) + pr_err("failed to initialize idle states\n"); + return ret ? 
: -ENODEV; + } + + /* + * Call arch CPU operations in order to initialize + * idle states suspend back-end specific data + */ + for_each_possible_cpu(cpu) { + ret = cpu_init_idle(cpu); + if (ret) { + pr_err("CPU %d failed to init idle CPU ops\n", cpu); + return ret; + } + } + + ret = cpuidle_register(drv, NULL); + if (ret) { + pr_err("failed to register cpuidle driver\n"); + return ret; + } + + return 0; +} +device_initcall(arm64_idle_init); diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c index ef94c3b81f18..fbc00a1d3c48 100644 --- a/drivers/cpuidle/cpuidle-big_little.c +++ b/drivers/cpuidle/cpuidle-big_little.c @@ -24,6 +24,8 @@ #include <asm/smp_plat.h> #include <asm/suspend.h> +#include "dt_idle_states.h" + static int bl_enter_powerdown(struct cpuidle_device *dev, struct cpuidle_driver *drv, int idx); @@ -73,6 +75,12 @@ static struct cpuidle_driver bl_idle_little_driver = { .state_count = 2, }; +static const struct of_device_id bl_idle_state_match[] __initconst = { + { .compatible = "arm,idle-state", + .data = bl_enter_powerdown }, + { }, +}; + static struct cpuidle_driver bl_idle_big_driver = { .name = "big_idle", .owner = THIS_MODULE, @@ -159,6 +167,7 @@ static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id) static const struct of_device_id compatible_machine_match[] = { { .compatible = "arm,vexpress,v2p-ca15_a7" }, { .compatible = "samsung,exynos5420" }, + { .compatible = "samsung,exynos5800" }, {}, }; @@ -190,6 +199,17 @@ static int __init bl_idle_init(void) if (ret) goto out_uninit_little; + /* Start at index 1, index 0 standard WFI */ + ret = dt_init_idle_driver(&bl_idle_big_driver, bl_idle_state_match, 1); + if (ret < 0) + goto out_uninit_big; + + /* Start at index 1, index 0 standard WFI */ + ret = dt_init_idle_driver(&bl_idle_little_driver, + bl_idle_state_match, 1); + if (ret < 0) + goto out_uninit_big; + ret = cpuidle_register(&bl_idle_little_driver, NULL); if (ret) goto out_uninit_big; diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c new file mode 100644 index 000000000000..52f4d11bbf3f --- /dev/null +++ b/drivers/cpuidle/dt_idle_states.c @@ -0,0 +1,213 @@ +/* + * DT idle states parsing code. + * + * Copyright (C) 2014 ARM Ltd. + * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) "DT idle-states: " fmt + +#include <linux/cpuidle.h> +#include <linux/cpumask.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> + +#include "dt_idle_states.h" + +static int init_state_node(struct cpuidle_state *idle_state, + const struct of_device_id *matches, + struct device_node *state_node) +{ + int err; + const struct of_device_id *match_id; + + match_id = of_match_node(matches, state_node); + if (!match_id) + return -ENODEV; + /* + * CPUidle drivers are expected to initialize the const void *data + * pointer of the passed in struct of_device_id array to the idle + * state enter function. 
+ */ + idle_state->enter = match_id->data; + + err = of_property_read_u32(state_node, "wakeup-latency-us", + &idle_state->exit_latency); + if (err) { + u32 entry_latency, exit_latency; + + err = of_property_read_u32(state_node, "entry-latency-us", + &entry_latency); + if (err) { + pr_debug(" * %s missing entry-latency-us property\n", + state_node->full_name); + return -EINVAL; + } + + err = of_property_read_u32(state_node, "exit-latency-us", + &exit_latency); + if (err) { + pr_debug(" * %s missing exit-latency-us property\n", + state_node->full_name); + return -EINVAL; + } + /* + * If wakeup-latency-us is missing, default to entry+exit + * latencies as defined in idle states bindings + */ + idle_state->exit_latency = entry_latency + exit_latency; + } + + err = of_property_read_u32(state_node, "min-residency-us", + &idle_state->target_residency); + if (err) { + pr_debug(" * %s missing min-residency-us property\n", + state_node->full_name); + return -EINVAL; + } + + idle_state->flags = CPUIDLE_FLAG_TIME_VALID; + if (of_property_read_bool(state_node, "local-timer-stop")) + idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP; + /* + * TODO: + * replace with kstrdup and pointer assignment when name + * and desc become string pointers + */ + strncpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN - 1); + strncpy(idle_state->desc, state_node->name, CPUIDLE_DESC_LEN - 1); + return 0; +} + +/* + * Check that the idle state is uniform across all CPUs in the CPUidle driver + * cpumask + */ +static bool idle_state_valid(struct device_node *state_node, unsigned int idx, + const cpumask_t *cpumask) +{ + int cpu; + struct device_node *cpu_node, *curr_state_node; + bool valid = true; + + /* + * Compare idle state phandles for index idx on all CPUs in the + * CPUidle driver cpumask. Start from next logical cpu following + * cpumask_first(cpumask) since that's the CPU state_node was + * retrieved from. If a mismatch is found bail out straight + * away since we certainly hit a firmware misconfiguration. + */ + for (cpu = cpumask_next(cpumask_first(cpumask), cpumask); + cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) { + cpu_node = of_cpu_device_node_get(cpu); + curr_state_node = of_parse_phandle(cpu_node, "cpu-idle-states", + idx); + if (state_node != curr_state_node) + valid = false; + + of_node_put(curr_state_node); + of_node_put(cpu_node); + if (!valid) + break; + } + + return valid; +} + +/** + * dt_init_idle_driver() - Parse the DT idle states and initialize the + * idle driver states array + * @drv: Pointer to CPU idle driver to be initialized + * @matches: Array of of_device_id match structures to search in for + * compatible idle state nodes. The data pointer for each valid + * struct of_device_id entry in the matches array must point to + * a function with the following signature, that corresponds to + * the CPUidle state enter function signature: + * + * int (*)(struct cpuidle_device *dev, + * struct cpuidle_driver *drv, + * int index); + * + * @start_idx: First idle state index to be initialized + * + * If DT idle states are detected and are valid the state count and states + * array entries in the cpuidle driver are initialized accordingly starting + * from index start_idx. 
+ * + * Return: number of valid DT idle states parsed, <0 on failure + */ +int dt_init_idle_driver(struct cpuidle_driver *drv, + const struct of_device_id *matches, + unsigned int start_idx) +{ + struct cpuidle_state *idle_state; + struct device_node *state_node, *cpu_node; + int i, err = 0; + const cpumask_t *cpumask; + unsigned int state_idx = start_idx; + + if (state_idx >= CPUIDLE_STATE_MAX) + return -EINVAL; + /* + * We get the idle states for the first logical cpu in the + * driver mask (or cpu_possible_mask if the driver cpumask is not set) + * and we check through idle_state_valid() if they are uniform + * across CPUs, otherwise we hit a firmware misconfiguration. + */ + cpumask = drv->cpumask ? : cpu_possible_mask; + cpu_node = of_cpu_device_node_get(cpumask_first(cpumask)); + + for (i = 0; ; i++) { + state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); + if (!state_node) + break; + + if (!idle_state_valid(state_node, i, cpumask)) { + pr_warn("%s idle state not valid, bailing out\n", + state_node->full_name); + err = -EINVAL; + break; + } + + if (state_idx == CPUIDLE_STATE_MAX) { + pr_warn("State index reached static CPU idle driver states array size\n"); + break; + } + + idle_state = &drv->states[state_idx++]; + err = init_state_node(idle_state, matches, state_node); + if (err) { + pr_err("Parsing idle state node %s failed with err %d\n", + state_node->full_name, err); + err = -EINVAL; + break; + } + of_node_put(state_node); + } + + of_node_put(state_node); + of_node_put(cpu_node); + if (err) + return err; + /* + * Update the driver state count only if some valid DT idle states + * were detected + */ + if (i) + drv->state_count = state_idx; + + /* + * Return the number of present and valid DT idle states, which can + * also be 0 on platforms with missing DT idle states or legacy DT + * configuration predating the DT idle states bindings. + */ + return i; +} +EXPORT_SYMBOL_GPL(dt_init_idle_driver); diff --git a/drivers/cpuidle/dt_idle_states.h b/drivers/cpuidle/dt_idle_states.h new file mode 100644 index 000000000000..4818134bc65b --- /dev/null +++ b/drivers/cpuidle/dt_idle_states.h @@ -0,0 +1,7 @@ +#ifndef __DT_IDLE_STATES +#define __DT_IDLE_STATES + +int dt_init_idle_driver(struct cpuidle_driver *drv, + const struct of_device_id *matches, + unsigned int start_idx); +#endif diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index ca89412f5122..fb9f511cca23 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c @@ -28,7 +28,7 @@ static struct cpuidle_governor * __cpuidle_find_governor(const char *str) struct cpuidle_governor *gov; list_for_each_entry(gov, &cpuidle_governors, governor_list) - if (!strnicmp(str, gov->name, CPUIDLE_NAME_LEN)) + if (!strncasecmp(str, gov->name, CPUIDLE_NAME_LEN)) return gov; return NULL; diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index 3dced0a9eae3..faf4e70c42e0 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -78,9 +78,8 @@ config ARM_EXYNOS4_BUS_DEVFREQ This does not yet operate with optimal voltages. 
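[Editorial aside, not part of any patch above.] The new dt_idle_states helper introduced above is meant to be driven by a platform cpuidle driver in the same way the arm64 and big.LITTLE drivers earlier in this series do it. The sketch below condenses that consumer pattern; the foo_* names are invented for illustration, only states[1..] are filled from the devicetree, and state 0 remains the driver's own WFI-style entry.

#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>

#include "dt_idle_states.h"

/* Illustrative enter hook; a real driver would call into its PM back-end. */
static int foo_enter_idle_state(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int idx)
{
        /* Program the hypothetical power controller for state 'idx' here. */
        return idx;
}

static struct cpuidle_driver foo_idle_driver = {
        .name = "foo_idle",
        .owner = THIS_MODULE,
        /* State 0 stays a plain WFI-style state provided by the driver. */
        .states[0] = {
                .enter            = foo_enter_idle_state,
                .exit_latency     = 1,
                .target_residency = 1,
                .power_usage      = UINT_MAX,
                .flags            = CPUIDLE_FLAG_TIME_VALID,
                .name             = "WFI",
                .desc             = "hypothetical WFI state",
        },
};

/* The .data pointer must be the enter callback; init_state_node() copies it. */
static const struct of_device_id foo_idle_state_match[] __initconst = {
        { .compatible = "arm,idle-state", .data = foo_enter_idle_state },
        { },
};

static int __init foo_idle_init(void)
{
        int ret;

        /* Fill states[1..] from the CPUs' "cpu-idle-states" phandles. */
        ret = dt_init_idle_driver(&foo_idle_driver, foo_idle_state_match, 1);
        if (ret <= 0)
                return ret ? : -ENODEV;

        return cpuidle_register(&foo_idle_driver, NULL);
}
device_initcall(foo_idle_init);

dt_init_idle_driver() returns the number of DT idle states it parsed, so a zero return means only WFI is available and the driver bails out instead of registering, mirroring the arm64 driver above.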
config ARM_EXYNOS5_BUS_DEVFREQ - bool "ARM Exynos5250 Bus DEVFREQ Driver" + tristate "ARM Exynos5250 Bus DEVFREQ Driver" depends on SOC_EXYNOS5250 - select ARCH_HAS_OPP select DEVFREQ_GOV_SIMPLE_ONDEMAND select PM_OPP help diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 9f90369dd6bd..30b538d8cc90 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -1119,6 +1119,7 @@ struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, return opp; } +EXPORT_SYMBOL(devfreq_recommended_opp); /** * devfreq_register_opp_notifier() - Helper function to get devfreq notified @@ -1142,6 +1143,7 @@ int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) return ret; } +EXPORT_SYMBOL(devfreq_register_opp_notifier); /** * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq @@ -1168,6 +1170,7 @@ int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) return ret; } +EXPORT_SYMBOL(devfreq_unregister_opp_notifier); static void devm_devfreq_opp_release(struct device *dev, void *res) { diff --git a/drivers/devfreq/exynos/exynos_ppmu.c b/drivers/devfreq/exynos/exynos_ppmu.c index 75fcc5140ffb..97b75e513d29 100644 --- a/drivers/devfreq/exynos/exynos_ppmu.c +++ b/drivers/devfreq/exynos/exynos_ppmu.c @@ -73,6 +73,7 @@ void busfreq_mon_reset(struct busfreq_ppmu_data *ppmu_data) exynos_ppmu_start(ppmu_base); } } +EXPORT_SYMBOL(busfreq_mon_reset); void exynos_read_ppmu(struct busfreq_ppmu_data *ppmu_data) { @@ -97,6 +98,7 @@ void exynos_read_ppmu(struct busfreq_ppmu_data *ppmu_data) busfreq_mon_reset(ppmu_data); } +EXPORT_SYMBOL(exynos_read_ppmu); int exynos_get_busier_ppmu(struct busfreq_ppmu_data *ppmu_data) { @@ -114,3 +116,4 @@ int exynos_get_busier_ppmu(struct busfreq_ppmu_data *ppmu_data) return busy; } +EXPORT_SYMBOL(exynos_get_busier_ppmu); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 1411613f2174..e42925f76b4b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1310,6 +1310,16 @@ void i915_check_and_clear_faults(struct drm_device *dev) POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); } +static void i915_ggtt_flush(struct drm_i915_private *dev_priv) +{ + if (INTEL_INFO(dev_priv->dev)->gen < 6) { + intel_gtt_chipset_flush(); + } else { + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); + } +} + void i915_gem_suspend_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1326,6 +1336,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) dev_priv->gtt.base.start, dev_priv->gtt.base.total, true); + + i915_ggtt_flush(dev_priv); } void i915_gem_restore_gtt_mappings(struct drm_device *dev) @@ -1378,7 +1390,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base)); } - i915_gem_chipset_flush(dev); + i915_ggtt_flush(dev_priv); } int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index ca52ad2ae7d1..d8de1d5140a7 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -396,6 +396,16 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) return -EINVAL; } +/* + * If the vendor backlight interface is not in use and ACPI backlight interface + * is broken, do not bother processing backlight change 
requests from firmware. + */ +static bool should_ignore_backlight_request(void) +{ + return acpi_video_backlight_support() && + !acpi_video_verify_backlight_support(); +} + static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -404,11 +414,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); - /* - * If the acpi_video interface is not supposed to be used, don't - * bother processing backlight level change requests from firmware. - */ - if (!acpi_video_verify_backlight_support()) { + if (should_ignore_backlight_request()) { DRM_DEBUG_KMS("opregion backlight request ignored\n"); return 0; } diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 4b5bb5d58a54..f8cbb512132f 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -1763,9 +1763,10 @@ nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp const int or = ffs(outp->or) - 1; const u32 loff = (or * 0x800) + (link * 0x80); const u16 mask = (outp->sorconf.link << 6) | outp->or; + struct dcb_output match; u8 ver, hdr; - if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp)) + if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match)) nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000); } diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 99cd9e4a2aa6..3440fc999f2f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -285,6 +285,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) struct nouveau_software_chan *swch; struct nv_dma_v0 args = {}; int ret, i; + bool save; nvif_object_map(chan->object); @@ -386,7 +387,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) } /* initialise synchronisation */ - return nouveau_fence(chan->drm)->context_new(chan); + save = cli->base.super; + cli->base.super = true; /* hack until fencenv50 fixed */ + ret = nouveau_fence(chan->drm)->context_new(chan); + cli->base.super = save; + return ret; } int diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 65b4fd53dd4e..4a21b2b06ce2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -550,14 +550,12 @@ nouveau_display_destroy(struct drm_device *dev) } int -nouveau_display_suspend(struct drm_device *dev) +nouveau_display_suspend(struct drm_device *dev, bool runtime) { - struct nouveau_drm *drm = nouveau_drm(dev); struct drm_crtc *crtc; nouveau_display_fini(dev); - NV_INFO(drm, "unpinning framebuffer(s)...\n"); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_framebuffer *nouveau_fb; @@ -579,12 +577,13 @@ nouveau_display_suspend(struct drm_device *dev) } void -nouveau_display_repin(struct drm_device *dev) +nouveau_display_resume(struct drm_device *dev, bool runtime) { struct nouveau_drm *drm = nouveau_drm(dev); struct drm_crtc *crtc; - int ret; + int ret, head; + /* re-pin fb/cursors */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_framebuffer *nouveau_fb; @@ -606,13 +605,6 @@ nouveau_display_repin(struct drm_device *dev) if (ret) NV_ERROR(drm, "Could not pin/map cursor.\n"); } -} - -void -nouveau_display_resume(struct drm_device *dev) -{ - struct drm_crtc *crtc; - 
int head; nouveau_display_init(dev); @@ -627,6 +619,13 @@ nouveau_display_resume(struct drm_device *dev) for (head = 0; head < dev->mode_config.num_crtc; head++) drm_vblank_on(dev, head); + /* This should ensure we don't hit a locking problem when someone + * wakes us up via a connector. We should never go into suspend + * while the display is on anyways. + */ + if (runtime) + return; + drm_helper_resume_force_mode(dev); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 88ca177cb1c7..be3d5947c6be 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -63,9 +63,8 @@ int nouveau_display_create(struct drm_device *dev); void nouveau_display_destroy(struct drm_device *dev); int nouveau_display_init(struct drm_device *dev); void nouveau_display_fini(struct drm_device *dev); -int nouveau_display_suspend(struct drm_device *dev); -void nouveau_display_repin(struct drm_device *dev); -void nouveau_display_resume(struct drm_device *dev); +int nouveau_display_suspend(struct drm_device *dev, bool runtime); +void nouveau_display_resume(struct drm_device *dev, bool runtime); int nouveau_display_vblank_enable(struct drm_device *, int); void nouveau_display_vblank_disable(struct drm_device *, int); int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int, diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 9c3af96a7153..3ed32dd90303 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -547,9 +547,11 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) struct nouveau_cli *cli; int ret; - if (dev->mode_config.num_crtc && !runtime) { + if (dev->mode_config.num_crtc) { + NV_INFO(drm, "suspending console...\n"); + nouveau_fbcon_set_suspend(dev, 1); NV_INFO(drm, "suspending display...\n"); - ret = nouveau_display_suspend(dev); + ret = nouveau_display_suspend(dev, runtime); if (ret) return ret; } @@ -603,7 +605,7 @@ fail_client: fail_display: if (dev->mode_config.num_crtc) { NV_INFO(drm, "resuming display...\n"); - nouveau_display_resume(dev); + nouveau_display_resume(dev, runtime); } return ret; } @@ -618,9 +620,6 @@ int nouveau_pmops_suspend(struct device *dev) drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF) return 0; - if (drm_dev->mode_config.num_crtc) - nouveau_fbcon_set_suspend(drm_dev, 1); - ret = nouveau_do_suspend(drm_dev, false); if (ret) return ret; @@ -633,7 +632,7 @@ int nouveau_pmops_suspend(struct device *dev) } static int -nouveau_do_resume(struct drm_device *dev) +nouveau_do_resume(struct drm_device *dev, bool runtime) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_cli *cli; @@ -658,7 +657,9 @@ nouveau_do_resume(struct drm_device *dev) if (dev->mode_config.num_crtc) { NV_INFO(drm, "resuming display...\n"); - nouveau_display_repin(dev); + nouveau_display_resume(dev, runtime); + NV_INFO(drm, "resuming console...\n"); + nouveau_fbcon_set_suspend(dev, 0); } return 0; @@ -681,47 +682,21 @@ int nouveau_pmops_resume(struct device *dev) return ret; pci_set_master(pdev); - ret = nouveau_do_resume(drm_dev); - if (ret) - return ret; - - if (drm_dev->mode_config.num_crtc) { - nouveau_display_resume(drm_dev); - nouveau_fbcon_set_suspend(drm_dev, 0); - } - - return 0; + return nouveau_do_resume(drm_dev, false); } static int nouveau_pmops_freeze(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); 
struct drm_device *drm_dev = pci_get_drvdata(pdev); - int ret; - - if (drm_dev->mode_config.num_crtc) - nouveau_fbcon_set_suspend(drm_dev, 1); - - ret = nouveau_do_suspend(drm_dev, false); - return ret; + return nouveau_do_suspend(drm_dev, false); } static int nouveau_pmops_thaw(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - int ret; - - ret = nouveau_do_resume(drm_dev); - if (ret) - return ret; - - if (drm_dev->mode_config.num_crtc) { - nouveau_display_resume(drm_dev); - nouveau_fbcon_set_suspend(drm_dev, 0); - } - - return 0; + return nouveau_do_resume(drm_dev, false); } @@ -977,7 +952,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev) return ret; pci_set_master(pdev); - ret = nouveau_do_resume(drm_dev); + ret = nouveau_do_resume(drm_dev, true); drm_kms_helper_poll_enable(drm_dev); /* do magic */ nvif_mask(device, 0x88488, (1 << 25), (1 << 25)); diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 8bdd27091db8..49fe6075cc7c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -486,6 +486,16 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { .fb_probe = nouveau_fbcon_create, }; +static void +nouveau_fbcon_set_suspend_work(struct work_struct *work) +{ + struct nouveau_fbdev *fbcon = container_of(work, typeof(*fbcon), work); + console_lock(); + nouveau_fbcon_accel_restore(fbcon->dev); + nouveau_fbcon_zfill(fbcon->dev, fbcon); + fb_set_suspend(fbcon->helper.fbdev, FBINFO_STATE_RUNNING); + console_unlock(); +} int nouveau_fbcon_init(struct drm_device *dev) @@ -503,6 +513,7 @@ nouveau_fbcon_init(struct drm_device *dev) if (!fbcon) return -ENOMEM; + INIT_WORK(&fbcon->work, nouveau_fbcon_set_suspend_work); fbcon->dev = dev; drm->fbcon = fbcon; @@ -551,14 +562,14 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state) { struct nouveau_drm *drm = nouveau_drm(dev); if (drm->fbcon) { - console_lock(); - if (state == 0) { - nouveau_fbcon_accel_restore(dev); - nouveau_fbcon_zfill(dev, drm->fbcon); + if (state == FBINFO_STATE_RUNNING) { + schedule_work(&drm->fbcon->work); + return; } + flush_work(&drm->fbcon->work); + console_lock(); fb_set_suspend(drm->fbcon->helper.fbdev, state); - if (state == 1) - nouveau_fbcon_accel_save_disable(dev); + nouveau_fbcon_accel_save_disable(dev); console_unlock(); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index 34658cfa8f5d..0b465c7d3907 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h @@ -36,6 +36,7 @@ struct nouveau_fbdev { struct nouveau_framebuffer nouveau_fb; struct list_head fbdev_list; struct drm_device *dev; + struct work_struct work; unsigned int saved_flags; struct nvif_object surf2d; struct nvif_object clip; diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 3a4d64e1dfb1..092d89bd3224 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -674,16 +674,20 @@ static int qup_i2c_probe(struct platform_device *pdev) qup->adap.dev.of_node = pdev->dev.of_node; strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name)); - ret = i2c_add_adapter(&qup->adap); - if (ret) - goto fail; - pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC); pm_runtime_use_autosuspend(qup->dev); pm_runtime_set_active(qup->dev); pm_runtime_enable(qup->dev); + + ret = i2c_add_adapter(&qup->adap); + if (ret) + goto 
fail_runtime; + return 0; +fail_runtime: + pm_runtime_disable(qup->dev); + pm_runtime_set_suspended(qup->dev); fail: qup_i2c_disable_clocks(qup); return ret; diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 93cfc837200b..b38b0529946a 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -238,7 +238,7 @@ static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c) for (i = 0; i < 8; ++i) { val = 0; for (j = 0; j < 4; ++j) { - if (i2c->processed == i2c->msg->len) + if ((i2c->processed == i2c->msg->len) && (cnt != 0)) break; if (i2c->processed == 0 && cnt == 0) diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index ccfbbab82a15..2f90ac6a7f79 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -50,6 +50,7 @@ #include <linux/irqflags.h> #include <linux/rwsem.h> #include <linux/pm_runtime.h> +#include <linux/pm_domain.h> #include <linux/acpi.h> #include <linux/jump_label.h> #include <asm/uaccess.h> @@ -643,10 +644,13 @@ static int i2c_device_probe(struct device *dev) if (status < 0) return status; - acpi_dev_pm_attach(&client->dev, true); - status = driver->probe(client, i2c_match_id(driver->id_table, client)); - if (status) - acpi_dev_pm_detach(&client->dev, true); + status = dev_pm_domain_attach(&client->dev, true); + if (status != -EPROBE_DEFER) { + status = driver->probe(client, i2c_match_id(driver->id_table, + client)); + if (status) + dev_pm_domain_detach(&client->dev, true); + } return status; } @@ -666,7 +670,7 @@ static int i2c_device_remove(struct device *dev) status = driver->remove(client); } - acpi_dev_pm_detach(&client->dev, true); + dev_pm_domain_detach(&client->dev, true); return status; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 183588b11fc1..9f0fbecd1eb5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -64,6 +64,10 @@ #define cpu_to_group(cpu) cpu_to_node(cpu) #define ANY_GROUP NUMA_NO_NODE +static bool devices_handle_discard_safely = false; +module_param(devices_handle_discard_safely, bool, 0644); +MODULE_PARM_DESC(devices_handle_discard_safely, + "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions"); static struct workqueue_struct *raid5_wq; /* * Stripe cache @@ -6208,7 +6212,7 @@ static int run(struct mddev *mddev) mddev->queue->limits.discard_granularity = stripe; /* * unaligned part of discard request will be ignored, so can't - * guarantee discard_zerors_data + * guarantee discard_zeroes_data */ mddev->queue->limits.discard_zeroes_data = 0; @@ -6233,6 +6237,18 @@ static int run(struct mddev *mddev) !bdev_get_queue(rdev->bdev)-> limits.discard_zeroes_data) discard_supported = false; + /* Unfortunately, discard_zeroes_data is not currently + * a guarantee - just a hint. So we only allow DISCARD + * if the sysadmin has confirmed that only safe devices + * are in use by setting a module parameter. 
+ */ + if (!devices_handle_discard_safely) { + if (discard_supported) { + pr_info("md/raid456: discard support disabled due to uncertainty.\n"); + pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n"); + } + discard_supported = false; + } } if (discard_supported && diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index a7e24848f6c8..9da812b8a786 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -3524,6 +3524,7 @@ static struct usb_driver em28xx_usb_driver = { .disconnect = em28xx_usb_disconnect, .suspend = em28xx_usb_suspend, .resume = em28xx_usb_resume, + .reset_resume = em28xx_usb_resume, .id_table = em28xx_id_table, }; diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 4fa8fef9147f..65cf7a7e05ea 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -16,6 +16,7 @@ #include <linux/export.h> #include <linux/slab.h> #include <linux/pm_runtime.h> +#include <linux/pm_domain.h> #include <linux/acpi.h> #include <linux/mmc/card.h> @@ -315,7 +316,7 @@ int sdio_add_func(struct sdio_func *func) ret = device_add(&func->dev); if (ret == 0) { sdio_func_set_present(func); - acpi_dev_pm_attach(&func->dev, false); + dev_pm_domain_attach(&func->dev, false); } return ret; @@ -332,7 +333,7 @@ void sdio_remove_func(struct sdio_func *func) if (!sdio_func_present(func)) return; - acpi_dev_pm_detach(&func->dev, false); + dev_pm_domain_detach(&func->dev, false); device_del(&func->dev); put_device(&func->dev); } diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 2fee73b878c2..823d01c5684c 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -3236,8 +3236,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) skb->protocol = eth_type_trans(skb, bp->dev); - if ((len > (bp->dev->mtu + ETH_HLEN)) && - (ntohs(skb->protocol) != 0x8100)) { + if (len > (bp->dev->mtu + ETH_HLEN) && + skb->protocol != htons(0x8100) && + skb->protocol != htons(ETH_P_8021AD)) { dev_kfree_skb(skb); goto next_rx; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index e7d3a620d96a..ba499489969a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6918,7 +6918,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) skb->protocol = eth_type_trans(skb, tp->dev); if (len > (tp->dev->mtu + ETH_HLEN) && - skb->protocol != htons(ETH_P_8021Q)) { + skb->protocol != htons(ETH_P_8021Q) && + skb->protocol != htons(ETH_P_8021AD)) { dev_kfree_skb_any(skb); goto drop_it_no_recycle; } diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ca5d7798b265..e1e02fba4fcc 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -30,7 +30,6 @@ #include <linux/of_device.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/pinctrl/consumer.h> #include "macb.h" @@ -2071,7 +2070,6 @@ static int __init macb_probe(struct platform_device *pdev) struct phy_device *phydev; u32 config; int err = -ENXIO; - struct pinctrl *pinctrl; const char *mac; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2080,15 +2078,6 @@ static int __init macb_probe(struct platform_device *pdev) goto err_out; } - pinctrl = devm_pinctrl_get_select_default(&pdev->dev); - if (IS_ERR(pinctrl)) { - err = PTR_ERR(pinctrl); - if (err == -EPROBE_DEFER) - goto 
err_out; - - dev_warn(&pdev->dev, "No pinctrl provided\n"); - } - err = -ENOMEM; dev = alloc_etherdev(sizeof(*bp)); if (!dev) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 7e2d5d57c598..871e3a5bda38 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -78,13 +78,13 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); #endif /* CONFIG_PCI_MSI */ static uint8_t num_vfs[3] = {0, 0, 0}; -static int num_vfs_argc = 3; +static int num_vfs_argc; module_param_array(num_vfs, byte , &num_vfs_argc, 0444); MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" "num_vfs=port1,port2,port1+2"); static uint8_t probe_vf[3] = {0, 0, 0}; -static int probe_vfs_argc = 3; +static int probe_vfs_argc; module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" "probe_vf=port1,port2,port1+2"); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 32058614151a..5c4068353f66 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -135,6 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) int i, j; struct nx_host_tx_ring *tx_ring = adapter->tx_ring; + spin_lock(&adapter->tx_clean_lock); cmd_buf = tx_ring->cmd_buf_arr; for (i = 0; i < tx_ring->num_desc; i++) { buffrag = cmd_buf->frag_array; @@ -158,6 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) } cmd_buf++; } + spin_unlock(&adapter->tx_clean_lock); } void netxen_free_sw_resources(struct netxen_adapter *adapter) @@ -1792,9 +1794,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) break; } - if (count && netif_running(netdev)) { - tx_ring->sw_consumer = sw_consumer; + tx_ring->sw_consumer = sw_consumer; + if (count && netif_running(netdev)) { smp_mb(); if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 1159031f885b..5ec5a2b0e989 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1186,7 +1186,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) return; smp_mb(); - spin_lock(&adapter->tx_clean_lock); netif_carrier_off(netdev); netif_tx_disable(netdev); @@ -1204,7 +1203,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) netxen_napi_disable(adapter); netxen_release_tx_buffers(adapter); - spin_unlock(&adapter->tx_clean_lock); } /* Usage: During suspend and firmware recovery module */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 86783e1afcf7..3172cdf591fe 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1177,9 +1177,8 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter) { u32 idc_params, val; - if (qlcnic_83xx_lockless_flash_read32(adapter, - QLC_83XX_IDC_FLASH_PARAM_ADDR, - (u8 *)&idc_params, 1)) { + if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR, + (u8 *)&idc_params, 1)) { dev_info(&adapter->pdev->dev, "%s:failed to get IDC params from flash\n", __func__); 
adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 141f116eb868..494e8105adee 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -1333,21 +1333,21 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev, struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_esw_statistics port_stats; struct qlcnic_mac_statistics mac_stats; - int index, ret, length, size, tx_size, ring; + int index, ret, length, size, ring; char *p; - tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN; + memset(data, 0, stats->n_stats * sizeof(u64)); - memset(data, 0, tx_size * sizeof(u64)); for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) { - if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) { tx_ring = &adapter->tx_ring[ring]; data = qlcnic_fill_tx_queue_stats(data, tx_ring); qlcnic_update_stats(adapter); + } else { + data += QLCNIC_TX_STATS_LEN; } } - memset(data, 0, stats->n_stats * sizeof(u64)); length = QLCNIC_STATS_LEN; for (index = 0; index < length; index++) { p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6e6ee226de04..b0c1521e08a3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2786,8 +2786,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, if (IS_ERR(priv->stmmac_clk)) { dev_warn(priv->device, "%s: warning: cannot get CSR clock\n", __func__); - ret = PTR_ERR(priv->stmmac_clk); - goto error_clk_get; + /* If failed to obtain stmmac_clk and specific clk_csr value + * is NOT passed from the platform, probe fail. 
+ */ + if (!priv->plat->clk_csr) { + ret = PTR_ERR(priv->stmmac_clk); + goto error_clk_get; + } else { + priv->stmmac_clk = NULL; + } } clk_prepare_enable(priv->stmmac_clk); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a9c5eaadc426..0fcb5e7eb073 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -387,6 +387,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) int hdr_offset; u32 net_trans_info; u32 hash; + u32 skb_length = skb->len; /* We will atmost need two pages to describe the rndis @@ -562,7 +563,7 @@ do_send: drop: if (ret == 0) { - net->stats.tx_bytes += skb->len; + net->stats.tx_bytes += skb_length; net->stats.tx_packets++; } else { kfree(packet); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 3381c4f91a8c..0c6adaaf898c 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -112,17 +112,15 @@ out: return err; } +/* Requires RTNL */ static int macvtap_set_queue(struct net_device *dev, struct file *file, struct macvtap_queue *q) { struct macvlan_dev *vlan = netdev_priv(dev); - int err = -EBUSY; - rtnl_lock(); if (vlan->numqueues == MAX_MACVTAP_QUEUES) - goto out; + return -EBUSY; - err = 0; rcu_assign_pointer(q->vlan, vlan); rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); sock_hold(&q->sk); @@ -136,9 +134,7 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file, vlan->numvtaps++; vlan->numqueues++; -out: - rtnl_unlock(); - return err; + return 0; } static int macvtap_disable_queue(struct macvtap_queue *q) @@ -454,11 +450,12 @@ static void macvtap_sock_destruct(struct sock *sk) static int macvtap_open(struct inode *inode, struct file *file) { struct net *net = current->nsproxy->net_ns; - struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode)); + struct net_device *dev; struct macvtap_queue *q; - int err; + int err = -ENODEV; - err = -ENODEV; + rtnl_lock(); + dev = dev_get_by_macvtap_minor(iminor(inode)); if (!dev) goto out; @@ -498,6 +495,7 @@ out: if (dev) dev_put(dev); + rtnl_unlock(); return err; } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 74760e8143e3..604ef210a4de 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -24,7 +24,7 @@ #include <net/ip6_checksum.h> /* Version Information */ -#define DRIVER_VERSION "v1.06.0 (2014/03/03)" +#define DRIVER_VERSION "v1.06.1 (2014/10/01)" #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" #define MODULENAME "r8152" @@ -1949,10 +1949,34 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable) ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); } +static int rtl_start_rx(struct r8152 *tp) +{ + int i, ret = 0; + + INIT_LIST_HEAD(&tp->rx_done); + for (i = 0; i < RTL8152_MAX_RX; i++) { + INIT_LIST_HEAD(&tp->rx_info[i].list); + ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL); + if (ret) + break; + } + + return ret; +} + +static int rtl_stop_rx(struct r8152 *tp) +{ + int i; + + for (i = 0; i < RTL8152_MAX_RX; i++) + usb_kill_urb(tp->rx_info[i].urb); + + return 0; +} + static int rtl_enable(struct r8152 *tp) { u32 ocp_data; - int i, ret; r8152b_reset_packet_filter(tp); @@ -1962,14 +1986,7 @@ static int rtl_enable(struct r8152 *tp) rxdy_gated_en(tp, false); - INIT_LIST_HEAD(&tp->rx_done); - ret = 0; - for (i = 0; i < RTL8152_MAX_RX; i++) { - INIT_LIST_HEAD(&tp->rx_info[i].list); - ret |= r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL); 
- } - - return ret; + return rtl_start_rx(tp); } static int rtl8152_enable(struct r8152 *tp) @@ -2053,8 +2070,7 @@ static void rtl_disable(struct r8152 *tp) mdelay(1); } - for (i = 0; i < RTL8152_MAX_RX; i++) - usb_kill_urb(tp->rx_info[i].urb); + rtl_stop_rx(tp); rtl8152_nic_reset(tp); } @@ -2185,28 +2201,6 @@ static void rtl_phy_reset(struct r8152 *tp) } } -static void rtl_clear_bp(struct r8152 *tp) -{ - ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0); - ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0); - ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0); - ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0); - ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0); - ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0); - ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0); - ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0); - mdelay(3); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0); - ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0); -} - -static void r8153_clear_bp(struct r8152 *tp) -{ - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0); - ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0); - rtl_clear_bp(tp); -} - static void r8153_teredo_off(struct r8152 *tp) { u32 ocp_data; @@ -2249,8 +2243,6 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp) r8152_mdio_write(tp, MII_BMCR, data); } - rtl_clear_bp(tp); - set_bit(PHY_RESET, &tp->flags); } @@ -2401,8 +2393,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) r8152_mdio_write(tp, MII_BMCR, data); } - r8153_clear_bp(tp); - if (tp->version == RTL_VER_03) { data = ocp_reg_read(tp, OCP_EEE_CFG); data &= ~CTAP_SHORT_EN; @@ -3083,13 +3073,14 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); + tasklet_disable(&tp->tl); if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { + rtl_stop_rx(tp); rtl_runtime_suspend_enable(tp, true); } else { - tasklet_disable(&tp->tl); tp->rtl_ops.down(tp); - tasklet_enable(&tp->tl); } + tasklet_enable(&tp->tl); } return 0; @@ -3108,17 +3099,18 @@ static int rtl8152_resume(struct usb_interface *intf) if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { rtl_runtime_suspend_enable(tp, false); clear_bit(SELECTIVE_SUSPEND, &tp->flags); + set_bit(WORK_ENABLE, &tp->flags); if (tp->speed & LINK_STATUS) - tp->rtl_ops.disable(tp); + rtl_start_rx(tp); } else { tp->rtl_ops.up(tp); rtl8152_set_speed(tp, AUTONEG_ENABLE, tp->mii.supports_gmii ? 
SPEED_1000 : SPEED_100, DUPLEX_FULL); + tp->speed = 0; + netif_carrier_off(tp->netdev); + set_bit(WORK_ENABLE, &tp->flags); } - tp->speed = 0; - netif_carrier_off(tp->netdev); - set_bit(WORK_ENABLE, &tp->flags); usb_submit_urb(tp->intr_urb, GFP_KERNEL); } @@ -3405,7 +3397,7 @@ static void rtl8153_unload(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; - r8153_power_cut_en(tp, true); + r8153_power_cut_en(tp, false); } static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) @@ -3558,7 +3550,11 @@ static void rtl8152_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); if (tp) { - set_bit(RTL8152_UNPLUG, &tp->flags); + struct usb_device *udev = tp->udev; + + if (udev->state == USB_STATE_NOTATTACHED) + set_bit(RTL8152_UNPLUG, &tp->flags); + tasklet_kill(&tp->tl); unregister_netdev(tp->netdev); tp->rtl_ops.unload(tp); diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index a042d065a0c7..8be2096c8423 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c @@ -395,7 +395,8 @@ static void __init superio_serial_init(void) serial_port.iotype = UPIO_PORT; serial_port.type = PORT_16550A; serial_port.uartclk = 115200*16; - serial_port.fifosize = 16; + serial_port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | + UPF_BOOT_AUTOCONF; /* serial port #1 */ serial_port.iobase = sio_dev.sp1_base; diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 82e06a86cd77..a9f9c46e5022 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -41,11 +41,17 @@ static int __init pcie_pme_setup(char *str) } __setup("pcie_pme=", pcie_pme_setup); +enum pme_suspend_level { + PME_SUSPEND_NONE = 0, + PME_SUSPEND_WAKEUP, + PME_SUSPEND_NOIRQ, +}; + struct pcie_pme_service_data { spinlock_t lock; struct pcie_device *srv; struct work_struct work; - bool noirq; /* Don't enable the PME interrupt used by this service. */ + enum pme_suspend_level suspend_level; }; /** @@ -223,7 +229,7 @@ static void pcie_pme_work_fn(struct work_struct *work) spin_lock_irq(&data->lock); for (;;) { - if (data->noirq) + if (data->suspend_level != PME_SUSPEND_NONE) break; pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); @@ -250,7 +256,7 @@ static void pcie_pme_work_fn(struct work_struct *work) spin_lock_irq(&data->lock); } - if (!data->noirq) + if (data->suspend_level == PME_SUSPEND_NONE) pcie_pme_interrupt_enable(port, true); spin_unlock_irq(&data->lock); @@ -367,6 +373,21 @@ static int pcie_pme_probe(struct pcie_device *srv) return ret; } +static bool pcie_pme_check_wakeup(struct pci_bus *bus) +{ + struct pci_dev *dev; + + if (!bus) + return false; + + list_for_each_entry(dev, &bus->devices, bus_list) + if (device_may_wakeup(&dev->dev) + || pcie_pme_check_wakeup(dev->subordinate)) + return true; + + return false; +} + /** * pcie_pme_suspend - Suspend PCIe PME service device. * @srv: PCIe service device to suspend. 
@@ -375,11 +396,26 @@ static int pcie_pme_suspend(struct pcie_device *srv) { struct pcie_pme_service_data *data = get_service_data(srv); struct pci_dev *port = srv->port; + bool wakeup; + if (device_may_wakeup(&port->dev)) { + wakeup = true; + } else { + down_read(&pci_bus_sem); + wakeup = pcie_pme_check_wakeup(port->subordinate); + up_read(&pci_bus_sem); + } spin_lock_irq(&data->lock); - pcie_pme_interrupt_enable(port, false); - pcie_clear_root_pme_status(port); - data->noirq = true; + if (wakeup) { + enable_irq_wake(srv->irq); + data->suspend_level = PME_SUSPEND_WAKEUP; + } else { + struct pci_dev *port = srv->port; + + pcie_pme_interrupt_enable(port, false); + pcie_clear_root_pme_status(port); + data->suspend_level = PME_SUSPEND_NOIRQ; + } spin_unlock_irq(&data->lock); synchronize_irq(srv->irq); @@ -394,12 +430,17 @@ static int pcie_pme_suspend(struct pcie_device *srv) static int pcie_pme_resume(struct pcie_device *srv) { struct pcie_pme_service_data *data = get_service_data(srv); - struct pci_dev *port = srv->port; spin_lock_irq(&data->lock); - data->noirq = false; - pcie_clear_root_pme_status(port); - pcie_pme_interrupt_enable(port, true); + if (data->suspend_level == PME_SUSPEND_NOIRQ) { + struct pci_dev *port = srv->port; + + pcie_clear_root_pme_status(port); + pcie_pme_interrupt_enable(port, true); + } else { + disable_irq_wake(srv->irq); + } + data->suspend_level = PME_SUSPEND_NONE; spin_unlock_irq(&data->lock); return 0; diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 87aa28c4280f..2655d4a988f3 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -1050,6 +1050,13 @@ static struct acpi_driver acpi_fujitsu_hotkey_driver = { }, }; +static const struct acpi_device_id fujitsu_ids[] __used = { + {ACPI_FUJITSU_HID, 0}, + {ACPI_FUJITSU_HOTKEY_HID, 0}, + {"", 0} +}; +MODULE_DEVICE_TABLE(acpi, fujitsu_ids); + static int __init fujitsu_init(void) { int ret, result, max_brightness; @@ -1208,12 +1215,3 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*"); MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*"); MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*"); - -static struct pnp_device_id pnp_ids[] __used = { - {.id = "FUJ02bf"}, - {.id = "FUJ02B1"}, - {.id = "FUJ02E3"}, - {.id = ""} -}; - -MODULE_DEVICE_TABLE(pnp, pnp_ids); diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig index 2a1008b61121..7f3d389bd601 100644 --- a/drivers/power/avs/Kconfig +++ b/drivers/power/avs/Kconfig @@ -10,3 +10,11 @@ menuconfig POWER_AVS AVS is also called SmartReflex on OMAP devices. Say Y here to enable Adaptive Voltage Scaling class support. + +config ROCKCHIP_IODOMAIN + tristate "Rockchip IO domain support" + depends on ARCH_ROCKCHIP && OF + help + Say y here to enable support io domains on Rockchip SoCs. It is + necessary for the io domain setting of the SoC to match the + voltage supplied by the regulators. 
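[Editorial aside, not part of any patch above.] The regulator core hunks further down in this section make _notifier_call_chain() return the notifier verdict and emit REGULATOR_EVENT_PRE_VOLTAGE_CHANGE / REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE around the set_voltage operations; the Rockchip IO-domain driver that follows is the consumer of those events. As a stand-alone sketch of that consumer-side contract (foo_apply_setting() and the registration site are hypothetical), a notifier callback would look roughly like this:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/regulator/consumer.h>

/* Hypothetical hardware hook standing in for a real consumer action. */
static int foo_apply_setting(int uV)
{
        return 0;
}

/*
 * Illustrative only: a consumer notifier reacting to the new pre/abort
 * voltage-change events.  Returning NOTIFY_BAD from the PRE event vetoes
 * the voltage change before the regulator is touched.
 */
static int foo_reg_notify(struct notifier_block *nb, unsigned long event,
                          void *data)
{
        int uV;

        if (event & REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) {
                struct pre_voltage_change_data *pvc = data;

                /* The rail may rise: prepare for the highest value it can reach. */
                uV = max_t(unsigned long, pvc->old_uV, pvc->max_uV);
        } else if (event & (REGULATOR_EVENT_VOLTAGE_CHANGE |
                            REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE)) {
                /* Change completed (or rolled back); data carries the voltage. */
                uV = (unsigned long)data;
        } else {
                return NOTIFY_OK;
        }

        if (foo_apply_setting(uV) &&
            (event & REGULATOR_EVENT_PRE_VOLTAGE_CHANGE))
                return NOTIFY_BAD;

        return NOTIFY_OK;
}

Such a callback is hooked up with regulator_register_notifier() against the struct regulator obtained for the supply, which is exactly what rockchip_iodomain_probe() below does for each named rail.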
diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile index 0843386a6c19..ba4c7bc69225 100644 --- a/drivers/power/avs/Makefile +++ b/drivers/power/avs/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o +obj-$(CONFIG_ROCKCHIP_IODOMAIN) += rockchip-io-domain.o diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c new file mode 100644 index 000000000000..3ae35d0590d2 --- /dev/null +++ b/drivers/power/avs/rockchip-io-domain.c @@ -0,0 +1,351 @@ +/* + * Rockchip IO Voltage Domain driver + * + * Copyright 2014 MundoReader S.L. + * Copyright 2014 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/mfd/syscon.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> + +#define MAX_SUPPLIES 16 + +/* + * The max voltage for 1.8V and 3.3V come from the Rockchip datasheet under + * "Recommended Operating Conditions" for "Digital GPIO". When the typical + * is 3.3V the max is 3.6V. When the typical is 1.8V the max is 1.98V. + * + * They are used like this: + * - If the voltage on a rail is above the "1.8" voltage (1.98V) we'll tell the + * SoC we're at 3.3. + * - If the voltage on a rail is above the "3.3" voltage (3.6V) we'll consider + * that to be an error. + */ +#define MAX_VOLTAGE_1_8 1980000 +#define MAX_VOLTAGE_3_3 3600000 + +#define RK3288_SOC_CON2 0x24c +#define RK3288_SOC_CON2_FLASH0 BIT(7) +#define RK3288_SOC_FLASH_SUPPLY_NUM 2 + +struct rockchip_iodomain; + +/** + * @supplies: voltage settings matching the register bits. + */ +struct rockchip_iodomain_soc_data { + int grf_offset; + const char *supply_names[MAX_SUPPLIES]; + void (*init)(struct rockchip_iodomain *iod); +}; + +struct rockchip_iodomain_supply { + struct rockchip_iodomain *iod; + struct regulator *reg; + struct notifier_block nb; + int idx; +}; + +struct rockchip_iodomain { + struct device *dev; + struct regmap *grf; + struct rockchip_iodomain_soc_data *soc_data; + struct rockchip_iodomain_supply supplies[MAX_SUPPLIES]; +}; + +static int rockchip_iodomain_write(struct rockchip_iodomain_supply *supply, + int uV) +{ + struct rockchip_iodomain *iod = supply->iod; + u32 val; + int ret; + + /* set value bit */ + val = (uV > MAX_VOLTAGE_1_8) ? 0 : 1; + val <<= supply->idx; + + /* apply hiword-mask */ + val |= (BIT(supply->idx) << 16); + + ret = regmap_write(iod->grf, iod->soc_data->grf_offset, val); + if (ret) + dev_err(iod->dev, "Couldn't write to GRF\n"); + + return ret; +} + +static int rockchip_iodomain_notify(struct notifier_block *nb, + unsigned long event, + void *data) +{ + struct rockchip_iodomain_supply *supply = + container_of(nb, struct rockchip_iodomain_supply, nb); + int uV; + int ret; + + /* + * According to Rockchip it's important to keep the SoC IO domain + * higher than (or equal to) the external voltage. That means we need + * to change it before external voltage changes happen in the case + * of an increase. 
+ * + * Note that in the "pre" change we pick the max possible voltage that + * the regulator might end up at (the client requests a range and we + * don't know for certain the exact voltage). Right now we rely on the + * slop in MAX_VOLTAGE_1_8 and MAX_VOLTAGE_3_3 to save us if clients + * request something like a max of 3.6V when they really want 3.3V. + * We could attempt to come up with better rules if this fails. + */ + if (event & REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) { + struct pre_voltage_change_data *pvc_data = data; + + uV = max_t(unsigned long, pvc_data->old_uV, pvc_data->max_uV); + } else if (event & (REGULATOR_EVENT_VOLTAGE_CHANGE | + REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE)) { + uV = (unsigned long)data; + } else { + return NOTIFY_OK; + } + + dev_dbg(supply->iod->dev, "Setting to %d\n", uV); + + if (uV > MAX_VOLTAGE_3_3) { + dev_err(supply->iod->dev, "Voltage too high: %d\n", uV); + + if (event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) + return NOTIFY_BAD; + } + + ret = rockchip_iodomain_write(supply, uV); + if (ret && event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) + return NOTIFY_BAD; + + dev_info(supply->iod->dev, "Setting to %d done\n", uV); + return NOTIFY_OK; +} + +static void rk3288_iodomain_init(struct rockchip_iodomain *iod) +{ + int ret; + u32 val; + + /* if no flash supply we should leave things alone */ + if (!iod->supplies[RK3288_SOC_FLASH_SUPPLY_NUM].reg) + return; + + /* + * set flash0 iodomain to also use this framework + * instead of a special gpio. + */ + val = RK3288_SOC_CON2_FLASH0 | (RK3288_SOC_CON2_FLASH0 << 16); + ret = regmap_write(iod->grf, RK3288_SOC_CON2, val); + if (ret < 0) + dev_warn(iod->dev, "couldn't update flash0 ctrl\n"); +} + +/* + * On the rk3188 the io-domains are handled by a shared register with the + * lower 8 bits being still being continuing drive-strength settings. 
+ */ +static const struct rockchip_iodomain_soc_data soc_data_rk3188 = { + .grf_offset = 0x104, + .supply_names = { + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + "ap0", + "ap1", + "cif", + "flash", + "vccio0", + "vccio1", + "lcdc0", + "lcdc1", + }, +}; + +static const struct rockchip_iodomain_soc_data soc_data_rk3288 = { + .grf_offset = 0x380, + .supply_names = { + "lcdc", /* LCDC_VDD */ + "dvp", /* DVPIO_VDD */ + "flash0", /* FLASH0_VDD (emmc) */ + "flash1", /* FLASH1_VDD (sdio1) */ + "wifi", /* APIO3_VDD (sdio0) */ + "bb", /* APIO5_VDD */ + "audio", /* APIO4_VDD */ + "sdcard", /* SDMMC0_VDD (sdmmc) */ + "gpio30", /* APIO1_VDD */ + "gpio1830", /* APIO2_VDD */ + }, + .init = rk3288_iodomain_init, +}; + +static const struct of_device_id rockchip_iodomain_match[] = { + { + .compatible = "rockchip,rk3188-io-voltage-domain", + .data = (void *)&soc_data_rk3188 + }, + { + .compatible = "rockchip,rk3288-io-voltage-domain", + .data = (void *)&soc_data_rk3288 + }, + { /* sentinel */ }, +}; + +static int rockchip_iodomain_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + const struct of_device_id *match; + struct rockchip_iodomain *iod; + int i, ret = 0; + + if (!np) + return -ENODEV; + + iod = devm_kzalloc(&pdev->dev, sizeof(*iod), GFP_KERNEL); + if (!iod) + return -ENOMEM; + + iod->dev = &pdev->dev; + platform_set_drvdata(pdev, iod); + + match = of_match_node(rockchip_iodomain_match, np); + iod->soc_data = (struct rockchip_iodomain_soc_data *)match->data; + + iod->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); + if (IS_ERR(iod->grf)) { + dev_err(&pdev->dev, "couldn't find grf regmap\n"); + return PTR_ERR(iod->grf); + } + + for (i = 0; i < MAX_SUPPLIES; i++) { + const char *supply_name = iod->soc_data->supply_names[i]; + struct rockchip_iodomain_supply *supply = &iod->supplies[i]; + struct regulator *reg; + int uV; + + if (!supply_name) + continue; + + reg = devm_regulator_get_optional(iod->dev, supply_name); + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + + /* If a supply wasn't specified, that's OK */ + if (ret == -ENODEV) + continue; + else if (ret != -EPROBE_DEFER) + dev_err(iod->dev, "couldn't get regulator %s\n", + supply_name); + goto unreg_notify; + } + + /* set initial correct value */ + uV = regulator_get_voltage(reg); + + /* must be a regulator we can get the voltage of */ + if (uV < 0) { + dev_err(iod->dev, "Can't determine voltage: %s\n", + supply_name); + goto unreg_notify; + } + + if (uV > MAX_VOLTAGE_3_3) { + dev_crit(iod->dev, + "%d uV is too high. 
May damage SoC!\n",
+				 uV);
+			ret = -EINVAL;
+			goto unreg_notify;
+		}
+
+		/* setup our supply */
+		supply->idx = i;
+		supply->iod = iod;
+		supply->reg = reg;
+		supply->nb.notifier_call = rockchip_iodomain_notify;
+
+		ret = rockchip_iodomain_write(supply, uV);
+		if (ret) {
+			supply->reg = NULL;
+			goto unreg_notify;
+		}
+
+		/* register regulator notifier */
+		ret = regulator_register_notifier(reg, &supply->nb);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"regulator notifier request failed\n");
+			supply->reg = NULL;
+			goto unreg_notify;
+		}
+	}
+
+	if (iod->soc_data->init)
+		iod->soc_data->init(iod);
+
+	return 0;
+
+unreg_notify:
+	for (i = MAX_SUPPLIES - 1; i >= 0; i--) {
+		struct rockchip_iodomain_supply *io_supply = &iod->supplies[i];
+
+		if (io_supply->reg)
+			regulator_unregister_notifier(io_supply->reg,
+						      &io_supply->nb);
+	}
+
+	return ret;
+}
+
+static int rockchip_iodomain_remove(struct platform_device *pdev)
+{
+	struct rockchip_iodomain *iod = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = MAX_SUPPLIES - 1; i >= 0; i--) {
+		struct rockchip_iodomain_supply *io_supply = &iod->supplies[i];
+
+		if (io_supply->reg)
+			regulator_unregister_notifier(io_supply->reg,
+						      &io_supply->nb);
+	}
+
+	return 0;
+}
+
+static struct platform_driver rockchip_iodomain_driver = {
+	.probe = rockchip_iodomain_probe,
+	.remove = rockchip_iodomain_remove,
+	.driver = {
+		.name = "rockchip-iodomain",
+		.of_match_table = rockchip_iodomain_match,
+	},
+};
+
+module_platform_driver(rockchip_iodomain_driver);
+
+MODULE_DESCRIPTION("Rockchip IO-domain driver");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_AUTHOR("Doug Anderson <dianders@chromium.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index a3c3785901f5..dabc8e8862c8 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -102,7 +102,7 @@ static int _regulator_disable(struct regulator_dev *rdev);
 static int _regulator_get_voltage(struct regulator_dev *rdev);
 static int _regulator_get_current_limit(struct regulator_dev *rdev);
 static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
-static void _notifier_call_chain(struct regulator_dev *rdev,
+static int _notifier_call_chain(struct regulator_dev *rdev,
 				  unsigned long event, void *data);
 static int _regulator_do_set_voltage(struct regulator_dev *rdev,
 				     int min_uV, int max_uV);
@@ -2369,6 +2369,55 @@ int regulator_is_supported_voltage(struct regulator *regulator,
 }
 EXPORT_SYMBOL_GPL(regulator_is_supported_voltage);
 
+static int _regulator_call_set_voltage(struct regulator_dev *rdev,
+				       int min_uV, int max_uV,
+				       unsigned *selector)
+{
+	struct pre_voltage_change_data data;
+	int ret;
+
+	data.old_uV = _regulator_get_voltage(rdev);
+	data.min_uV = min_uV;
+	data.max_uV = max_uV;
+	ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE,
+				   &data);
+	if (ret & NOTIFY_STOP_MASK)
+		return -EINVAL;
+
+	ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, selector);
+	if (ret >= 0)
+		return ret;
+
+	_notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE,
+			     (void *)data.old_uV);
+
+	return ret;
+}
+
+static int _regulator_call_set_voltage_sel(struct regulator_dev *rdev,
+					   int uV, unsigned selector)
+{
+	struct pre_voltage_change_data data;
+	int ret;
+
+	data.old_uV = _regulator_get_voltage(rdev);
+	data.min_uV = uV;
+	data.max_uV = uV;
+	ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE,
+				   &data);
+	if (ret & NOTIFY_STOP_MASK)
+		return -EINVAL;
+
+	ret = rdev->desc->ops->set_voltage_sel(rdev, selector);
+	if (ret >= 0)
+		return ret;
+
+	_notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE,
+			     (void *)data.old_uV);
+
+	return ret;
+}
+
 static int _regulator_do_set_voltage(struct regulator_dev *rdev,
 				     int min_uV, int max_uV)
 {
@@ -2396,8 +2445,8 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
 	}
 
 	if (rdev->desc->ops->set_voltage) {
-		ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
-						   &selector);
+		ret = _regulator_call_set_voltage(rdev, min_uV, max_uV,
+						  &selector);
 
 		if (ret >= 0) {
 			if (rdev->desc->ops->list_voltage)
@@ -2432,8 +2481,8 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
 			if (old_selector == selector)
 				ret = 0;
 			else
-				ret = rdev->desc->ops->set_voltage_sel(
-								rdev, ret);
+				ret = _regulator_call_set_voltage_sel(
+						rdev, best_val, selector);
 		} else {
 			ret = -EINVAL;
 		}
@@ -3079,11 +3128,11 @@ EXPORT_SYMBOL_GPL(regulator_unregister_notifier);
 /* notify regulator consumers and downstream regulator consumers.
  * Note mutex must be held by caller.
  */
-static void _notifier_call_chain(struct regulator_dev *rdev,
+static int _notifier_call_chain(struct regulator_dev *rdev,
 				  unsigned long event, void *data)
 {
 	/* call rdev chain first */
-	blocking_notifier_call_chain(&rdev->notifier, event, data);
+	return blocking_notifier_call_chain(&rdev->notifier, event, data);
 }
 
 /**
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 79788a12712d..02e69e7ee4a3 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1647,7 +1647,7 @@ static int cxgbi_inet6addr_handler(struct notifier_block *this,
 	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
 		event_dev = vlan_dev_real_dev(event_dev);
 
-	cdev = cxgbi_device_find_by_netdev(event_dev, NULL);
+	cdev = cxgbi_device_find_by_netdev_rcu(event_dev, NULL);
 
 	if (!cdev)
 		return ret;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d65df6dc106f..addd1dddce14 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -57,6 +57,9 @@ MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");
 static LIST_HEAD(cdev_list);
 static DEFINE_MUTEX(cdev_mutex);
 
+static LIST_HEAD(cdev_rcu_list);
+static DEFINE_SPINLOCK(cdev_rcu_lock);
+
 int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
 				unsigned int max_conn)
 {
@@ -142,6 +145,10 @@ struct cxgbi_device *cxgbi_device_register(unsigned int extra,
 	list_add_tail(&cdev->list_head, &cdev_list);
 	mutex_unlock(&cdev_mutex);
 
+	spin_lock(&cdev_rcu_lock);
+	list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list);
+	spin_unlock(&cdev_rcu_lock);
+
 	log_debug(1 << CXGBI_DBG_DEV,
 		"cdev 0x%p, p# %u.\n", cdev, nports);
 	return cdev;
@@ -153,9 +160,16 @@ void cxgbi_device_unregister(struct cxgbi_device *cdev)
 	log_debug(1 << CXGBI_DBG_DEV,
 		"cdev 0x%p, p# %u,%s.\n",
 		cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");
+
 	mutex_lock(&cdev_mutex);
 	list_del(&cdev->list_head);
 	mutex_unlock(&cdev_mutex);
+
+	spin_lock(&cdev_rcu_lock);
+	list_del_rcu(&cdev->rcu_node);
+	spin_unlock(&cdev_rcu_lock);
+	synchronize_rcu();
+
 	cxgbi_device_destroy(cdev);
 }
 EXPORT_SYMBOL_GPL(cxgbi_device_unregister);
@@ -167,12 +181,9 @@ void cxgbi_device_unregister_all(unsigned int flag)
 	mutex_lock(&cdev_mutex);
 	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
 		if ((cdev->flags & flag) == flag) {
-			log_debug(1 << CXGBI_DBG_DEV,
-				"cdev 0x%p, p# %u,%s.\n",
-				cdev, cdev->nports, cdev->nports ?
-					cdev->ports[0]->name : "");
-			list_del(&cdev->list_head);
-			cxgbi_device_destroy(cdev);
+			mutex_unlock(&cdev_mutex);
+			cxgbi_device_unregister(cdev);
+			mutex_lock(&cdev_mutex);
 		}
 	}
 	mutex_unlock(&cdev_mutex);
@@ -191,6 +202,7 @@ struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
 		}
 	}
 	mutex_unlock(&cdev_mutex);
+
 	log_debug(1 << CXGBI_DBG_DEV,
 		"lldev 0x%p, NO match found.\n", lldev);
 	return NULL;
@@ -230,6 +242,39 @@ struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
 }
 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);
 
+struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
+						     int *port)
+{
+	struct net_device *vdev = NULL;
+	struct cxgbi_device *cdev;
+	int i;
+
+	if (ndev->priv_flags & IFF_802_1Q_VLAN) {
+		vdev = ndev;
+		ndev = vlan_dev_real_dev(ndev);
+		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
+	}
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) {
+		for (i = 0; i < cdev->nports; i++) {
+			if (ndev == cdev->ports[i]) {
+				cdev->hbas[i]->vdev = vdev;
+				rcu_read_unlock();
+				if (port)
+					*port = i;
+				return cdev;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	log_debug(1 << CXGBI_DBG_DEV,
+		  "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
+
 static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
 						     int *port)
 {
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index b3e6e7541cc5..1d98fad6a0ab 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -527,6 +527,7 @@ struct cxgbi_ports_map {
 #define CXGBI_FLAG_IPV4_SET		0x10
 struct cxgbi_device {
 	struct list_head list_head;
+	struct list_head rcu_node;
 	unsigned int flags;
 	struct net_device **ports;
 	void *lldev;
@@ -709,6 +710,8 @@ void cxgbi_device_unregister(struct cxgbi_device *);
 void cxgbi_device_unregister_all(unsigned int flag);
 struct cxgbi_device *cxgbi_device_find_by_lldev(void *);
 struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *);
+struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *,
+						     int *);
 int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int,
 		   struct scsi_host_template *,
 		   struct scsi_transport_template *);
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 72f63817a1a0..fe2c2d595f59 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -75,8 +75,6 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
 	.con_ids = { NULL, },
 };
 
-static bool default_pm_on;
-
 static int __init sh_pm_runtime_init(void)
 {
 	if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
@@ -96,16 +94,7 @@ static int __init sh_pm_runtime_init(void)
 		return 0;
 	}
 
-	default_pm_on = true;
 	pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
 	return 0;
 }
 core_initcall(sh_pm_runtime_init);
-
-static int __init sh_pm_runtime_late_init(void)
-{
-	if (default_pm_on)
-		pm_genpd_poweroff_unused();
-	return 0;
-}
-late_initcall(sh_pm_runtime_late_init);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ca935df80c88..3907f1493e7d 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -35,6 +35,7 @@
 #include <linux/spi/spi.h>
 #include <linux/of_gpio.h>
 #include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
 #include <linux/export.h>
 #include <linux/sched/rt.h>
 #include <linux/delay.h>
@@ -264,10 +265,12 @@ static int spi_drv_probe(struct device *dev)
 	if (ret)
 		return ret;
 
-	acpi_dev_pm_attach(dev, true);
-	ret = sdrv->probe(to_spi_device(dev));
-	if (ret)
-		acpi_dev_pm_detach(dev, true);
+	ret = dev_pm_domain_attach(dev, true);
+	if (ret != -EPROBE_DEFER) {
+		ret = sdrv->probe(to_spi_device(dev));
+		if (ret)
+			dev_pm_domain_detach(dev, true);
+	}
 
 	return ret;
 }
@@ -278,7 +281,7 @@ static int spi_drv_remove(struct device *dev)
 	int ret;
 
 	ret = sdrv->remove(to_spi_device(dev));
-	acpi_dev_pm_detach(dev, true);
+	dev_pm_domain_detach(dev, true);
 
 	return ret;
 }
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 3f42785f653c..9bfa7252f7f9 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -970,6 +970,13 @@ static struct scsi_host_template uas_host_template = {
 	.cmd_per_lun = 1,	/* until we override it */
 	.skip_settle_delay = 1,
 	.ordered_tag = 1,
+
+	/*
+	 * The uas drivers expects tags not to be bigger than the maximum
+	 * per-device queue depth, which is not true with the blk-mq tag
+	 * allocator.
+	 */
+	.disable_blk_mq = true,
 };
 
 #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \