| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-10-25 15:18:39 +0200 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-10-25 15:18:39 +0200 |
| commit | 7e0bb71e75020348bee523720a0c2f04cc72f540 (patch) | |
| tree | 1a22d65bbce34e8cc0f82c543c9486ffb58332f7 /drivers | |
| parent | sysfs: Remove support for tagged directories with untagged members (again) (diff) | |
| parent | PM / Clocks: Remove redundant NULL checks before kfree() (diff) | |
Merge branch 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
* 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (63 commits)
PM / Clocks: Remove redundant NULL checks before kfree()
PM / Documentation: Update docs about suspend and CPU hotplug
ACPI / PM: Add Sony VGN-FW21E to nonvs blacklist.
ARM: mach-shmobile: sh7372 A4R support (v4)
ARM: mach-shmobile: sh7372 A3SP support (v4)
PM / Sleep: Mark devices involved in wakeup signaling during suspend
PM / Hibernate: Improve performance of LZO/plain hibernation, checksum image
PM / Hibernate: Do not initialize static and extern variables to 0
PM / Freezer: Make fake_signal_wake_up() wake TASK_KILLABLE tasks too
PM / Hibernate: Add resumedelay kernel param in addition to resumewait
MAINTAINERS: Update linux-pm list address
PM / ACPI: Blacklist Vaio VGN-FW520F machine known to require acpi_sleep=nonvs
PM / ACPI: Blacklist Sony Vaio known to require acpi_sleep=nonvs
PM / Hibernate: Add resumewait param to support MMC-like devices as resume file
PM / Hibernate: Fix typo in a kerneldoc comment
PM / Hibernate: Freeze kernel threads after preallocating memory
PM: Update the policy on default wakeup settings
PM / VT: Cleanup #if defined uglyness and fix compile error
PM / Suspend: Off by one in pm_suspend()
PM / Hibernate: Include storage keys in hibernation image on s390
...
Diffstat (limited to 'drivers')
41 files changed, 1969 insertions(+), 311 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index e73aaaee0138..6268167a1bb0 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -132,4 +132,6 @@ source "drivers/iommu/Kconfig"
 
 source "drivers/virt/Kconfig"
 
+source "drivers/devfreq/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index e7afb3acbc67..755eaf7a7285 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -129,3 +129,5 @@ obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
 
 # Virtualization drivers
 obj-$(CONFIG_VIRT_DRIVERS)	+= virt/
+
+obj-$(CONFIG_PM_DEVFREQ)	+= devfreq/
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 431ab11c8c1b..2e69e09ff03e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -37,7 +37,7 @@
 #include <linux/dmi.h>
 #include <linux/moduleparam.h>
 #include <linux/sched.h>	/* need_resched() */
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <linux/clockchips.h>
 #include <linux/cpuidle.h>
 #include <linux/irqflags.h>
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 3ed80b2ca907..0e46faef1d30 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -390,6 +390,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 	},
 	{
 	.callback = init_nvs_nosave,
+	.ident = "Sony Vaio VGN-FW21E",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
+		},
+	},
+	{
+	.callback = init_nvs_nosave,
 	.ident = "Sony Vaio VGN-SR11M",
 	.matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
@@ -444,6 +452,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 		DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
 		},
 	},
+	{
+	.callback = init_nvs_nosave,
+	.ident = "Sony Vaio VGN-SR26GN_P",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
+		},
+	},
+	{
+	.callback = init_nvs_nosave,
+	.ident = "Sony Vaio VGN-FW520F",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
+		},
+	},
 	{},
 };
 #endif /* CONFIG_SUSPEND */
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 2639ae79a372..81676dd17900 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o
+obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o common.o qos.o
 obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
@@ -6,4 +6,4 @@ obj-$(CONFIG_PM_OPP)	+= opp.o
 obj-$(CONFIG_PM_GENERIC_DOMAINS)	+= domain.o
 obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
 
-ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
\ No newline at end of file
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index b97294e2d95b..5f0f85d5c576 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -10,18 +10,13 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/pm.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 
 #ifdef CONFIG_PM
 
-struct pm_clk_data {
-	struct list_head clock_list;
-	spinlock_t lock;
-};
-
 enum pce_status {
 	PCE_STATUS_NONE = 0,
 	PCE_STATUS_ACQUIRED,
@@ -36,11 +31,6 @@ struct pm_clock_entry {
 	enum pce_status status;
 };
 
-static struct pm_clk_data *__to_pcd(struct device *dev)
-{
-	return dev ? dev->power.subsys_data : NULL;
-}
-
 /**
  * pm_clk_acquire - Acquire a device clock.
  * @dev: Device whose clock is to be acquired.
@@ -67,10 +57,10 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
  */
 int pm_clk_add(struct device *dev, const char *con_id)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 
-	if (!pcd)
+	if (!psd)
 		return -EINVAL;
 
 	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -91,9 +81,9 @@ int pm_clk_add(struct device *dev, const char *con_id)
 
 	pm_clk_acquire(dev, ce);
 
-	spin_lock_irq(&pcd->lock);
-	list_add_tail(&ce->node, &pcd->clock_list);
-	spin_unlock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);
+	list_add_tail(&ce->node, &psd->clock_list);
+	spin_unlock_irq(&psd->lock);
 
 	return 0;
 }
@@ -114,9 +104,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
 			clk_put(ce->clk);
 	}
 
-	if (ce->con_id)
-		kfree(ce->con_id);
-
+	kfree(ce->con_id);
 	kfree(ce);
 }
 
@@ -130,15 +118,15 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
  */
 void pm_clk_remove(struct device *dev, const char *con_id)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 
-	if (!pcd)
+	if (!psd)
 		return;
 
-	spin_lock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);
 
-	list_for_each_entry(ce, &pcd->clock_list, node) {
+	list_for_each_entry(ce, &psd->clock_list, node) {
 		if (!con_id && !ce->con_id)
 			goto remove;
 		else if (!con_id || !ce->con_id)
@@ -147,12 +135,12 @@ void pm_clk_remove(struct device *dev, const char *con_id)
 			goto remove;
 	}
 
-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);
 	return;
 
  remove:
 	list_del(&ce->node);
-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);
 
 	__pm_clk_remove(ce);
 }
@@ -161,23 +149,27 @@ void pm_clk_remove(struct device *dev, const char *con_id)
  * pm_clk_init - Initialize a device's list of power management clocks.
  * @dev: Device to initialize the list of PM clocks for.
  *
- * Allocate a struct pm_clk_data object, initialize its lock member and
- * make the @dev's power.subsys_data field point to it.
+ * Initialize the lock and clock_list members of the device's pm_subsys_data
+ * object.
  */
-int pm_clk_init(struct device *dev)
+void pm_clk_init(struct device *dev)
 {
-	struct pm_clk_data *pcd;
-
-	pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
-	if (!pcd) {
-		dev_err(dev, "Not enough memory for PM clock data.\n");
-		return -ENOMEM;
-	}
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	if (psd)
+		INIT_LIST_HEAD(&psd->clock_list);
+}
 
-	INIT_LIST_HEAD(&pcd->clock_list);
-	spin_lock_init(&pcd->lock);
-	dev->power.subsys_data = pcd;
-	return 0;
+/**
+ * pm_clk_create - Create and initialize a device's list of PM clocks.
+ * @dev: Device to create and initialize the list of PM clocks for.
+ *
+ * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
+ * members and make the @dev's power.subsys_data field point to it.
+ */
+int pm_clk_create(struct device *dev)
+{
+	int ret = dev_pm_get_subsys_data(dev);
+	return ret < 0 ? ret : 0;
 }
 
 /**
@@ -185,29 +177,28 @@ int pm_clk_init(struct device *dev)
  * @dev: Device to destroy the list of PM clocks for.
  *
  * Clear the @dev's power.subsys_data field, remove the list of clock entries
- * from the struct pm_clk_data object pointed to by it before and free
+ * from the struct pm_subsys_data object pointed to by it before and free
  * that object.
  */
 void pm_clk_destroy(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce, *c;
 	struct list_head list;
 
-	if (!pcd)
+	if (!psd)
 		return;
 
-	dev->power.subsys_data = NULL;
 	INIT_LIST_HEAD(&list);
 
-	spin_lock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);
 
-	list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
+	list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
 		list_move(&ce->node, &list);
 
-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);
 
-	kfree(pcd);
+	dev_pm_put_subsys_data(dev);
 
 	list_for_each_entry_safe_reverse(ce, c, &list, node) {
 		list_del(&ce->node);
@@ -225,25 +216,25 @@ void pm_clk_destroy(struct device *dev)
  */
 int pm_clk_suspend(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (!pcd)
+	if (!psd)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
+	list_for_each_entry_reverse(ce, &psd->clock_list, node) {
 		if (ce->status < PCE_STATUS_ERROR) {
 			clk_disable(ce->clk);
 			ce->status = PCE_STATUS_ACQUIRED;
 		}
 	}
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -254,25 +245,25 @@ int pm_clk_suspend(struct device *dev)
  */
 int pm_clk_resume(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (!pcd)
+	if (!psd)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry(ce, &pcd->clock_list, node) {
+	list_for_each_entry(ce, &psd->clock_list, node) {
 		if (ce->status < PCE_STATUS_ERROR) {
 			clk_enable(ce->clk);
 			ce->status = PCE_STATUS_ENABLED;
 		}
 	}
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -310,7 +301,7 @@ static int pm_clk_notify(struct notifier_block *nb,
 		if (dev->pm_domain)
 			break;
 
-		error = pm_clk_init(dev);
+		error = pm_clk_create(dev);
 		if (error)
 			break;
 
@@ -345,22 +336,22 @@ static int pm_clk_notify(struct notifier_block *nb,
  */
 int pm_clk_suspend(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
 	/* If there is no driver, the clocks are already disabled. */
-	if (!pcd || !dev->driver)
+	if (!psd || !dev->driver)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry_reverse(ce, &pcd->clock_list, node)
+	list_for_each_entry_reverse(ce, &psd->clock_list, node)
 		clk_disable(ce->clk);
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -371,22 +362,22 @@ int pm_clk_suspend(struct device *dev)
  */
 int pm_clk_resume(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
 	/* If there is no driver, the clocks should remain disabled. */
-	if (!pcd || !dev->driver)
+	if (!psd || !dev->driver)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry(ce, &pcd->clock_list, node)
+	list_for_each_entry(ce, &psd->clock_list, node)
 		clk_enable(ce->clk);
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
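The clock_ops.c rework above replaces pm_clk_init() with a pm_clk_create()/pm_clk_destroy() pair built on the shared pm_subsys_data object. A minimal usage sketch, not part of this merge — only the pm_clk_* calls come from the code above; the subsystem hook and the "fck" connection ID are illustrative:

```c
#include <linux/device.h>
#include <linux/pm_clock.h>

/* Hypothetical subsystem hook attaching PM clocks to a device. */
static int foo_attach_dev(struct device *dev)
{
	int error;

	/* Allocates (or takes a reference on) dev->power.subsys_data. */
	error = pm_clk_create(dev);
	if (error)
		return error;

	/* Look up and start tracking the device's "fck" clock. */
	error = pm_clk_add(dev, "fck");
	if (error)
		pm_clk_destroy(dev);	/* drops the subsys_data reference */

	return error;
}
```

pm_clk_suspend() and pm_clk_resume() can then be wired into the subsystem's runtime PM callbacks to gate every tracked clock at once.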
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
new file mode 100644
index 000000000000..29820c396182
--- /dev/null
+++ b/drivers/base/power/common.c
@@ -0,0 +1,86 @@
+/*
+ * drivers/base/power/common.c - Common device power management code.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pm_clock.h>
+
+/**
+ * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
+ * @dev: Device to handle.
+ *
+ * If power.subsys_data is NULL, point it to a new object, otherwise increment
+ * its reference counter. Return 1 if a new object has been created, otherwise
+ * return 0 or error code.
+ */
+int dev_pm_get_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+	int ret = 0;
+
+	psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+	if (!psd)
+		return -ENOMEM;
+
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.subsys_data) {
+		dev->power.subsys_data->refcount++;
+	} else {
+		spin_lock_init(&psd->lock);
+		psd->refcount = 1;
+		dev->power.subsys_data = psd;
+		pm_clk_init(dev);
+		psd = NULL;
+		ret = 1;
+	}
+
+	spin_unlock_irq(&dev->power.lock);
+
+	/* kfree() verifies that its argument is nonzero. */
+	kfree(psd);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
+
+/**
+ * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
+ * @dev: Device to handle.
+ *
+ * If the reference counter of power.subsys_data is zero after dropping the
+ * reference, power.subsys_data is removed. Return 1 if that happens or 0
+ * otherwise.
+ */
+int dev_pm_put_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+	int ret = 0;
+
+	spin_lock_irq(&dev->power.lock);
+
+	psd = dev_to_psd(dev);
+	if (!psd) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (--psd->refcount == 0) {
+		dev->power.subsys_data = NULL;
+		kfree(psd);
+		ret = 1;
+	}
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+
+	return ret;
+}
EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
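The new common.c introduces the reference-counted pm_subsys_data object shared by the clock and domain code. A hedged sketch of the intended calling pattern (hypothetical subsystem code; per the kerneldoc above, the getter returns 1 on first allocation, 0 on an extra reference, negative on error):

```c
#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical: pin subsys_data while storing per-domain data on it. */
static int foo_subsys_attach(struct device *dev, struct pm_domain_data *pdd)
{
	int ret = dev_pm_get_subsys_data(dev);

	if (ret < 0)
		return ret;	/* -ENOMEM, nothing to undo */

	/* subsys_data is now valid and pinned; hang our data off it. */
	dev->power.subsys_data->domain_data = pdd;
	return 0;
}

static void foo_subsys_detach(struct device *dev)
{
	dev->power.subsys_data->domain_data = NULL;
	dev_pm_put_subsys_data(dev);	/* frees the object on the last put */
}
```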
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 1c374579407c..6790cf7eba5a 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -29,10 +29,20 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
 	return pd_to_genpd(dev->pm_domain);
 }
 
-static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 {
-	if (!WARN_ON(genpd->sd_count == 0))
-		genpd->sd_count--;
+	bool ret = false;
+
+	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
+		ret = !!atomic_dec_and_test(&genpd->sd_count);
+
+	return ret;
+}
+
+static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
+{
+	atomic_inc(&genpd->sd_count);
+	smp_mb__after_atomic_inc();
 }
 
 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
@@ -71,81 +81,119 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
 }
 
 /**
- * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+ * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
  *
- * Restore power to @genpd and all of its parents so that it is possible to
+ * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-int pm_genpd_poweron(struct generic_pm_domain *genpd)
+int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct generic_pm_domain *parent = genpd->parent;
+	struct gpd_link *link;
+	DEFINE_WAIT(wait);
 	int ret = 0;
 
- start:
-	if (parent) {
-		genpd_acquire_lock(parent);
-		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-	} else {
+	/* If the domain's master is being waited for, we have to wait too. */
+	for (;;) {
+		prepare_to_wait(&genpd->status_wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (genpd->status != GPD_STATE_WAIT_MASTER)
+			break;
+		mutex_unlock(&genpd->lock);
+
+		schedule();
+
 		mutex_lock(&genpd->lock);
 	}
+	finish_wait(&genpd->status_wait_queue, &wait);
 
 	if (genpd->status == GPD_STATE_ACTIVE
 	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
-		goto out;
+		return 0;
 
 	if (genpd->status != GPD_STATE_POWER_OFF) {
 		genpd_set_active(genpd);
-		goto out;
+		return 0;
 	}
 
-	if (parent && parent->status != GPD_STATE_ACTIVE) {
+	/*
+	 * The list is guaranteed not to change while the loop below is being
+	 * executed, unless one of the masters' .power_on() callbacks fiddles
+	 * with it.
+	 */
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_inc(link->master);
+		genpd->status = GPD_STATE_WAIT_MASTER;
+
 		mutex_unlock(&genpd->lock);
-		genpd_release_lock(parent);
 
-		ret = pm_genpd_poweron(parent);
-		if (ret)
-			return ret;
+		ret = pm_genpd_poweron(link->master);
 
-		goto start;
+		mutex_lock(&genpd->lock);
+
+		/*
+		 * The "wait for parent" status is guaranteed not to change
+		 * while the master is powering on.
+		 */
+		genpd->status = GPD_STATE_POWER_OFF;
+		wake_up_all(&genpd->status_wait_queue);
+		if (ret) {
+			genpd_sd_counter_dec(link->master);
+			goto err;
+		}
 	}
 
 	if (genpd->power_on) {
 		ret = genpd->power_on(genpd);
 		if (ret)
-			goto out;
+			goto err;
 	}
 
 	genpd_set_active(genpd);
-	if (parent)
-		parent->sd_count++;
 
- out:
-	mutex_unlock(&genpd->lock);
-	if (parent)
-		genpd_release_lock(parent);
+	return 0;
+
+ err:
+	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
+		genpd_sd_counter_dec(link->master);
 
 	return ret;
 }
 
+/**
+ * pm_genpd_poweron - Restore power to a given PM domain and its masters.
+ * @genpd: PM domain to power up.
+ */
+int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+	int ret;
+
+	mutex_lock(&genpd->lock);
+	ret = __pm_genpd_poweron(genpd);
+	mutex_unlock(&genpd->lock);
+	return ret;
+}
+
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_RUNTIME
 
 /**
  * __pm_genpd_save_device - Save the pre-suspend state of a device.
- * @dle: Device list entry of the device to save the state of.
+ * @pdd: Domain data of the device to save the state of.
  * @genpd: PM domain the device belongs to.
  */
-static int __pm_genpd_save_device(struct dev_list_entry *dle,
+static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 				  struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct device *dev = dle->dev;
+	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	struct device *dev = pdd->dev;
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
 
-	if (dle->need_restore)
+	if (gpd_data->need_restore)
 		return 0;
 
 	mutex_unlock(&genpd->lock);
@@ -163,24 +211,25 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
 	mutex_lock(&genpd->lock);
 
 	if (!ret)
-		dle->need_restore = true;
+		gpd_data->need_restore = true;
 
 	return ret;
 }
 
 /**
  * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
- * @dle: Device list entry of the device to restore the state of.
+ * @pdd: Domain data of the device to restore the state of.
  * @genpd: PM domain the device belongs to.
  */
-static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 				      struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct device *dev = dle->dev;
+	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	struct device *dev = pdd->dev;
 	struct device_driver *drv = dev->driver;
 
-	if (!dle->need_restore)
+	if (!gpd_data->need_restore)
 		return;
 
 	mutex_unlock(&genpd->lock);
@@ -197,7 +246,7 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
 
 	mutex_lock(&genpd->lock);
 
-	dle->need_restore = false;
+	gpd_data->need_restore = false;
 }
 
 /**
@@ -211,7 +260,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
  */
 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
 {
-	return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+	return genpd->status == GPD_STATE_WAIT_MASTER
+		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
 }
 
 /**
@@ -238,8 +288,8 @@ void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct generic_pm_domain *parent;
-	struct dev_list_entry *dle;
+	struct pm_domain_data *pdd;
+	struct gpd_link *link;
 	unsigned int not_suspended;
 	int ret = 0;
 
@@ -247,19 +297,22 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	/*
 	 * Do not try to power off the domain in the following situations:
 	 * (1) The domain is already in the "power off" state.
-	 * (2) System suspend is in progress.
+	 * (2) The domain is waiting for its master to power up.
 	 * (3) One of the domain's devices is being resumed right now.
+	 * (4) System suspend is in progress.
 	 */
-	if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
-	    || genpd->resume_count > 0)
+	if (genpd->status == GPD_STATE_POWER_OFF
+	    || genpd->status == GPD_STATE_WAIT_MASTER
+	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
 		return 0;
 
-	if (genpd->sd_count > 0)
+	if (atomic_read(&genpd->sd_count) > 0)
 		return -EBUSY;
 
 	not_suspended = 0;
-	list_for_each_entry(dle, &genpd->dev_list, node)
-		if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
+	list_for_each_entry(pdd, &genpd->dev_list, list_node)
+		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
+		    || pdd->dev->power.irq_safe))
 			not_suspended++;
 
 	if (not_suspended > genpd->in_progress)
@@ -282,54 +335,50 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	genpd->status = GPD_STATE_BUSY;
 	genpd->poweroff_task = current;
 
-	list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
-		ret = __pm_genpd_save_device(dle, genpd);
+	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+		ret = atomic_read(&genpd->sd_count) == 0 ?
+			__pm_genpd_save_device(pdd, genpd) : -EBUSY;
+
+		if (genpd_abort_poweroff(genpd))
+			goto out;
+
 		if (ret) {
 			genpd_set_active(genpd);
 			goto out;
 		}
 
-		if (genpd_abort_poweroff(genpd))
-			goto out;
-
 		if (genpd->status == GPD_STATE_REPEAT) {
 			genpd->poweroff_task = NULL;
 			goto start;
 		}
 	}
 
-	parent = genpd->parent;
-	if (parent) {
-		mutex_unlock(&genpd->lock);
-
-		genpd_acquire_lock(parent);
-		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-
-		if (genpd_abort_poweroff(genpd)) {
-			genpd_release_lock(parent);
+	if (genpd->power_off) {
+		if (atomic_read(&genpd->sd_count) > 0) {
+			ret = -EBUSY;
 			goto out;
 		}
-	}
 
-	if (genpd->power_off) {
+		/*
+		 * If sd_count > 0 at this point, one of the subdomains hasn't
+		 * managed to call pm_genpd_poweron() for the master yet after
+		 * incrementing it.  In that case pm_genpd_poweron() will wait
+		 * for us to drop the lock, so we can call .power_off() and let
+		 * the pm_genpd_poweron() restore power for us (this shouldn't
+		 * happen very often).
+		 */
 		ret = genpd->power_off(genpd);
 		if (ret == -EBUSY) {
 			genpd_set_active(genpd);
-			if (parent)
-				genpd_release_lock(parent);
-
 			goto out;
 		}
 	}
 
 	genpd->status = GPD_STATE_POWER_OFF;
 
-	if (parent) {
-		genpd_sd_counter_dec(parent);
-		if (parent->sd_count == 0)
-			genpd_queue_power_off_work(parent);
-
-		genpd_release_lock(parent);
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		genpd_queue_power_off_work(link->master);
 	}
 
  out:
@@ -371,12 +420,21 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
+	might_sleep_if(!genpd->dev_irq_safe);
+
 	if (genpd->stop_device) {
 		int ret = genpd->stop_device(dev);
 		if (ret)
 			return ret;
 	}
 
+	/*
+	 * If power.irq_safe is set, this routine will be run with interrupts
+	 * off, so it can't use mutexes.
+	 */
+	if (dev->power.irq_safe)
+		return 0;
+
 	mutex_lock(&genpd->lock);
 	genpd->in_progress++;
 	pm_genpd_poweroff(genpd);
@@ -387,24 +445,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 }
 
 /**
- * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
- * @dev: Device to resume.
- * @genpd: PM domain the device belongs to.
- */
-static void __pm_genpd_runtime_resume(struct device *dev,
-				      struct generic_pm_domain *genpd)
-{
-	struct dev_list_entry *dle;
-
-	list_for_each_entry(dle, &genpd->dev_list, node) {
-		if (dle->dev == dev) {
-			__pm_genpd_restore_device(dle, genpd);
-			break;
-		}
-	}
-}
-
-/**
  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
  * @dev: Device to resume.
  *
@@ -424,11 +464,18 @@ static int pm_genpd_runtime_resume(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	ret = pm_genpd_poweron(genpd);
-	if (ret)
-		return ret;
+	might_sleep_if(!genpd->dev_irq_safe);
+
+	/* If power.irq_safe, the PM domain is never powered off. */
+	if (dev->power.irq_safe)
+		goto out;
 
 	mutex_lock(&genpd->lock);
+	ret = __pm_genpd_poweron(genpd);
+	if (ret) {
+		mutex_unlock(&genpd->lock);
+		return ret;
+	}
 	genpd->status = GPD_STATE_BUSY;
 	genpd->resume_count++;
 	for (;;) {
@@ -448,12 +495,13 @@ static int pm_genpd_runtime_resume(struct device *dev)
 		mutex_lock(&genpd->lock);
 	}
 	finish_wait(&genpd->status_wait_queue, &wait);
-	__pm_genpd_runtime_resume(dev, genpd);
+	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
 	genpd->resume_count--;
 	genpd_set_active(genpd);
 	wake_up_all(&genpd->status_wait_queue);
 	mutex_unlock(&genpd->lock);
 
+ out:
 	if (genpd->start_device)
 		genpd->start_device(dev);
 
@@ -478,8 +526,6 @@ void pm_genpd_poweroff_unused(void)
 #else
 
 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
-static inline void __pm_genpd_runtime_resume(struct device *dev,
-					     struct generic_pm_domain *genpd) {}
 
 #define pm_genpd_runtime_suspend	NULL
 #define pm_genpd_runtime_resume		NULL
@@ -489,11 +535,11 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
 #ifdef CONFIG_PM_SLEEP
 
 /**
- * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
+ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
  *
  * Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so.  Also, in that case propagate to its parent.
+ * hibernation) and do that if so.  Also, in that case propagate to its masters.
  *
  * This function is only called in "noirq" stages of system power transitions,
  * so it need not acquire locks (all of the "noirq" callbacks are executed
@@ -501,21 +547,23 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
  */
 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 {
-	struct generic_pm_domain *parent = genpd->parent;
+	struct gpd_link *link;
 
 	if (genpd->status == GPD_STATE_POWER_OFF)
 		return;
 
-	if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
+	if (genpd->suspended_count != genpd->device_count
+	    || atomic_read(&genpd->sd_count) > 0)
 		return;
 
 	if (genpd->power_off)
 		genpd->power_off(genpd);
 
 	genpd->status = GPD_STATE_POWER_OFF;
-	if (parent) {
-		genpd_sd_counter_dec(parent);
-		pm_genpd_sync_poweroff(parent);
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		pm_genpd_sync_poweroff(link->master);
 	}
 }
 
@@ -666,7 +714,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 	if (ret)
 		return ret;
 
-	if (device_may_wakeup(dev)
+	if (dev->power.wakeup_path
 	    && genpd->active_wakeup && genpd->active_wakeup(dev))
 		return 0;
 
@@ -890,7 +938,7 @@ static int pm_genpd_dev_poweroff_noirq(struct device *dev)
 	if (ret)
 		return ret;
 
-	if (device_may_wakeup(dev)
+	if (dev->power.wakeup_path
 	    && genpd->active_wakeup && genpd->active_wakeup(dev))
 		return 0;
 
@@ -1034,7 +1082,8 @@ static void pm_genpd_complete(struct device *dev)
  */
 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 {
-	struct dev_list_entry *dle;
+	struct generic_pm_domain_data *gpd_data;
+	struct pm_domain_data *pdd;
 	int ret = 0;
 
 	dev_dbg(dev, "%s()\n", __func__);
@@ -1054,26 +1103,26 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 		goto out;
 	}
 
-	list_for_each_entry(dle, &genpd->dev_list, node)
-		if (dle->dev == dev) {
+	list_for_each_entry(pdd, &genpd->dev_list, list_node)
+		if (pdd->dev == dev) {
 			ret = -EINVAL;
 			goto out;
 		}
 
-	dle = kzalloc(sizeof(*dle), GFP_KERNEL);
-	if (!dle) {
+	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+	if (!gpd_data) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	dle->dev = dev;
-	dle->need_restore = false;
-	list_add_tail(&dle->node, &genpd->dev_list);
 	genpd->device_count++;
 
-	spin_lock_irq(&dev->power.lock);
 	dev->pm_domain = &genpd->domain;
-	spin_unlock_irq(&dev->power.lock);
+	dev_pm_get_subsys_data(dev);
+	dev->power.subsys_data->domain_data = &gpd_data->base;
+	gpd_data->base.dev = dev;
+	gpd_data->need_restore = false;
+	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
 
  out:
 	genpd_release_lock(genpd);
@@ -1089,7 +1138,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 			   struct device *dev)
 {
-	struct dev_list_entry *dle;
+	struct pm_domain_data *pdd;
 	int ret = -EINVAL;
 
 	dev_dbg(dev, "%s()\n", __func__);
@@ -1104,17 +1153,17 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 		goto out;
 	}
 
-	list_for_each_entry(dle, &genpd->dev_list, node) {
-		if (dle->dev != dev)
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		if (pdd->dev != dev)
 			continue;
 
-		spin_lock_irq(&dev->power.lock);
+		list_del_init(&pdd->list_node);
+		pdd->dev = NULL;
+		dev_pm_put_subsys_data(dev);
 		dev->pm_domain = NULL;
-		spin_unlock_irq(&dev->power.lock);
+		kfree(to_gpd_data(pdd));
 
 		genpd->device_count--;
-		list_del(&dle->node);
-		kfree(dle);
 
 		ret = 0;
 		break;
@@ -1129,48 +1178,55 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 /**
  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
  * @genpd: Master PM domain to add the subdomain to.
- * @new_subdomain: Subdomain to be added.
+ * @subdomain: Subdomain to be added.
  */
 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
-			   struct generic_pm_domain *new_subdomain)
+			   struct generic_pm_domain *subdomain)
 {
-	struct generic_pm_domain *subdomain;
+	struct gpd_link *link;
 	int ret = 0;
 
-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
 start:
 	genpd_acquire_lock(genpd);
-	mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
 
-	if (new_subdomain->status != GPD_STATE_POWER_OFF
-	    && new_subdomain->status != GPD_STATE_ACTIVE) {
-		mutex_unlock(&new_subdomain->lock);
+	if (subdomain->status != GPD_STATE_POWER_OFF
+	    && subdomain->status != GPD_STATE_ACTIVE) {
+		mutex_unlock(&subdomain->lock);
 		genpd_release_lock(genpd);
 		goto start;
 	}
 
 	if (genpd->status == GPD_STATE_POWER_OFF
-	    && new_subdomain->status != GPD_STATE_POWER_OFF) {
+	    && subdomain->status != GPD_STATE_POWER_OFF) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
-		if (subdomain == new_subdomain) {
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		if (link->slave == subdomain && link->master == genpd) {
 			ret = -EINVAL;
 			goto out;
 		}
 	}
 
-	list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
-	new_subdomain->parent = genpd;
+	link = kzalloc(sizeof(*link), GFP_KERNEL);
+	if (!link) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	link->master = genpd;
+	list_add_tail(&link->master_node, &genpd->master_links);
+	link->slave = subdomain;
+	list_add_tail(&link->slave_node, &subdomain->slave_links);
 	if (subdomain->status != GPD_STATE_POWER_OFF)
-		genpd->sd_count++;
+		genpd_sd_counter_inc(genpd);
 
  out:
-	mutex_unlock(&new_subdomain->lock);
+	mutex_unlock(&subdomain->lock);
 	genpd_release_lock(genpd);
 
 	return ret;
@@ -1179,22 +1235,22 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 /**
  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
  * @genpd: Master PM domain to remove the subdomain from.
- * @target: Subdomain to be removed.
+ * @subdomain: Subdomain to be removed.
  */
 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
-			      struct generic_pm_domain *target)
+			      struct generic_pm_domain *subdomain)
 {
-	struct generic_pm_domain *subdomain;
+	struct gpd_link *link;
 	int ret = -EINVAL;
 
-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
 start:
 	genpd_acquire_lock(genpd);
 
-	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
-		if (subdomain != target)
+	list_for_each_entry(link, &genpd->master_links, master_node) {
+		if (link->slave != subdomain)
 			continue;
 
 		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
@@ -1206,8 +1262,9 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 			goto start;
 		}
 
-		list_del(&subdomain->sd_node);
-		subdomain->parent = NULL;
+		list_del(&link->master_node);
+		list_del(&link->slave_node);
+		kfree(link);
 		if (subdomain->status != GPD_STATE_POWER_OFF)
 			genpd_sd_counter_dec(genpd);
 
@@ -1234,15 +1291,14 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
 	if (IS_ERR_OR_NULL(genpd))
 		return;
 
-	INIT_LIST_HEAD(&genpd->sd_node);
-	genpd->parent = NULL;
+	INIT_LIST_HEAD(&genpd->master_links);
+	INIT_LIST_HEAD(&genpd->slave_links);
 	INIT_LIST_HEAD(&genpd->dev_list);
-	INIT_LIST_HEAD(&genpd->sd_list);
 	mutex_init(&genpd->lock);
 	genpd->gov = gov;
 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
 	genpd->in_progress = 0;
-	genpd->sd_count = 0;
+	atomic_set(&genpd->sd_count, 0);
 	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
 	init_waitqueue_head(&genpd->status_wait_queue);
 	genpd->poweroff_task = NULL;
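The domain.c changes replace the single parent pointer with gpd_link master/slave lists, so a generic PM domain may now have several masters. A sketch of how platform code might wire this up (hypothetical names; the sh7372 A4R/A3SP support in this merge follows the same pattern):

```c
#include <linux/pm_domain.h>

static int foo_pd_power_off(struct generic_pm_domain *genpd)
{
	/* hypothetical: gate power to the island here */
	return 0;
}

static int foo_pd_power_on(struct generic_pm_domain *genpd)
{
	return 0;
}

static struct generic_pm_domain foo_master_pd = {
	.power_on	= foo_pd_power_on,
	.power_off	= foo_pd_power_off,
};

static struct generic_pm_domain foo_sub_pd = {
	.power_on	= foo_pd_power_on,
	.power_off	= foo_pd_power_off,
};

static void foo_init_domains(struct device *dev)
{
	pm_genpd_init(&foo_master_pd, NULL, false);	/* no governor, starts on */
	pm_genpd_init(&foo_sub_pd, NULL, false);

	/* Allocates a gpd_link making foo_master_pd a master of foo_sub_pd. */
	pm_genpd_add_subdomain(&foo_master_pd, &foo_sub_pd);

	/* Devices are still registered with the domain that contains them. */
	pm_genpd_add_device(&foo_sub_pd, dev);
}
```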
dev->bus->name : "No Bus", dev_name(dev)); complete_all(&dev->power.completion); mutex_lock(&dpm_list_mtx); + dev_pm_qos_constraints_destroy(dev); list_del_init(&dev->power.entry); mutex_unlock(&dpm_list_mtx); device_wakeup_disable(dev); @@ -464,8 +468,12 @@ void dpm_resume_noirq(pm_message_t state) mutex_unlock(&dpm_list_mtx); error = device_resume_noirq(dev, state); - if (error) + if (error) { + suspend_stats.failed_resume_noirq++; + dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); + dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, " early", error); + } mutex_lock(&dpm_list_mtx); put_device(dev); @@ -626,8 +634,12 @@ void dpm_resume(pm_message_t state) mutex_unlock(&dpm_list_mtx); error = device_resume(dev, state, false); - if (error) + if (error) { + suspend_stats.failed_resume++; + dpm_save_failed_step(SUSPEND_RESUME); + dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, "", error); + } mutex_lock(&dpm_list_mtx); } @@ -802,6 +814,9 @@ int dpm_suspend_noirq(pm_message_t state) mutex_lock(&dpm_list_mtx); if (error) { pm_dev_err(dev, state, " late", error); + suspend_stats.failed_suspend_noirq++; + dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); + dpm_save_failed_dev(dev_name(dev)); put_device(dev); break; } @@ -902,7 +917,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) } End: - dev->power.is_suspended = !error; + if (!error) { + dev->power.is_suspended = true; + if (dev->power.wakeup_path && dev->parent) + dev->parent->power.wakeup_path = true; + } device_unlock(dev); complete_all(&dev->power.completion); @@ -923,8 +942,10 @@ static void async_suspend(void *data, async_cookie_t cookie) int error; error = __device_suspend(dev, pm_transition, true); - if (error) + if (error) { + dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, pm_transition, " async", error); + } put_device(dev); } @@ -967,6 +988,7 @@ int dpm_suspend(pm_message_t state) mutex_lock(&dpm_list_mtx); if (error) { pm_dev_err(dev, state, "", error); + dpm_save_failed_dev(dev_name(dev)); put_device(dev); break; } @@ -980,7 +1002,10 @@ int dpm_suspend(pm_message_t state) async_synchronize_full(); if (!error) error = async_error; - if (!error) + if (error) { + suspend_stats.failed_suspend++; + dpm_save_failed_step(SUSPEND_SUSPEND); + } else dpm_show_time(starttime, state, NULL); return error; } @@ -999,6 +1024,8 @@ static int device_prepare(struct device *dev, pm_message_t state) device_lock(dev); + dev->power.wakeup_path = device_may_wakeup(dev); + if (dev->pm_domain) { pm_dev_dbg(dev, state, "preparing power domain "); if (dev->pm_domain->ops.prepare) @@ -1088,7 +1115,10 @@ int dpm_suspend_start(pm_message_t state) int error; error = dpm_prepare(state); - if (!error) + if (error) { + suspend_stats.failed_prepare++; + dpm_save_failed_step(SUSPEND_PREPARE); + } else error = dpm_suspend(state); return error; } diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index b23de185cb04..434a6c011675 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -73,6 +73,7 @@ struct opp { * RCU usage: nodes are not modified in the list of device_opp, * however addition is possible and is secured by dev_opp_list_lock * @dev: device pointer + * @head: notifier head to notify the OPP availability changes. 
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index f2a25f18fde7..9bf62323aaf3 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,3 +1,5 @@
+#include <linux/pm_qos.h>
+
 #ifdef CONFIG_PM_RUNTIME
 
 extern void pm_runtime_init(struct device *dev);
@@ -35,15 +37,21 @@ extern void device_pm_move_last(struct device *);
 static inline void device_pm_init(struct device *dev)
 {
 	spin_lock_init(&dev->power.lock);
+	dev->power.power_state = PMSG_INVALID;
 	pm_runtime_init(dev);
 }
 
+static inline void device_pm_add(struct device *dev)
+{
+	dev_pm_qos_constraints_init(dev);
+}
+
 static inline void device_pm_remove(struct device *dev)
 {
+	dev_pm_qos_constraints_destroy(dev);
 	pm_runtime_remove(dev);
 }
 
-static inline void device_pm_add(struct device *dev) {}
-
 static inline void device_pm_move_before(struct device *deva,
 					 struct device *devb) {}
 static inline void device_pm_move_after(struct device *deva,
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
new file mode 100644
index 000000000000..91e061417382
--- /dev/null
+++ b/drivers/base/power/qos.c
@@ -0,0 +1,419 @@
+/*
+ * Devices PM QoS constraints management
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * This module exposes the interface to kernel space for specifying
+ * per-device PM QoS dependencies. It provides infrastructure for registration
+ * of:
+ *
+ * Dependents on a QoS value : register requests
+ * Watchers of QoS value : get notified when target QoS value changes
+ *
+ * This QoS design is best effort based. Dependents register their QoS needs.
+ * Watchers register to keep track of the current QoS needs of the system.
+ * Watchers can register different types of notification callbacks:
+ *  . a per-device notification callback using the dev_pm_qos_*_notifier API.
+ *    The notification chain data is stored in the per-device constraint
+ *    data struct.
+ *  . a system-wide notification callback using the dev_pm_qos_*_global_notifier
+ *    API. The notification chain data is stored in a static variable.
+ *
+ * Note about the per-device constraint data struct allocation:
+ * . The per-device constraints data struct ptr is tored into the device
+ *    dev_pm_info.
+ * . To minimize the data usage by the per-device constraints, the data struct
+ *   is only allocated at the first call to dev_pm_qos_add_request.
+ * . The data is later free'd when the device is removed from the system.
+ * . A global mutex protects the constraints users from the data being
+ *   allocated and free'd.
+ */
+
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+
+static DEFINE_MUTEX(dev_pm_qos_mtx);
+
+static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
+
+/**
+ * dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * @dev: Device to get the PM QoS constraint value for.
+ */
+s32 dev_pm_qos_read_value(struct device *dev)
+{
+	struct pm_qos_constraints *c;
+	unsigned long flags;
+	s32 ret = 0;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	c = dev->power.constraints;
+	if (c)
+		ret = pm_qos_read_value(c);
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return ret;
+}
+
+/*
+ * apply_constraint
+ * @req: constraint request to apply
+ * @action: action to perform add/update/remove, of type enum pm_qos_req_action
+ * @value: defines the qos request
+ *
+ * Internal function to update the constraints list using the PM QoS core
+ * code and if needed call the per-device and the global notification
+ * callbacks
+ */
+static int apply_constraint(struct dev_pm_qos_request *req,
+			    enum pm_qos_req_action action, int value)
+{
+	int ret, curr_value;
+
+	ret = pm_qos_update_target(req->dev->power.constraints,
+				   &req->node, action, value);
+
+	if (ret) {
+		/* Call the global callbacks if needed */
+		curr_value = pm_qos_read_value(req->dev->power.constraints);
+		blocking_notifier_call_chain(&dev_pm_notifiers,
+					     (unsigned long)curr_value,
+					     req);
+	}
+
+	return ret;
+}
+
+/*
+ * dev_pm_qos_constraints_allocate
+ * @dev: device to allocate data for
+ *
+ * Called at the first call to add_request, for constraint data allocation
+ * Must be called with the dev_pm_qos_mtx mutex held
+ */
+static int dev_pm_qos_constraints_allocate(struct device *dev)
+{
+	struct pm_qos_constraints *c;
+	struct blocking_notifier_head *n;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return -ENOMEM;
+
+	n = kzalloc(sizeof(*n), GFP_KERNEL);
+	if (!n) {
+		kfree(c);
+		return -ENOMEM;
+	}
+	BLOCKING_INIT_NOTIFIER_HEAD(n);
+
+	plist_head_init(&c->list);
+	c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+	c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+	c->type = PM_QOS_MIN;
+	c->notifiers = n;
+
+	spin_lock_irq(&dev->power.lock);
+	dev->power.constraints = c;
+	spin_unlock_irq(&dev->power.lock);
+
+	return 0;
+}
+
+/**
+ * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
+ * @dev: target device
+ *
+ * Called from the device PM subsystem during device insertion under
+ * device_pm_lock().
+ */
+void dev_pm_qos_constraints_init(struct device *dev)
+{
+	mutex_lock(&dev_pm_qos_mtx);
+	dev->power.constraints = NULL;
+	dev->power.power_state = PMSG_ON;
+	mutex_unlock(&dev_pm_qos_mtx);
+}
+
+/**
+ * dev_pm_qos_constraints_destroy
+ * @dev: target device
+ *
+ * Called from the device PM subsystem on device removal under device_pm_lock().
+ */
+void dev_pm_qos_constraints_destroy(struct device *dev)
+{
+	struct dev_pm_qos_request *req, *tmp;
+	struct pm_qos_constraints *c;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	dev->power.power_state = PMSG_INVALID;
+	c = dev->power.constraints;
+	if (!c)
+		goto out;
+
+	/* Flush the constraints list for the device */
+	plist_for_each_entry_safe(req, tmp, &c->list, node) {
+		/*
+		 * Update constraints list and call the notification
+		 * callbacks if needed
+		 */
+		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+		memset(req, 0, sizeof(*req));
+	}
+
+	spin_lock_irq(&dev->power.lock);
+	dev->power.constraints = NULL;
+	spin_unlock_irq(&dev->power.lock);
+
+	kfree(c->notifiers);
+	kfree(c);
+
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
+}
+
+/**
+ * dev_pm_qos_add_request - inserts new qos request into the list
+ * @dev: target device for the constraint
+ * @req: pointer to a preallocated handle
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the device constraints list of
+ * requested qos performance characteristics. It recomputes the aggregate
+ * QoS expectations of parameters and initializes the dev_pm_qos_request
+ * handle.  Caller needs to save this handle for later use in updates and
+ * removal.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
+ * to allocate for data structures, -ENODEV if the device has just been removed
+ * from the system.
+ */
+int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+			   s32 value)
+{
+	int ret = 0;
+
+	if (!dev || !req) /*guard against callers passing in null */
+		return -EINVAL;
+
+	if (dev_pm_qos_request_active(req)) {
+		WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already "
+			"added request\n");
+		return -EINVAL;
+	}
+
+	req->dev = dev;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (!dev->power.constraints) {
+		if (dev->power.power_state.event == PM_EVENT_INVALID) {
+			/* The device has been removed from the system. */
+			req->dev = NULL;
+			ret = -ENODEV;
+			goto out;
+		} else {
+			/*
+			 * Allocate the constraints data on the first call to
+			 * add_request, i.e. only if the data is not already
+			 * allocated and if the device has not been removed.
+			 */
+			ret = dev_pm_qos_constraints_allocate(dev);
+		}
+	}
+
+	if (!ret)
+		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
+
+/**
+ * dev_pm_qos_update_request - modifies an existing qos request
+ * @req : handle to list element holding a dev_pm_qos request to use
+ * @new_value: defines the qos request
+ *
+ * Updates an existing dev PM qos request along with updating the
+ * target value.
+ *
+ * Attempts are made to make this code callable on hot code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ */
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+			      s32 new_value)
+{
+	int ret = 0;
+
+	if (!req) /*guard against callers passing in null */
+		return -EINVAL;
+
+	if (!dev_pm_qos_request_active(req)) {
+		WARN(1, KERN_ERR "dev_pm_qos_update_request() called for "
+			"unknown object\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (req->dev->power.constraints) {
+		if (new_value != req->node.prio)
+			ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
+					       new_value);
+	} else {
+		/* Return if the device has been removed */
+		ret = -ENODEV;
+	}
+
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+/**
+ * dev_pm_qos_remove_request - modifies an existing qos request
+ * @req: handle to request list element
+ *
+ * Will remove pm qos request from the list of constraints and
+ * recompute the current target value. Call this on slow code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ */
+int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+	int ret = 0;
+
+	if (!req) /*guard against callers passing in null */
+		return -EINVAL;
+
+	if (!dev_pm_qos_request_active(req)) {
+		WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for "
+			"unknown object\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (req->dev->power.constraints) {
+		ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
+				       PM_QOS_DEFAULT_VALUE);
+		memset(req, 0, sizeof(*req));
+	} else {
+		/* Return if the device has been removed */
+		ret = -ENODEV;
+	}
+
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
+
+/**
+ * dev_pm_qos_add_notifier - sets notification entry for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block managed by caller.
+ *
+ * Will register the notifier into a notification chain that gets called
+ * upon changes to the target value for the device.
+ */
+int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
+{
+	int retval = 0;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	/* Silently return if the constraints object is not present. */
+	if (dev->power.constraints)
+		retval = blocking_notifier_chain_register(
+				dev->power.constraints->notifiers,
+				notifier);
+
+	mutex_unlock(&dev_pm_qos_mtx);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
+
+/**
+ * dev_pm_qos_remove_notifier - deletes notification for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block to be removed.
+ *
+ * Will remove the notifier from the notification chain that gets called
+ * upon changes to the target value.
+ */
+int dev_pm_qos_remove_notifier(struct device *dev,
+			       struct notifier_block *notifier)
+{
+	int retval = 0;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	/* Silently return if the constraints object is not present. */
+	if (dev->power.constraints)
+		retval = blocking_notifier_chain_unregister(
+				dev->power.constraints->notifiers,
+				notifier);
+
+	mutex_unlock(&dev_pm_qos_mtx);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
+
+/**
+ * dev_pm_qos_add_global_notifier - sets notification entry for changes to
+ * target value of the PM QoS constraints for any device
+ *
+ * @notifier: notifier block managed by caller.
+ *
+ * Will register the notifier into a notification chain that gets called
+ * upon changes to the target value for any device.
+ */
+int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
+{
+	return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
+
+/**
+ * dev_pm_qos_remove_global_notifier - deletes notification for changes to
+ * target value of PM QoS constraints for any device
+ *
+ * @notifier: notifier block to be removed.
+ *
+ * Will remove the notifier from the notification chain that gets called
+ * upon changes to the target value for any device.
+ */
+int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
+{
+	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
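The add/update/remove trio above is the whole per-device PM QoS client API. A minimal sketch of a driver imposing a latency constraint (the 100 and 50 values are arbitrary placeholders; the return-value handling follows the kerneldoc above):

```c
#include <linux/pm_qos.h>

static struct dev_pm_qos_request foo_pm_qos_req;

static int foo_constrain_latency(struct device *dev)
{
	int ret;

	/* Request a worst-case latency of 100 (units per the QoS class). */
	ret = dev_pm_qos_add_request(dev, &foo_pm_qos_req, 100);
	if (ret < 0)
		return ret;	/* -EINVAL, -ENOMEM or -ENODEV, see above */

	/* Tighten the constraint later without reallocating anything. */
	dev_pm_qos_update_request(&foo_pm_qos_req, 50);

	/* Drop it when the device goes idle. */
	return dev_pm_qos_remove_request(&foo_pm_qos_req);
}
```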
*/ + if (dev->power.constraints) + retval = blocking_notifier_chain_unregister( + dev->power.constraints->notifiers, + notifier); + + mutex_unlock(&dev_pm_qos_mtx); + return retval; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier); + +/** + * dev_pm_qos_add_global_notifier - sets notification entry for changes to + * target value of the PM QoS constraints for any device + * + * @notifier: notifier block managed by caller. + * + * Will register the notifier into a notification chain that gets called + * upon changes to the target value for any device. + */ +int dev_pm_qos_add_global_notifier(struct notifier_block *notifier) +{ + return blocking_notifier_chain_register(&dev_pm_notifiers, notifier); +} +EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier); + +/** + * dev_pm_qos_remove_global_notifier - deletes notification for changes to + * target value of PM QoS constraints for any device + * + * @notifier: notifier block to be removed. + * + * Will remove the notifier from the notification chain that gets called + * upon changes to the target value for any device. + */ +int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier) +{ + return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier); +} +EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index acb3f83b8079..6bb3aafa85ed 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -9,6 +9,7 @@ #include <linux/sched.h> #include <linux/pm_runtime.h> +#include <trace/events/rpm.h> #include "power.h" static int rpm_resume(struct device *dev, int rpmflags); @@ -155,6 +156,31 @@ static int rpm_check_suspend_allowed(struct device *dev) } /** + * __rpm_callback - Run a given runtime PM callback for a given device. + * @cb: Runtime PM callback to run. + * @dev: Device to run the callback for. + */ +static int __rpm_callback(int (*cb)(struct device *), struct device *dev) + __releases(&dev->power.lock) __acquires(&dev->power.lock) +{ + int retval; + + if (dev->power.irq_safe) + spin_unlock(&dev->power.lock); + else + spin_unlock_irq(&dev->power.lock); + + retval = cb(dev); + + if (dev->power.irq_safe) + spin_lock(&dev->power.lock); + else + spin_lock_irq(&dev->power.lock); + + return retval; +} + +/** * rpm_idle - Notify device bus type if the device can be suspended. * @dev: Device to notify the bus type about. * @rpmflags: Flag bits. @@ -171,6 +197,7 @@ static int rpm_idle(struct device *dev, int rpmflags) int (*callback)(struct device *); int retval; + trace_rpm_idle(dev, rpmflags); retval = rpm_check_suspend_allowed(dev); if (retval < 0) ; /* Conditions are wrong. */ @@ -225,24 +252,14 @@ static int rpm_idle(struct device *dev, int rpmflags) else callback = NULL; - if (callback) { - if (dev->power.irq_safe) - spin_unlock(&dev->power.lock); - else - spin_unlock_irq(&dev->power.lock); - - callback(dev); - - if (dev->power.irq_safe) - spin_lock(&dev->power.lock); - else - spin_lock_irq(&dev->power.lock); - } + if (callback) + __rpm_callback(callback, dev); dev->power.idle_notification = false; wake_up_all(&dev->power.wait_queue); out: + trace_rpm_return_int(dev, _THIS_IP_, retval); return retval; } @@ -252,22 +269,14 @@ static int rpm_idle(struct device *dev, int rpmflags) * @dev: Device to run the callback for. 
*/ static int rpm_callback(int (*cb)(struct device *), struct device *dev) - __releases(&dev->power.lock) __acquires(&dev->power.lock) { int retval; if (!cb) return -ENOSYS; - if (dev->power.irq_safe) { - retval = cb(dev); - } else { - spin_unlock_irq(&dev->power.lock); - - retval = cb(dev); + retval = __rpm_callback(cb, dev); - spin_lock_irq(&dev->power.lock); - } dev->power.runtime_error = retval; return retval != -EACCES ? retval : -EIO; } @@ -277,14 +286,16 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev) * @dev: Device to suspend. * @rpmflags: Flag bits. * - * Check if the device's runtime PM status allows it to be suspended. If - * another suspend has been started earlier, either return immediately or wait - * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a - * pending idle notification. If the RPM_ASYNC flag is set then queue a - * suspend request; otherwise run the ->runtime_suspend() callback directly. - * If a deferred resume was requested while the callback was running then carry - * it out; otherwise send an idle notification for the device (if the suspend - * failed) or for its parent (if the suspend succeeded). + * Check if the device's runtime PM status allows it to be suspended. + * Cancel a pending idle notification, autosuspend or suspend. If + * another suspend has been started earlier, either return immediately + * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC + * flags. If the RPM_ASYNC flag is set then queue a suspend request; + * otherwise run the ->runtime_suspend() callback directly. When + * ->runtime_suspend succeeded, if a deferred resume was requested while + * the callback was running then carry it out, otherwise send an idle + * notification for its parent (if the suspend succeeded and both + * ignore_children of parent->power and irq_safe of dev->power are not set). * * This function must be called under dev->power.lock with interrupts disabled. */ @@ -295,7 +306,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) struct device *parent = NULL; int retval; - dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); + trace_rpm_suspend(dev, rpmflags); repeat: retval = rpm_check_suspend_allowed(dev); @@ -347,6 +358,15 @@ static int rpm_suspend(struct device *dev, int rpmflags) goto out; } + if (dev->power.irq_safe) { + spin_unlock(&dev->power.lock); + + cpu_relax(); + + spin_lock(&dev->power.lock); + goto repeat; + } + /* Wait for the other suspend running in parallel with us. 
*/ for (;;) { prepare_to_wait(&dev->power.wait_queue, &wait, @@ -400,15 +420,16 @@ static int rpm_suspend(struct device *dev, int rpmflags) dev->power.runtime_error = 0; else pm_runtime_cancel_pending(dev); - } else { + wake_up_all(&dev->power.wait_queue); + goto out; + } no_callback: - __update_runtime_status(dev, RPM_SUSPENDED); - pm_runtime_deactivate_timer(dev); + __update_runtime_status(dev, RPM_SUSPENDED); + pm_runtime_deactivate_timer(dev); - if (dev->parent) { - parent = dev->parent; - atomic_add_unless(&parent->power.child_count, -1, 0); - } + if (dev->parent) { + parent = dev->parent; + atomic_add_unless(&parent->power.child_count, -1, 0); } wake_up_all(&dev->power.wait_queue); @@ -430,7 +451,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) } out: - dev_dbg(dev, "%s returns %d\n", __func__, retval); + trace_rpm_return_int(dev, _THIS_IP_, retval); return retval; } @@ -459,7 +480,7 @@ static int rpm_resume(struct device *dev, int rpmflags) struct device *parent = NULL; int retval = 0; - dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); + trace_rpm_resume(dev, rpmflags); repeat: if (dev->power.runtime_error) @@ -496,6 +517,15 @@ static int rpm_resume(struct device *dev, int rpmflags) goto out; } + if (dev->power.irq_safe) { + spin_unlock(&dev->power.lock); + + cpu_relax(); + + spin_lock(&dev->power.lock); + goto repeat; + } + /* Wait for the operation carried out in parallel with us. */ for (;;) { prepare_to_wait(&dev->power.wait_queue, &wait, @@ -615,7 +645,7 @@ static int rpm_resume(struct device *dev, int rpmflags) spin_lock_irq(&dev->power.lock); } - dev_dbg(dev, "%s returns %d\n", __func__, retval); + trace_rpm_return_int(dev, _THIS_IP_, retval); return retval; } @@ -732,13 +762,16 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend); * return immediately if it is larger than zero. Then carry out an idle * notification, either synchronous or asynchronous. * - * This routine may be called in atomic context if the RPM_ASYNC flag is set. + * This routine may be called in atomic context if the RPM_ASYNC flag is set, + * or if pm_runtime_irq_safe() has been called. */ int __pm_runtime_idle(struct device *dev, int rpmflags) { unsigned long flags; int retval; + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + if (rpmflags & RPM_GET_PUT) { if (!atomic_dec_and_test(&dev->power.usage_count)) return 0; @@ -761,13 +794,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle); * return immediately if it is larger than zero. Then carry out a suspend, * either synchronous or asynchronous. * - * This routine may be called in atomic context if the RPM_ASYNC flag is set. + * This routine may be called in atomic context if the RPM_ASYNC flag is set, + * or if pm_runtime_irq_safe() has been called. */ int __pm_runtime_suspend(struct device *dev, int rpmflags) { unsigned long flags; int retval; + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + if (rpmflags & RPM_GET_PUT) { if (!atomic_dec_and_test(&dev->power.usage_count)) return 0; @@ -789,13 +825,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_suspend); * If the RPM_GET_PUT flag is set, increment the device's usage count. Then * carry out a resume, either synchronous or asynchronous. * - * This routine may be called in atomic context if the RPM_ASYNC flag is set. + * This routine may be called in atomic context if the RPM_ASYNC flag is set, + * or if pm_runtime_irq_safe() has been called. 
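The three kerneldoc updates above all hinge on pm_runtime_irq_safe(). A minimal sketch of the pattern they permit, with all foo_* names invented and error handling omitted:

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct device *dev = dev_id;	/* assumed to be passed at request_irq() */

	/*
	 * Only legal in hardirq context because pm_runtime_irq_safe() was
	 * called below; a suspend or resume running in parallel is then
	 * spun on (the new cpu_relax() loops) instead of slept on.
	 */
	pm_runtime_get_sync(dev);
	/* ... touch the hardware ... */
	pm_runtime_put(dev);
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	/* the device's parent is pinned runtime-active as a side effect */
	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	return 0;
}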
*/ int __pm_runtime_resume(struct device *dev, int rpmflags) { unsigned long flags; int retval; + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + if (rpmflags & RPM_GET_PUT) atomic_inc(&dev->power.usage_count); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 84f7c7d5a098..14ee07e9cc43 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -276,7 +276,9 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable); * * By default, most devices should leave wakeup disabled. The exceptions are * devices that everyone expects to be wakeup sources: keyboards, power buttons, - * possibly network interfaces, etc. + * possibly network interfaces, etc. Also, devices that don't generate their + * own wakeup requests but merely forward requests from one bus to another + * (like PCI bridges) should have wakeup enabled by default. */ int device_init_wakeup(struct device *dev, bool enable) { diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 675246a6f7ef..f9b726091ad0 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -1118,7 +1118,7 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message) return 0; spin_lock_irq(&data->txlock); - if (!((message.event & PM_EVENT_AUTO) && data->tx_in_flight)) { + if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) { set_bit(BTUSB_SUSPENDING, &data->flags); spin_unlock_irq(&data->txlock); } else { diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index d4c542372886..0df014110097 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -12,7 +12,7 @@ #include <linux/mutex.h> #include <linux/sched.h> #include <linux/notifier.h> -#include <linux/pm_qos_params.h> +#include <linux/pm_qos.h> #include <linux/cpu.h> #include <linux/cpuidle.h> #include <linux/ktime.h> diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index 12c98900dcf8..f62fde21e962 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c @@ -14,7 +14,7 @@ #include <linux/kernel.h> #include <linux/cpuidle.h> -#include <linux/pm_qos_params.h> +#include <linux/pm_qos.h> #include <linux/moduleparam.h> #include <linux/jiffies.h> diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index c47f3d09c1ee..3600f1955e48 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -12,7 +12,7 @@ #include <linux/kernel.h> #include <linux/cpuidle.h> -#include <linux/pm_qos_params.h> +#include <linux/pm_qos.h> #include <linux/time.h> #include <linux/ktime.h> #include <linux/hrtimer.h> diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig new file mode 100644 index 000000000000..643b055ed3cd --- /dev/null +++ b/drivers/devfreq/Kconfig @@ -0,0 +1,75 @@ +config ARCH_HAS_DEVFREQ + bool + depends on ARCH_HAS_OPP + help + Denotes that the architecture supports DEVFREQ. If the architecture + supports multiple OPP entries per device and the frequency of the + devices with OPPs may be altered dynamically, the architecture + supports DEVFREQ. + +menuconfig PM_DEVFREQ + bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support" + depends on PM_OPP && ARCH_HAS_DEVFREQ + help + With OPP support, a device may have a list of frequencies and + voltages available. 
DEVFREQ, a generic DVFS framework, can be
+	  registered for a device with OPP support in order to let the
+	  governor provided to DEVFREQ choose an operating frequency
+	  based on the OPP list and the policy given with DEVFREQ.
+
+	  Each device may have its own governor and policy. DEVFREQ can
+	  reevaluate the device state periodically and/or based on the
+	  OPP list changes (each frequency/voltage pair in the OPP list
+	  may be disabled or enabled).
+
+	  Like some CPUs with CPUFREQ, a device may have multiple clocks.
+	  However, because the clock frequencies of a single device are
+	  determined by that single device's state, an instance of DEVFREQ
+	  is attached to a single device and returns a "representative"
+	  clock frequency from the OPP table of the device, which is also
+	  attached to a device one-to-one. The device registering DEVFREQ
+	  takes the responsibility to "interpret" the frequency listed in
+	  the OPP table and to set every one of its clocks accordingly,
+	  with the "target" callback given to DEVFREQ.
+
+if PM_DEVFREQ
+
+comment "DEVFREQ Governors"
+
+config DEVFREQ_GOV_SIMPLE_ONDEMAND
+	bool "Simple Ondemand"
+	help
+	  Chooses frequency based on the recent load on the device. Works
+	  similarly to the ONDEMAND governor of CPUFREQ. A device using
+	  Simple-Ondemand should be able to provide busy/total counter
+	  values that imply the usage rate. A device may provide tuned
+	  values to the governor through the data field of devfreq_add_device().
+
+config DEVFREQ_GOV_PERFORMANCE
+	bool "Performance"
+	help
+	  Sets the frequency to the maximum available frequency.
+	  This governor always returns UINT_MAX as the frequency so that
+	  the DEVFREQ framework returns the highest frequency available
+	  at any time.
+
+config DEVFREQ_GOV_POWERSAVE
+	bool "Powersave"
+	help
+	  Sets the frequency to the minimum available frequency.
+	  This governor always returns 0 as the frequency so that
+	  the DEVFREQ framework returns the lowest frequency available
+	  at any time.
+
+config DEVFREQ_GOV_USERSPACE
+	bool "Userspace"
+	help
+	  Sets the frequency to the value specified by the user.
+	  This governor returns the user-configured frequency if there
+	  has been an input to /sys/class/devfreq/.../userspace/set_freq.
+	  Otherwise, the governor does not change the frequency
+	  given at initialization.
+
+comment "DEVFREQ Drivers"
+
+endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
new file mode 100644
index 000000000000..4564a89e970a
--- /dev/null
+++ b/drivers/devfreq/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_PM_DEVFREQ) += devfreq.o
+obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
+obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
+obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
+obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
new file mode 100644
index 000000000000..5d15b812377b
--- /dev/null
+++ b/drivers/devfreq/devfreq.c
@@ -0,0 +1,601 @@
+/*
+ * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
+ *	    for Non-CPU Devices.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
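As an illustration of the profile/governor split described in the Kconfig help above, a registration by a driver could look roughly like the sketch below. Every foo_* name and every number is invented, the counter stubs stand in for real hardware accessors, and error handling is trimmed.

#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_target(struct device *dev, unsigned long *freq)
{
	struct opp *opp = devfreq_recommended_opp(dev, freq);

	if (IS_ERR(opp))
		return PTR_ERR(opp);
	/* reprogram every clock of the device for *freq here */
	return 0;
}

/* stand-ins for reading hardware utilization counters */
static unsigned long foo_read_busy(struct device *dev) { return 0; }
static unsigned long foo_read_total(struct device *dev) { return 1; }
static unsigned long foo_read_freq(struct device *dev) { return 200000000; }

static int foo_get_dev_status(struct device *dev,
			      struct devfreq_dev_status *stat)
{
	stat->busy_time = foo_read_busy(dev);
	stat->total_time = foo_read_total(dev);
	stat->current_frequency = foo_read_freq(dev);
	return 0;
}

static struct devfreq_dev_profile foo_profile = {
	.initial_freq = 200000000,	/* 200 MHz, made up */
	.polling_ms = 100,
	.target = foo_target,
	.get_dev_status = foo_get_dev_status,
};

static int foo_probe(struct platform_device *pdev)
{
	struct devfreq *df = devfreq_add_device(&pdev->dev, &foo_profile,
						&devfreq_simple_ondemand, NULL);

	return IS_ERR(df) ? PTR_ERR(df) : 0;
}

The simple-ondemand governor then drives foo_target() from the busy/total ratio, exactly as the help text describes.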
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/opp.h>
+#include <linux/devfreq.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/printk.h>
+#include <linux/hrtimer.h>
+#include "governor.h"
+
+struct class *devfreq_class;
+
+/*
+ * devfreq_work periodically monitors every registered device.
+ * The polling interval is determined by the minimum polling period
+ * among all polling devfreq devices; its resolution is one jiffy,
+ * which is therefore also the minimum polling interval.
+ */
+static bool polling;
+static struct workqueue_struct *devfreq_wq;
+static struct delayed_work devfreq_work;
+
+/* Removal of this devfreq must wait while devfreq_monitor() uses it */
+static struct devfreq *wait_remove_device;
+
+/* The list of all device-devfreq */
+static LIST_HEAD(devfreq_list);
+static DEFINE_MUTEX(devfreq_list_lock);
+
+/**
+ * find_device_devfreq() - find devfreq struct using device pointer
+ * @dev:	device pointer used to lookup device devfreq.
+ *
+ * Search the list of device devfreqs and return the matched device's
+ * devfreq info. devfreq_list_lock should be held by the caller.
+ */
+static struct devfreq *find_device_devfreq(struct device *dev)
+{
+	struct devfreq *tmp_devfreq;
+
+	if (unlikely(IS_ERR_OR_NULL(dev))) {
+		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	WARN(!mutex_is_locked(&devfreq_list_lock),
+	     "devfreq_list_lock must be locked.");
+
+	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
+		if (tmp_devfreq->dev.parent == dev)
+			return tmp_devfreq;
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+/**
+ * update_devfreq() - Reevaluate the device and configure frequency.
+ * @devfreq:	the devfreq instance.
+ *
+ * Note: Lock devfreq->lock before calling update_devfreq().
+ * This function is exported for governors.
+ */
+int update_devfreq(struct devfreq *devfreq)
+{
+	unsigned long freq;
+	int err = 0;
+
+	if (!mutex_is_locked(&devfreq->lock)) {
+		WARN(true, "devfreq->lock must be locked by the caller.\n");
+		return -EINVAL;
+	}
+
+	/* Reevaluate the proper frequency */
+	err = devfreq->governor->get_target_freq(devfreq, &freq);
+	if (err)
+		return err;
+
+	err = devfreq->profile->target(devfreq->dev.parent, &freq);
+	if (err)
+		return err;
+
+	devfreq->previous_freq = freq;
+	return err;
+}
+
+/**
+ * devfreq_notifier_call() - Notify that the device frequency requirements
+ *			     have been changed outside the devfreq framework.
+ * @nb:		the notifier_block (supposed to be devfreq->nb)
+ * @type:	not used
+ * @devp:	not used
+ *
+ * Called by a notifier that uses devfreq->nb.
+ */
+static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
+				 void *devp)
+{
+	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
+	int ret;
+
+	mutex_lock(&devfreq->lock);
+	ret = update_devfreq(devfreq);
+	mutex_unlock(&devfreq->lock);
+
+	return ret;
+}
+
+/**
+ * _remove_devfreq() - Remove devfreq from the device.
+ * @devfreq:	the devfreq struct
+ * @skip:	skip calling device_unregister().
+ *
+ * Note that the caller should lock devfreq->lock before calling
+ * this. _remove_devfreq() will unlock it and free devfreq
+ * internally. devfreq_list_lock should be locked by the caller
+ * as well (it is not released at return).
+ *
+ * Lock usage:
+ * devfreq->lock: locked before call.
+ *	unlocked at return (and freed)
+ * devfreq_list_lock: locked before call.
+ *	kept locked at return if
+ *	devfreq is centrally polled.
+ *
+ * Freed memory:
+ * devfreq
+ */
+static void _remove_devfreq(struct devfreq *devfreq, bool skip)
+{
+	if (!mutex_is_locked(&devfreq->lock)) {
+		WARN(true, "devfreq->lock must be locked by the caller.\n");
+		return;
+	}
+	if (!devfreq->governor->no_central_polling &&
+	    !mutex_is_locked(&devfreq_list_lock)) {
+		WARN(true, "devfreq_list_lock must be locked by the caller.\n");
+		return;
+	}
+
+	if (devfreq->being_removed)
+		return;
+
+	devfreq->being_removed = true;
+
+	if (devfreq->profile->exit)
+		devfreq->profile->exit(devfreq->dev.parent);
+
+	if (devfreq->governor->exit)
+		devfreq->governor->exit(devfreq);
+
+	if (!skip && get_device(&devfreq->dev)) {
+		device_unregister(&devfreq->dev);
+		put_device(&devfreq->dev);
+	}
+
+	if (!devfreq->governor->no_central_polling)
+		list_del(&devfreq->node);
+
+	mutex_unlock(&devfreq->lock);
+	mutex_destroy(&devfreq->lock);
+
+	kfree(devfreq);
+}
+
+/**
+ * devfreq_dev_release() - Callback for struct device to release the device.
+ * @dev:	the devfreq device
+ *
+ * This calls _remove_devfreq() unless it has already been called.
+ * Note that devfreq_dev_release() may be called by _remove_devfreq() itself
+ * as well as by others unregistering the device.
+ */
+static void devfreq_dev_release(struct device *dev)
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+	bool central_polling = !devfreq->governor->no_central_polling;
+
+	/*
+	 * If devfreq_dev_release() was called by device_unregister() of
+	 * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
+	 * being_removed is already set. This also partially checks the case
+	 * where devfreq_dev_release() is called from a thread other than
+	 * the one that called _remove_devfreq(); that case, however, is
+	 * handled completely by the second being_removed check below.
+	 *
+	 * Because being_removed is never cleared
+	 * again, we do not need to worry about race conditions on
+	 * being_removed.
+	 */
+	if (devfreq->being_removed)
+		return;
+
+	if (central_polling)
+		mutex_lock(&devfreq_list_lock);
+
+	mutex_lock(&devfreq->lock);
+
+	/*
+	 * Check the being_removed flag again for the case where
+	 * devfreq_dev_release() was called in a thread other than the one
+	 * that may have called _remove_devfreq().
+	 */
+	if (devfreq->being_removed) {
+		mutex_unlock(&devfreq->lock);
+		goto out;
+	}
+
+	/* devfreq->lock is unlocked and devfreq is freed in _remove_devfreq() */
+	_remove_devfreq(devfreq, true);
+
+out:
+	if (central_polling)
+		mutex_unlock(&devfreq_list_lock);
+}
+
+/**
+ * devfreq_monitor() - Periodically poll devfreq objects.
+ * @work:	the work struct used to run devfreq_monitor periodically.
+ * + */ +static void devfreq_monitor(struct work_struct *work) +{ + static unsigned long last_polled_at; + struct devfreq *devfreq, *tmp; + int error; + unsigned long jiffies_passed; + unsigned long next_jiffies = ULONG_MAX, now = jiffies; + struct device *dev; + + /* Initially last_polled_at = 0, polling every device at bootup */ + jiffies_passed = now - last_polled_at; + last_polled_at = now; + if (jiffies_passed == 0) + jiffies_passed = 1; + + mutex_lock(&devfreq_list_lock); + list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) { + mutex_lock(&devfreq->lock); + dev = devfreq->dev.parent; + + /* Do not remove tmp for a while */ + wait_remove_device = tmp; + + if (devfreq->governor->no_central_polling || + devfreq->next_polling == 0) { + mutex_unlock(&devfreq->lock); + continue; + } + mutex_unlock(&devfreq_list_lock); + + /* + * Reduce more next_polling if devfreq_wq took an extra + * delay. (i.e., CPU has been idled.) + */ + if (devfreq->next_polling <= jiffies_passed) { + error = update_devfreq(devfreq); + + /* Remove a devfreq with an error. */ + if (error && error != -EAGAIN) { + + dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n", + error, devfreq->governor->name); + + /* + * Unlock devfreq before locking the list + * in order to avoid deadlock with + * find_device_devfreq or others + */ + mutex_unlock(&devfreq->lock); + mutex_lock(&devfreq_list_lock); + /* Check if devfreq is already removed */ + if (IS_ERR(find_device_devfreq(dev))) + continue; + mutex_lock(&devfreq->lock); + /* This unlocks devfreq->lock and free it */ + _remove_devfreq(devfreq, false); + continue; + } + devfreq->next_polling = devfreq->polling_jiffies; + } else { + devfreq->next_polling -= jiffies_passed; + } + + if (devfreq->next_polling) + next_jiffies = (next_jiffies > devfreq->next_polling) ? + devfreq->next_polling : next_jiffies; + + mutex_unlock(&devfreq->lock); + mutex_lock(&devfreq_list_lock); + } + wait_remove_device = NULL; + mutex_unlock(&devfreq_list_lock); + + if (next_jiffies > 0 && next_jiffies < ULONG_MAX) { + polling = true; + queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies); + } else { + polling = false; + } +} + +/** + * devfreq_add_device() - Add devfreq feature to the device + * @dev: the device to add devfreq feature. + * @profile: device-specific profile to run devfreq. + * @governor: the policy to choose frequency. + * @data: private data for the governor. The devfreq framework does not + * touch this value. + */ +struct devfreq *devfreq_add_device(struct device *dev, + struct devfreq_dev_profile *profile, + const struct devfreq_governor *governor, + void *data) +{ + struct devfreq *devfreq; + int err = 0; + + if (!dev || !profile || !governor) { + dev_err(dev, "%s: Invalid parameters.\n", __func__); + return ERR_PTR(-EINVAL); + } + + + if (!governor->no_central_polling) { + mutex_lock(&devfreq_list_lock); + devfreq = find_device_devfreq(dev); + mutex_unlock(&devfreq_list_lock); + if (!IS_ERR(devfreq)) { + dev_err(dev, "%s: Unable to create devfreq for the device. 
It already has one.\n", __func__); + err = -EINVAL; + goto out; + } + } + + devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL); + if (!devfreq) { + dev_err(dev, "%s: Unable to create devfreq for the device\n", + __func__); + err = -ENOMEM; + goto out; + } + + mutex_init(&devfreq->lock); + mutex_lock(&devfreq->lock); + devfreq->dev.parent = dev; + devfreq->dev.class = devfreq_class; + devfreq->dev.release = devfreq_dev_release; + devfreq->profile = profile; + devfreq->governor = governor; + devfreq->previous_freq = profile->initial_freq; + devfreq->data = data; + devfreq->next_polling = devfreq->polling_jiffies + = msecs_to_jiffies(devfreq->profile->polling_ms); + devfreq->nb.notifier_call = devfreq_notifier_call; + + dev_set_name(&devfreq->dev, dev_name(dev)); + err = device_register(&devfreq->dev); + if (err) { + put_device(&devfreq->dev); + goto err_dev; + } + + if (governor->init) + err = governor->init(devfreq); + if (err) + goto err_init; + + mutex_unlock(&devfreq->lock); + + if (governor->no_central_polling) + goto out; + + mutex_lock(&devfreq_list_lock); + + list_add(&devfreq->node, &devfreq_list); + + if (devfreq_wq && devfreq->next_polling && !polling) { + polling = true; + queue_delayed_work(devfreq_wq, &devfreq_work, + devfreq->next_polling); + } + mutex_unlock(&devfreq_list_lock); + goto out; +err_init: + device_unregister(&devfreq->dev); +err_dev: + mutex_unlock(&devfreq->lock); + kfree(devfreq); +out: + if (err) + return ERR_PTR(err); + else + return devfreq; +} + +/** + * devfreq_remove_device() - Remove devfreq feature from a device. + * @devfreq the devfreq instance to be removed + */ +int devfreq_remove_device(struct devfreq *devfreq) +{ + if (!devfreq) + return -EINVAL; + + if (!devfreq->governor->no_central_polling) { + mutex_lock(&devfreq_list_lock); + while (wait_remove_device == devfreq) { + mutex_unlock(&devfreq_list_lock); + schedule(); + mutex_lock(&devfreq_list_lock); + } + } + + mutex_lock(&devfreq->lock); + _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */ + + if (!devfreq->governor->no_central_polling) + mutex_unlock(&devfreq_list_lock); + + return 0; +} + +static ssize_t show_governor(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name); +} + +static ssize_t show_freq(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq); +} + +static ssize_t show_polling_interval(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms); +} + +static ssize_t store_polling_interval(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct devfreq *df = to_devfreq(dev); + unsigned int value; + int ret; + + ret = sscanf(buf, "%u", &value); + if (ret != 1) + goto out; + + mutex_lock(&df->lock); + df->profile->polling_ms = value; + df->next_polling = df->polling_jiffies + = msecs_to_jiffies(value); + mutex_unlock(&df->lock); + + ret = count; + + if (df->governor->no_central_polling) + goto out; + + mutex_lock(&devfreq_list_lock); + if (df->next_polling > 0 && !polling) { + polling = true; + queue_delayed_work(devfreq_wq, &devfreq_work, + df->next_polling); + } + mutex_unlock(&devfreq_list_lock); +out: + return ret; +} + +static ssize_t show_central_polling(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", + 
!to_devfreq(dev)->governor->no_central_polling);
+}
+
+static struct device_attribute devfreq_attrs[] = {
+	__ATTR(governor, S_IRUGO, show_governor, NULL),
+	__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
+	__ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
+	__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
+	       store_polling_interval),
+	{ },
+};
+
+/**
+ * devfreq_start_polling() - Initialize data structures for the devfreq
+ *		framework and start polling registered devfreq devices.
+ */
+static int __init devfreq_start_polling(void)
+{
+	mutex_lock(&devfreq_list_lock);
+	polling = false;
+	devfreq_wq = create_freezable_workqueue("devfreq_wq");
+	INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
+	mutex_unlock(&devfreq_list_lock);
+
+	devfreq_monitor(&devfreq_work.work);
+	return 0;
+}
+late_initcall(devfreq_start_polling);
+
+static int __init devfreq_init(void)
+{
+	devfreq_class = class_create(THIS_MODULE, "devfreq");
+	if (IS_ERR(devfreq_class)) {
+		pr_err("%s: couldn't create class\n", __FILE__);
+		return PTR_ERR(devfreq_class);
+	}
+	devfreq_class->dev_attrs = devfreq_attrs;
+	return 0;
+}
+subsys_initcall(devfreq_init);
+
+static void __exit devfreq_exit(void)
+{
+	class_destroy(devfreq_class);
+}
+module_exit(devfreq_exit);
+
+/*
+ * The following are helper functions for devfreq user device drivers using
+ * the OPP framework.
+ */
+
+/**
+ * devfreq_recommended_opp() - Helper function to get the proper OPP for the
+ *			       freq value given to the target callback.
+ * @dev:	the devfreq user device. (parent of devfreq)
+ * @freq:	the frequency given to the target function
+ *
+ */
+struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq)
+{
+	struct opp *opp = opp_find_freq_ceil(dev, freq);
+
+	if (opp == ERR_PTR(-ENODEV))
+		opp = opp_find_freq_floor(dev, freq);
+	return opp;
+}
+
+/**
+ * devfreq_register_opp_notifier() - Helper function to have devfreq notified
+ *				     of any change in the availability of the
+ *				     device's OPPs
+ * @dev:	the devfreq user device. (parent of devfreq)
+ * @devfreq:	the devfreq object.
+ */
+int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
+{
+	struct srcu_notifier_head *nh = opp_get_notifier(dev);
+
+	if (IS_ERR(nh))
+		return PTR_ERR(nh);
+	return srcu_notifier_chain_register(nh, &devfreq->nb);
+}
+
+/**
+ * devfreq_unregister_opp_notifier() - Helper function to stop devfreq from
+ *				       being notified of changes in the
+ *				       availability of the device's OPPs
+ * @dev:	the devfreq user device. (parent of devfreq)
+ * @devfreq:	the devfreq object.
+ *
+ * This must be called from the exit() callback of devfreq_dev_profile
+ * if devfreq_register_opp_notifier() has been used.
+ */
+int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
+{
+	struct srcu_notifier_head *nh = opp_get_notifier(dev);
+
+	if (IS_ERR(nh))
+		return PTR_ERR(nh);
+	return srcu_notifier_chain_unregister(nh, &devfreq->nb);
+}
+
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_DESCRIPTION("devfreq class support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
new file mode 100644
index 000000000000..ea7f13c58ded
--- /dev/null
+++ b/drivers/devfreq/governor.h
@@ -0,0 +1,24 @@
+/*
+ * governor.h - internal header for devfreq governors.
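The two OPP notifier helpers above are meant to be used as a pair; a short sketch, assuming a driver that stashes its (single) devfreq pointer in foo_df at probe time — both names are invented:

static struct devfreq *foo_df;	/* assumed set right after devfreq_add_device() */

/* after registration: re-evaluate the frequency on OPP enable/disable */
static int foo_hook_opp_changes(struct device *dev)
{
	return devfreq_register_opp_notifier(dev, foo_df);
}

/* from the devfreq_dev_profile ->exit() callback */
static void foo_profile_exit(struct device *dev)
{
	devfreq_unregister_opp_notifier(dev, foo_df);
}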
+ * + * Copyright (C) 2011 Samsung Electronics + * MyungJoo Ham <myungjoo.ham@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This header is for devfreq governors in drivers/devfreq/ + */ + +#ifndef _GOVERNOR_H +#define _GOVERNOR_H + +#include <linux/devfreq.h> + +#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev) + +/* Caution: devfreq->lock must be locked before calling update_devfreq */ +extern int update_devfreq(struct devfreq *devfreq); + +#endif /* _GOVERNOR_H */ diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c new file mode 100644 index 000000000000..c0596b291761 --- /dev/null +++ b/drivers/devfreq/governor_performance.c @@ -0,0 +1,29 @@ +/* + * linux/drivers/devfreq/governor_performance.c + * + * Copyright (C) 2011 Samsung Electronics + * MyungJoo Ham <myungjoo.ham@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/devfreq.h> + +static int devfreq_performance_func(struct devfreq *df, + unsigned long *freq) +{ + /* + * target callback should be able to get floor value as + * said in devfreq.h + */ + *freq = UINT_MAX; + return 0; +} + +const struct devfreq_governor devfreq_performance = { + .name = "performance", + .get_target_freq = devfreq_performance_func, + .no_central_polling = true, +}; diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c new file mode 100644 index 000000000000..2483a85a266f --- /dev/null +++ b/drivers/devfreq/governor_powersave.c @@ -0,0 +1,29 @@ +/* + * linux/drivers/devfreq/governor_powersave.c + * + * Copyright (C) 2011 Samsung Electronics + * MyungJoo Ham <myungjoo.ham@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/devfreq.h> + +static int devfreq_powersave_func(struct devfreq *df, + unsigned long *freq) +{ + /* + * target callback should be able to get ceiling value as + * said in devfreq.h + */ + *freq = 0; + return 0; +} + +const struct devfreq_governor devfreq_powersave = { + .name = "powersave", + .get_target_freq = devfreq_powersave_func, + .no_central_polling = true, +}; diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c new file mode 100644 index 000000000000..efad8dcf9028 --- /dev/null +++ b/drivers/devfreq/governor_simpleondemand.c @@ -0,0 +1,88 @@ +/* + * linux/drivers/devfreq/governor_simpleondemand.c + * + * Copyright (C) 2011 Samsung Electronics + * MyungJoo Ham <myungjoo.ham@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */
+
+#include <linux/errno.h>
+#include <linux/devfreq.h>
+#include <linux/math64.h>
+
+/* Default constants for DevFreq-Simple-Ondemand (DFSO) */
+#define DFSO_UPTHRESHOLD	(90)
+#define DFSO_DOWNDIFFERENTIAL	(5)
+static int devfreq_simple_ondemand_func(struct devfreq *df,
+					unsigned long *freq)
+{
+	struct devfreq_dev_status stat;
+	int err = df->profile->get_dev_status(df->dev.parent, &stat);
+	unsigned long long a, b;
+	unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
+	unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENTIAL;
+	struct devfreq_simple_ondemand_data *data = df->data;
+
+	if (err)
+		return err;
+
+	if (data) {
+		if (data->upthreshold)
+			dfso_upthreshold = data->upthreshold;
+		if (data->downdifferential)
+			dfso_downdifferential = data->downdifferential;
+	}
+	if (dfso_upthreshold > 100 ||
+	    dfso_upthreshold < dfso_downdifferential)
+		return -EINVAL;
+
+	/* Assume MAX if it is going to be divided by zero */
+	if (stat.total_time == 0) {
+		*freq = UINT_MAX;
+		return 0;
+	}
+
+	/* Prevent overflow */
+	if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) {
+		stat.busy_time >>= 7;
+		stat.total_time >>= 7;
+	}
+
+	/* Set MAX if it's busy enough */
+	if (stat.busy_time * 100 >
+	    stat.total_time * dfso_upthreshold) {
+		*freq = UINT_MAX;
+		return 0;
+	}
+
+	/* Set MAX if we do not know the initial frequency */
+	if (stat.current_frequency == 0) {
+		*freq = UINT_MAX;
+		return 0;
+	}
+
+	/* Keep the current frequency */
+	if (stat.busy_time * 100 >
+	    stat.total_time * (dfso_upthreshold - dfso_downdifferential)) {
+		*freq = stat.current_frequency;
+		return 0;
+	}
+
+	/* Set the desired frequency based on the load */
+	a = stat.busy_time;
+	a *= stat.current_frequency;
+	b = div_u64(a, stat.total_time);
+	b *= 100;
+	b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));
+	*freq = (unsigned long) b;
+
+	return 0;
+}
+
+const struct devfreq_governor devfreq_simple_ondemand = {
+	.name = "simple_ondemand",
+	.get_target_freq = devfreq_simple_ondemand_func,
+};
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
new file mode 100644
index 000000000000..4f8b563da782
--- /dev/null
+++ b/drivers/devfreq/governor_userspace.c
@@ -0,0 +1,119 @@
+/*
+ * linux/drivers/devfreq/governor_userspace.c
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
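To see the proportional step of devfreq_simple_ondemand_func() in action, take an invented sample status: busy_time = 60, total_time = 100 and current_frequency = 200000000 (200 MHz), with the default upthreshold (90) and downdifferential (5). The load is 60%, so neither the "busy enough" branch (60% > 90% is false) nor the "keep current" branch (60% > 85% is false) fires. The final step computes a = 60 * 200000000 = 12e9, b = a / 100 = 120 MHz, then b * 100 / (90 - 5/2); with integer division the divisor is 88, so the requested frequency is about 136.4 MHz. In other words, the governor scales the clock so that the measured load would land near the middle of the hysteresis band, and the driver's target callback then rounds the request to a real OPP.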
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/devfreq.h>
+#include <linux/pm.h>
+#include <linux/mutex.h>
+#include "governor.h"
+
+struct userspace_data {
+	unsigned long user_frequency;
+	bool valid;
+};
+
+static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
+{
+	struct userspace_data *data = df->data;
+
+	if (!data->valid)
+		*freq = df->previous_freq; /* No user freq specified yet */
+	else
+		*freq = data->user_frequency;
+	return 0;
+}
+
+static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+	struct userspace_data *data;
+	unsigned long wanted;
+	int err = 0;
+
+	mutex_lock(&devfreq->lock);
+	data = devfreq->data;
+
+	if (sscanf(buf, "%lu", &wanted) != 1) {
+		err = -EINVAL;
+		goto out;
+	}
+	data->user_frequency = wanted;
+	data->valid = true;
+	err = update_devfreq(devfreq);
+	if (err == 0)
+		err = count;
+out:
+	mutex_unlock(&devfreq->lock);
+	return err;
+}
+
+static ssize_t show_freq(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+	struct userspace_data *data;
+	int err = 0;
+
+	mutex_lock(&devfreq->lock);
+	data = devfreq->data;
+
+	if (data->valid)
+		err = sprintf(buf, "%lu\n", data->user_frequency);
+	else
+		err = sprintf(buf, "undefined\n");
+	mutex_unlock(&devfreq->lock);
+	return err;
+}
+
+static DEVICE_ATTR(set_freq, 0644, show_freq, store_freq);
+static struct attribute *dev_entries[] = {
+	&dev_attr_set_freq.attr,
+	NULL,
+};
+static struct attribute_group dev_attr_group = {
+	.name	= "userspace",
+	.attrs	= dev_entries,
+};
+
+static int userspace_init(struct devfreq *devfreq)
+{
+	int err = 0;
+	struct userspace_data *data = kzalloc(sizeof(struct userspace_data),
+					      GFP_KERNEL);
+
+	if (!data) {
+		err = -ENOMEM;
+		goto out;
+	}
+	data->valid = false;
+	devfreq->data = data;
+
+	err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
+out:
+	return err;
+}
+
+static void userspace_exit(struct devfreq *devfreq)
+{
+	sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+	kfree(devfreq->data);
+	devfreq->data = NULL;
+}
+
+const struct devfreq_governor devfreq_userspace = {
+	.name = "userspace",
+	.get_target_freq = devfreq_userspace_func,
+	.init = userspace_init,
+	.exit = userspace_exit,
+	.no_central_polling = true,
+};
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 9d8710f8bc79..1782693819f3 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -2409,7 +2409,7 @@ static int picolcd_raw_event(struct hid_device *hdev,
 #ifdef CONFIG_PM
 static int picolcd_suspend(struct hid_device *hdev, pm_message_t message)
 {
-	if (message.event & PM_EVENT_AUTO)
+	if (PMSG_IS_AUTO(message))
 		return 0;
 
 	picolcd_suspend_backlight(hid_get_drvdata(hdev));
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 77e705c2209c..b403fcef0b86 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1332,7 +1332,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
 	struct usbhid_device *usbhid = hid->driver_data;
 	int status;
 
-	if (message.event & PM_EVENT_AUTO) {
+	if (PMSG_IS_AUTO(message)) {
 		spin_lock_irq(&usbhid->lock);	/* Sync with error handler */
 		if (!test_bit(HID_RESET_PENDING, &usbhid->iofl)
 		    && !test_bit(HID_CLEAR_HALT, &usbhid->iofl)
@@ -1367,7 +1367,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
 			return -EIO;
 	}
 
-	if (!ignoreled && (message.event &
PM_EVENT_AUTO)) { + if (!ignoreled && PMSG_IS_AUTO(message)) { spin_lock_irq(&usbhid->lock); if (test_bit(HID_LED_ON, &usbhid->iofl)) { spin_unlock_irq(&usbhid->lock); @@ -1380,8 +1380,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message) hid_cancel_delayed_stuff(usbhid); hid_cease_io(usbhid); - if ((message.event & PM_EVENT_AUTO) && - test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) { + if (PMSG_IS_AUTO(message) && test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) { /* lost race against keypresses */ status = hid_start_in(hid); if (status < 0) diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c index bb7f17f2a33c..cbf13d09b4ac 100644 --- a/drivers/media/video/via-camera.c +++ b/drivers/media/video/via-camera.c @@ -21,7 +21,7 @@ #include <media/videobuf-dma-sg.h> #include <linux/delay.h> #include <linux/dma-mapping.h> -#include <linux/pm_qos_params.h> +#include <linux/pm_qos.h> #include <linux/via-core.h> #include <linux/via-gpio.h> #include <linux/via_i2c.h> @@ -69,7 +69,7 @@ struct via_camera { struct mutex lock; enum viacam_opstate opstate; unsigned long flags; - struct pm_qos_request_list qos_request; + struct pm_qos_request qos_request; /* * GPIO info for power/reset management */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 680312710a78..a855db1ad249 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -47,7 +47,7 @@ #include <linux/if_vlan.h> #include <linux/cpu.h> #include <linux/smp.h> -#include <linux/pm_qos_params.h> +#include <linux/pm_qos.h> #include <linux/pm_runtime.h> #include <linux/aer.h> #include <linux/prefetch.h> diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index cdb958875ba4..7d6082160bcc 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1476,7 +1476,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message) if (!dev->suspend_count++) { spin_lock_irq(&dev->txq.lock); /* don't autosuspend while transmitting */ - if (dev->txq.qlen && (message.event & PM_EVENT_AUTO)) { + if (dev->txq.qlen && PMSG_IS_AUTO(message)) { spin_unlock_irq(&dev->txq.lock); return -EBUSY; } else { diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 298f2b0b6311..9a644d052f1e 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -599,7 +599,7 @@ void i2400mu_disconnect(struct usb_interface *iface) * * As well, the device might refuse going to sleep for whichever * reason. In this case we just fail. For system suspend/hibernate, - * we *can't* fail. We check PM_EVENT_AUTO to see if the + * we *can't* fail. We check PMSG_IS_AUTO to see if the * suspend call comes from the USB stack or from the system and act * in consequence. * @@ -615,7 +615,7 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg) struct i2400m *i2400m = &i2400mu->i2400m; #ifdef CONFIG_PM - if (pm_msg.event & PM_EVENT_AUTO) + if (PMSG_IS_AUTO(pm_msg)) is_autosuspend = 1; #endif diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index ef9ad79d1bfd..127e9c63beaf 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -161,7 +161,7 @@ that only one external action is invoked at a time. 
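All of the PMSG_IS_AUTO() conversions above follow one pattern: refuse or shortcut a runtime (auto)suspend while never failing a system sleep transition. A condensed sketch, with the foo_* driver, its private struct and the quiesce helper all invented:

#include <linux/atomic.h>
#include <linux/pm.h>
#include <linux/usb.h>

struct foo_priv {
	atomic_t in_flight;	/* invented I/O accounting */
};

static void foo_quiesce(struct foo_priv *priv)
{
	/* stop URBs, cancel work, etc. */
}

static int foo_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct foo_priv *priv = usb_get_intfdata(intf);

	/* an autosuspend may be refused while I/O is in flight... */
	if (PMSG_IS_AUTO(message) && atomic_read(&priv->in_flight))
		return -EBUSY;

	/* ...but a system sleep transition is not allowed to fail */
	foo_quiesce(priv);
	return 0;
}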
#include <linux/firmware.h> #include <linux/acpi.h> #include <linux/ctype.h> -#include <linux/pm_qos_params.h> +#include <linux/pm_qos.h> #include <net/lib80211.h> @@ -174,7 +174,7 @@ that only one external action is invoked at a time. #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" -static struct pm_qos_request_list ipw2100_pm_qos_req; +static struct pm_qos_request ipw2100_pm_qos_req; /* Debugging stuff */ #ifdef CONFIG_IPW2100_DEBUG diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index f462fa5f937c..33175504bb39 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig @@ -60,6 +60,10 @@ config VT_CONSOLE If unsure, say Y. +config VT_CONSOLE_SLEEP + def_bool y + depends on VT_CONSOLE && PM_SLEEP + config HW_CONSOLE bool depends on VT && !UML diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 3ec6699ab725..6960715c5063 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1305,7 +1305,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message) struct acm *acm = usb_get_intfdata(intf); int cnt; - if (message.event & PM_EVENT_AUTO) { + if (PMSG_IS_AUTO(message)) { int b; spin_lock_irq(&acm->write_lock); diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 1d26a7135dd9..efe684908c1f 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -798,11 +798,11 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message) dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor); /* if this is an autosuspend the caller does the locking */ - if (!(message.event & PM_EVENT_AUTO)) + if (!PMSG_IS_AUTO(message)) mutex_lock(&desc->lock); spin_lock_irq(&desc->iuspin); - if ((message.event & PM_EVENT_AUTO) && + if (PMSG_IS_AUTO(message) && (test_bit(WDM_IN_USE, &desc->flags) || test_bit(WDM_RESPONDING, &desc->flags))) { spin_unlock_irq(&desc->iuspin); @@ -815,7 +815,7 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message) kill_urbs(desc); cancel_work_sync(&desc->rxwork); } - if (!(message.event & PM_EVENT_AUTO)) + if (!PMSG_IS_AUTO(message)) mutex_unlock(&desc->lock); return rv; diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index adf5ca8a2396..3b029a0a4787 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -1046,8 +1046,7 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg) /* Non-root devices on a full/low-speed bus must wait for their * companion high-speed root hub, in case a handoff is needed. 
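The via-camera and ipw2100 hunks above only have to rename the request type; the calls around it keep their old names. For context, a minimal sketch of that (unchanged) API with an invented requester and an arbitrary 50 us bound:

#include <linux/pm_qos.h>

static struct pm_qos_request foo_latency_req;

static void foo_start_dma(void)
{
	/* cap CPU wakeup latency at 50 us while DMA runs */
	pm_qos_add_request(&foo_latency_req, PM_QOS_CPU_DMA_LATENCY, 50);
}

static void foo_stop_dma(void)
{
	pm_qos_remove_request(&foo_latency_req);
}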
*/ - if (!(msg.event & PM_EVENT_AUTO) && udev->parent && - udev->bus->hs_companion) + if (!PMSG_IS_AUTO(msg) && udev->parent && udev->bus->hs_companion) device_pm_wait_for_dev(&udev->dev, &udev->bus->hs_companion->root_hub->dev); @@ -1075,7 +1074,7 @@ static int usb_suspend_interface(struct usb_device *udev, if (driver->suspend) { status = driver->suspend(intf, msg); - if (status && !(msg.event & PM_EVENT_AUTO)) + if (status && !PMSG_IS_AUTO(msg)) dev_err(&intf->dev, "%s error %d\n", "suspend", status); } else { @@ -1189,7 +1188,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) status = usb_suspend_interface(udev, intf, msg); /* Ignore errors during system sleep transitions */ - if (!(msg.event & PM_EVENT_AUTO)) + if (!PMSG_IS_AUTO(msg)) status = 0; if (status != 0) break; @@ -1199,7 +1198,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) status = usb_suspend_device(udev, msg); /* Again, ignore errors during system sleep transitions */ - if (!(msg.event & PM_EVENT_AUTO)) + if (!PMSG_IS_AUTO(msg)) status = 0; } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index b3b7d062906d..13222d352a61 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1975,8 +1975,9 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg) int status; int old_state = hcd->state; - dev_dbg(&rhdev->dev, "bus %s%s\n", - (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "suspend"); + dev_dbg(&rhdev->dev, "bus %ssuspend, wakeup %d\n", + (PMSG_IS_AUTO(msg) ? "auto-" : ""), + rhdev->do_remote_wakeup); if (HCD_DEAD(hcd)) { dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "suspend"); return 0; @@ -2011,8 +2012,8 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) int status; int old_state = hcd->state; - dev_dbg(&rhdev->dev, "usb %s%s\n", - (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume"); + dev_dbg(&rhdev->dev, "usb %sresume\n", + (PMSG_IS_AUTO(msg) ? "auto-" : "")); if (HCD_DEAD(hcd)) { dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "resume"); return 0; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index d6cc83249341..96f05b29c9ad 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2369,8 +2369,6 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) int port1 = udev->portnum; int status; - // dev_dbg(hub->intfdev, "suspend port %d\n", port1); - /* enable remote wakeup when appropriate; this lets the device * wake up the upstream hub (including maybe the root hub). * @@ -2387,7 +2385,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) dev_dbg(&udev->dev, "won't remote wakeup, status %d\n", status); /* bail if autosuspend is requested */ - if (msg.event & PM_EVENT_AUTO) + if (PMSG_IS_AUTO(msg)) return status; } } @@ -2416,12 +2414,13 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) USB_CTRL_SET_TIMEOUT); /* System sleep transitions should never fail */ - if (!(msg.event & PM_EVENT_AUTO)) + if (!PMSG_IS_AUTO(msg)) status = 0; } else { /* device has up to 10 msec to fully suspend */ - dev_dbg(&udev->dev, "usb %ssuspend\n", - (msg.event & PM_EVENT_AUTO ? "auto-" : "")); + dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n", + (PMSG_IS_AUTO(msg) ? "auto-" : ""), + udev->do_remote_wakeup); usb_set_device_state(udev, USB_STATE_SUSPENDED); msleep(10); } @@ -2572,7 +2571,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) } else { /* drive resume for at least 20 msec */ dev_dbg(&udev->dev, "usb %sresume\n", - (msg.event & PM_EVENT_AUTO ? 
"auto-" : "")); + (PMSG_IS_AUTO(msg) ? "auto-" : "")); msleep(25); /* Virtual root hubs can trigger on GET_PORT_STATUS to @@ -2679,7 +2678,7 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg) udev = hdev->children [port1-1]; if (udev && udev->can_submit) { dev_warn(&intf->dev, "port %d nyet suspended\n", port1); - if (msg.event & PM_EVENT_AUTO) + if (PMSG_IS_AUTO(msg)) return -EBUSY; } } diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index d5d136a53b61..b18179bda0d8 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -1009,7 +1009,7 @@ static int sierra_suspend(struct usb_serial *serial, pm_message_t message) struct sierra_intf_private *intfdata; int b; - if (message.event & PM_EVENT_AUTO) { + if (PMSG_IS_AUTO(message)) { intfdata = serial->private; spin_lock_irq(&intfdata->susp_lock); b = intfdata->in_flight; diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index e4fad5e643d7..d555ca9567b8 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -651,7 +651,7 @@ int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message) dbg("%s entered", __func__); - if (message.event & PM_EVENT_AUTO) { + if (PMSG_IS_AUTO(message)) { spin_lock_irq(&intfdata->susp_lock); b = intfdata->in_flight; spin_unlock_irq(&intfdata->susp_lock); |