author     Rafael J. Wysocki <rjw@sisk.pl>    2012-09-17 20:25:18 +0200
committer  Rafael J. Wysocki <rjw@sisk.pl>    2012-09-17 20:25:18 +0200
commit     00e8b2613331042bbe0177e5b9bb5c8a654b14ae
tree       cfec260ba68b007ddd090c8f63c879b5bb58d2c4
parent     Linux 3.6-rc6
parent     PM: Do not use the syscore flag for runtime PM
Merge branch 'pm-timers'
* pm-timers:
PM: Do not use the syscore flag for runtime PM
sh: MTU2: Basic runtime PM support
sh: CMT: Basic runtime PM support
sh: TMU: Basic runtime PM support
PM / Domains: Do not measure start time for "irq safe" devices
PM / Domains: Move syscore flag from subsys data to struct device
PM / Domains: Rename the always_on device flag to syscore
PM / Runtime: Allow helpers to be called by early platform drivers
PM: Reorganize device PM initialization
sh: MTU2: Introduce clock events suspend/resume routines
sh: CMT: Introduce clocksource/clock events suspend/resume routines
sh: TMU: Introduce clocksource/clock events suspend/resume routines
timekeeping: Add suspend and resume of clock event devices
PM / Domains: Add power off/on function for system core suspend stage
PM / Domains: Introduce simplified power on routine for system resume
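
Taken together, these patches stop marking the SH timer devices as permanently "always on" in their PM domain. Instead, the timers take part in runtime PM while a channel is in use and switch their domain's power explicitly at the syscore stage, through the new pm_genpd_syscore_poweroff()/pm_genpd_syscore_poweron() helpers hooked into the new ->suspend/->resume callbacks of struct clock_event_device. The sketch below is not part of the merge; everything named my_timer_* is hypothetical, and only the ->suspend/->resume members, clockevents_register_device() and the pm_genpd_syscore_*() helpers reflect the merged code. It shows the shape of the wiring that sh_cmt, sh_mtu2 and sh_tmu adopt in the diff further down.

/*
 * Hedged sketch of the clock-events/syscore wiring introduced by this
 * series; the my_timer_* names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/clockchips.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

struct my_timer_priv {
	struct platform_device *pdev;
	struct clock_event_device ced;
};

static struct my_timer_priv *ced_to_priv(struct clock_event_device *ced)
{
	return container_of(ced, struct my_timer_priv, ced);
}

static void my_timer_clock_event_suspend(struct clock_event_device *ced)
{
	/* Called by clockevents_suspend() late in timekeeping_suspend(). */
	pm_genpd_syscore_poweroff(&ced_to_priv(ced)->pdev->dev);
}

static void my_timer_clock_event_resume(struct clock_event_device *ced)
{
	/* Called by clockevents_resume() early in timekeeping_resume(). */
	pm_genpd_syscore_poweron(&ced_to_priv(ced)->pdev->dev);
}

static void my_timer_register_clockevent(struct my_timer_priv *p)
{
	struct clock_event_device *ced = &p->ced;

	ced->name = "my_timer";
	ced->features = CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	/* set_mode/set_next_event wired up as in any clock events driver. */
	ced->suspend = my_timer_clock_event_suspend;
	ced->resume = my_timer_clock_event_resume;

	clockevents_register_device(ced);
}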
 drivers/base/platform.c       |   2
 drivers/base/power/domain.c   | 130
 drivers/base/power/main.c     |  35
 drivers/base/power/power.h    |  36
 drivers/clocksource/sh_cmt.c  |  71
 drivers/clocksource/sh_mtu2.c |  41
 drivers/clocksource/sh_tmu.c  | 112
 include/linux/clockchips.h    |   8
 include/linux/device.h        |   7
 include/linux/pm.h            |   2
 include/linux/pm_domain.h     |  19
 kernel/power/Kconfig          |   4
 kernel/time/clockevents.c     |  24
 kernel/time/timekeeping.c     |   2
 14 files changed, 419 insertions(+), 74 deletions(-)
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index a1a722502587..d51514b79efe 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -22,6 +22,7 @@ #include <linux/pm_runtime.h> #include "base.h" +#include "power/power.h" #define to_platform_driver(drv) (container_of((drv), struct platform_driver, \ driver)) @@ -948,6 +949,7 @@ void __init early_platform_add_devices(struct platform_device **devs, int num) dev = &devs[i]->dev; if (!dev->devres_head.next) { + pm_runtime_early_init(dev); INIT_LIST_HEAD(&dev->devres_head); list_add_tail(&dev->devres_head, &early_platform_device_list); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index ba3487c9835b..5f4606f13be6 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -75,6 +75,12 @@ static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) start_latency_ns, "start"); } +static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, + struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, start, dev); +} + static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) { bool ret = false; @@ -436,7 +442,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) not_suspended = 0; list_for_each_entry(pdd, &genpd->dev_list, list_node) if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) - || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on)) + || pdd->dev->power.irq_safe)) not_suspended++; if (not_suspended > genpd->in_progress) @@ -578,9 +584,6 @@ static int pm_genpd_runtime_suspend(struct device *dev) might_sleep_if(!genpd->dev_irq_safe); - if (dev_gpd_data(dev)->always_on) - return -EBUSY; - stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; if (stop_ok && !stop_ok(dev)) return -EBUSY; @@ -629,7 +632,7 @@ static int pm_genpd_runtime_resume(struct device *dev) /* If power.irq_safe, the PM domain is never powered off. */ if (dev->power.irq_safe) - return genpd_start_dev(genpd, dev); + return genpd_start_dev_no_timing(genpd, dev); mutex_lock(&genpd->lock); ret = __pm_genpd_poweron(genpd); @@ -697,6 +700,24 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {} #ifdef CONFIG_PM_SLEEP +/** + * pm_genpd_present - Check if the given PM domain has been initialized. + * @genpd: PM domain to check. + */ +static bool pm_genpd_present(struct generic_pm_domain *genpd) +{ + struct generic_pm_domain *gpd; + + if (IS_ERR_OR_NULL(genpd)) + return false; + + list_for_each_entry(gpd, &gpd_list, gpd_list_node) + if (gpd == genpd) + return true; + + return false; +} + static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, struct device *dev) { @@ -750,9 +771,10 @@ static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev) * Check if the given PM domain can be powered off (during system suspend or * hibernation) and do that if so. Also, in that case propagate to its masters. * - * This function is only called in "noirq" stages of system power transitions, - * so it need not acquire locks (all of the "noirq" callbacks are executed - * sequentially, so it is guaranteed that it will never run twice in parallel). + * This function is only called in "noirq" and "syscore" stages of system power + * transitions, so it need not acquire locks (all of the "noirq" callbacks are + * executed sequentially, so it is guaranteed that it will never run twice in + * parallel). 
*/ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) { @@ -777,6 +799,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) } /** + * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters. + * @genpd: PM domain to power on. + * + * This function is only called in "noirq" and "syscore" stages of system power + * transitions, so it need not acquire locks (all of the "noirq" callbacks are + * executed sequentially, so it is guaranteed that it will never run twice in + * parallel). + */ +static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd) +{ + struct gpd_link *link; + + if (genpd->status != GPD_STATE_POWER_OFF) + return; + + list_for_each_entry(link, &genpd->slave_links, slave_node) { + pm_genpd_sync_poweron(link->master); + genpd_sd_counter_inc(link->master); + } + + if (genpd->power_on) + genpd->power_on(genpd); + + genpd->status = GPD_STATE_ACTIVE; +} + +/** * resume_needed - Check whether to resume a device before system suspend. * @dev: Device to check. * @genpd: PM domain the device belongs to. @@ -937,7 +986,7 @@ static int pm_genpd_suspend_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on + if (genpd->suspend_power_off || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) return 0; @@ -970,7 +1019,7 @@ static int pm_genpd_resume_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on + if (genpd->suspend_power_off || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) return 0; @@ -979,7 +1028,7 @@ static int pm_genpd_resume_noirq(struct device *dev) * guaranteed that this function will never run twice in parallel for * the same PM domain, so it is not necessary to use locking here. */ - pm_genpd_poweron(genpd); + pm_genpd_sync_poweron(genpd); genpd->suspended_count--; return genpd_start_dev(genpd, dev); @@ -1090,8 +1139,7 @@ static int pm_genpd_freeze_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? - 0 : genpd_stop_dev(genpd, dev); + return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev); } /** @@ -1111,8 +1159,7 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? - 0 : genpd_start_dev(genpd, dev); + return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev); } /** @@ -1186,8 +1233,8 @@ static int pm_genpd_restore_noirq(struct device *dev) if (genpd->suspended_count++ == 0) { /* * The boot kernel might put the domain into arbitrary state, - * so make it appear as powered off to pm_genpd_poweron(), so - * that it tries to power it on in case it was really off. + * so make it appear as powered off to pm_genpd_sync_poweron(), + * so that it tries to power it on in case it was really off. */ genpd->status = GPD_STATE_POWER_OFF; if (genpd->suspend_power_off) { @@ -1205,9 +1252,9 @@ static int pm_genpd_restore_noirq(struct device *dev) if (genpd->suspend_power_off) return 0; - pm_genpd_poweron(genpd); + pm_genpd_sync_poweron(genpd); - return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev); + return genpd_start_dev(genpd, dev); } /** @@ -1246,6 +1293,31 @@ static void pm_genpd_complete(struct device *dev) } } +/** + * pm_genpd_syscore_switch - Switch power during system core suspend or resume. 
+ * @dev: Device that normally is marked as "always on" to switch power for. + * + * This routine may only be called during the system core (syscore) suspend or + * resume phase for devices whose "always on" flags are set. + */ +void pm_genpd_syscore_switch(struct device *dev, bool suspend) +{ + struct generic_pm_domain *genpd; + + genpd = dev_to_genpd(dev); + if (!pm_genpd_present(genpd)) + return; + + if (suspend) { + genpd->suspended_count++; + pm_genpd_sync_poweroff(genpd); + } else { + pm_genpd_sync_poweron(genpd); + genpd->suspended_count--; + } +} +EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch); + #else #define pm_genpd_prepare NULL @@ -1455,26 +1527,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, } /** - * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device. - * @dev: Device to set/unset the flag for. - * @val: The new value of the device's "always on" flag. - */ -void pm_genpd_dev_always_on(struct device *dev, bool val) -{ - struct pm_subsys_data *psd; - unsigned long flags; - - spin_lock_irqsave(&dev->power.lock, flags); - - psd = dev_to_psd(dev); - if (psd && psd->domain_data) - to_gpd_data(psd->domain_data)->always_on = val; - - spin_unlock_irqrestore(&dev->power.lock, flags); -} -EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on); - -/** * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag. * @dev: Device to set/unset the flag for. * @val: The new value of the device's "need restore" flag. diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 0113adc310dc..57f5814c2732 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -57,20 +57,17 @@ static pm_message_t pm_transition; static int async_error; /** - * device_pm_init - Initialize the PM-related part of a device object. + * device_pm_sleep_init - Initialize system suspend-related device fields. * @dev: Device object being initialized. 
*/ -void device_pm_init(struct device *dev) +void device_pm_sleep_init(struct device *dev) { dev->power.is_prepared = false; dev->power.is_suspended = false; init_completion(&dev->power.completion); complete_all(&dev->power.completion); dev->power.wakeup = NULL; - spin_lock_init(&dev->power.lock); - pm_runtime_init(dev); INIT_LIST_HEAD(&dev->power.entry); - dev->power.power_state = PMSG_INVALID; } /** @@ -408,6 +405,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) TRACE_DEVICE(dev); TRACE_RESUME(0); + if (dev->power.syscore) + goto Out; + if (dev->pm_domain) { info = "noirq power domain "; callback = pm_noirq_op(&dev->pm_domain->ops, state); @@ -429,6 +429,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) error = dpm_run_callback(callback, dev, state, info); + Out: TRACE_RESUME(error); return error; } @@ -486,6 +487,9 @@ static int device_resume_early(struct device *dev, pm_message_t state) TRACE_DEVICE(dev); TRACE_RESUME(0); + if (dev->power.syscore) + goto Out; + if (dev->pm_domain) { info = "early power domain "; callback = pm_late_early_op(&dev->pm_domain->ops, state); @@ -507,6 +511,7 @@ static int device_resume_early(struct device *dev, pm_message_t state) error = dpm_run_callback(callback, dev, state, info); + Out: TRACE_RESUME(error); return error; } @@ -570,6 +575,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) TRACE_DEVICE(dev); TRACE_RESUME(0); + if (dev->power.syscore) + goto Complete; + dpm_wait(dev->parent, async); device_lock(dev); @@ -632,6 +640,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) Unlock: device_unlock(dev); + + Complete: complete_all(&dev->power.completion); TRACE_RESUME(error); @@ -722,6 +732,9 @@ static void device_complete(struct device *dev, pm_message_t state) void (*callback)(struct device *) = NULL; char *info = NULL; + if (dev->power.syscore) + return; + device_lock(dev); if (dev->pm_domain) { @@ -834,6 +847,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) pm_callback_t callback = NULL; char *info = NULL; + if (dev->power.syscore) + return 0; + if (dev->pm_domain) { info = "noirq power domain "; callback = pm_noirq_op(&dev->pm_domain->ops, state); @@ -917,6 +933,9 @@ static int device_suspend_late(struct device *dev, pm_message_t state) pm_callback_t callback = NULL; char *info = NULL; + if (dev->power.syscore) + return 0; + if (dev->pm_domain) { info = "late power domain "; callback = pm_late_early_op(&dev->pm_domain->ops, state); @@ -1053,6 +1072,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) goto Complete; } + if (dev->power.syscore) + goto Complete; + device_lock(dev); if (dev->pm_domain) { @@ -1209,6 +1231,9 @@ static int device_prepare(struct device *dev, pm_message_t state) char *info = NULL; int error = 0; + if (dev->power.syscore) + return 0; + device_lock(dev); dev->power.wakeup_path = device_may_wakeup(dev); diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index eeb4bff9505c..0dbfdf4419af 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -1,12 +1,32 @@ #include <linux/pm_qos.h> +static inline void device_pm_init_common(struct device *dev) +{ + if (!dev->power.early_init) { + spin_lock_init(&dev->power.lock); + dev->power.power_state = PMSG_INVALID; + dev->power.early_init = true; + } +} + #ifdef CONFIG_PM_RUNTIME +static inline void pm_runtime_early_init(struct device *dev) +{ + dev->power.disable_depth = 1; + 
device_pm_init_common(dev); +} + extern void pm_runtime_init(struct device *dev); extern void pm_runtime_remove(struct device *dev); #else /* !CONFIG_PM_RUNTIME */ +static inline void pm_runtime_early_init(struct device *dev) +{ + device_pm_init_common(dev); +} + static inline void pm_runtime_init(struct device *dev) {} static inline void pm_runtime_remove(struct device *dev) {} @@ -25,7 +45,7 @@ static inline struct device *to_device(struct list_head *entry) return container_of(entry, struct device, power.entry); } -extern void device_pm_init(struct device *dev); +extern void device_pm_sleep_init(struct device *dev); extern void device_pm_add(struct device *); extern void device_pm_remove(struct device *); extern void device_pm_move_before(struct device *, struct device *); @@ -34,12 +54,7 @@ extern void device_pm_move_last(struct device *); #else /* !CONFIG_PM_SLEEP */ -static inline void device_pm_init(struct device *dev) -{ - spin_lock_init(&dev->power.lock); - dev->power.power_state = PMSG_INVALID; - pm_runtime_init(dev); -} +static inline void device_pm_sleep_init(struct device *dev) {} static inline void device_pm_add(struct device *dev) { @@ -60,6 +75,13 @@ static inline void device_pm_move_last(struct device *dev) {} #endif /* !CONFIG_PM_SLEEP */ +static inline void device_pm_init(struct device *dev) +{ + device_pm_init_common(dev); + device_pm_sleep_init(dev); + pm_runtime_init(dev); +} + #ifdef CONFIG_PM /* diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 98b06baafcc6..a5f7829f2799 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/pm_domain.h> +#include <linux/pm_runtime.h> struct sh_cmt_priv { void __iomem *mapbase; @@ -52,6 +53,7 @@ struct sh_cmt_priv { struct clock_event_device ced; struct clocksource cs; unsigned long total_cycles; + bool cs_enabled; }; static DEFINE_RAW_SPINLOCK(sh_cmt_lock); @@ -155,6 +157,9 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) { int k, ret; + pm_runtime_get_sync(&p->pdev->dev); + dev_pm_syscore_device(&p->pdev->dev, true); + /* enable clock */ ret = clk_enable(p->clk); if (ret) { @@ -221,6 +226,9 @@ static void sh_cmt_disable(struct sh_cmt_priv *p) /* stop clock */ clk_disable(p->clk); + + dev_pm_syscore_device(&p->pdev->dev, false); + pm_runtime_put(&p->pdev->dev); } /* private flags */ @@ -451,22 +459,42 @@ static int sh_cmt_clocksource_enable(struct clocksource *cs) int ret; struct sh_cmt_priv *p = cs_to_sh_cmt(cs); + WARN_ON(p->cs_enabled); + p->total_cycles = 0; ret = sh_cmt_start(p, FLAG_CLOCKSOURCE); - if (!ret) + if (!ret) { __clocksource_updatefreq_hz(cs, p->rate); + p->cs_enabled = true; + } return ret; } static void sh_cmt_clocksource_disable(struct clocksource *cs) { - sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE); + struct sh_cmt_priv *p = cs_to_sh_cmt(cs); + + WARN_ON(!p->cs_enabled); + + sh_cmt_stop(p, FLAG_CLOCKSOURCE); + p->cs_enabled = false; +} + +static void sh_cmt_clocksource_suspend(struct clocksource *cs) +{ + struct sh_cmt_priv *p = cs_to_sh_cmt(cs); + + sh_cmt_stop(p, FLAG_CLOCKSOURCE); + pm_genpd_syscore_poweroff(&p->pdev->dev); } static void sh_cmt_clocksource_resume(struct clocksource *cs) { - sh_cmt_start(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE); + struct sh_cmt_priv *p = cs_to_sh_cmt(cs); + + pm_genpd_syscore_poweron(&p->pdev->dev); + sh_cmt_start(p, FLAG_CLOCKSOURCE); } static int sh_cmt_register_clocksource(struct sh_cmt_priv *p, @@ -479,7 +507,7 @@ 
static int sh_cmt_register_clocksource(struct sh_cmt_priv *p, cs->read = sh_cmt_clocksource_read; cs->enable = sh_cmt_clocksource_enable; cs->disable = sh_cmt_clocksource_disable; - cs->suspend = sh_cmt_clocksource_disable; + cs->suspend = sh_cmt_clocksource_suspend; cs->resume = sh_cmt_clocksource_resume; cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8); cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; @@ -562,6 +590,16 @@ static int sh_cmt_clock_event_next(unsigned long delta, return 0; } +static void sh_cmt_clock_event_suspend(struct clock_event_device *ced) +{ + pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev); +} + +static void sh_cmt_clock_event_resume(struct clock_event_device *ced) +{ + pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev); +} + static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, char *name, unsigned long rating) { @@ -576,6 +614,8 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, ced->cpumask = cpumask_of(0); ced->set_next_event = sh_cmt_clock_event_next; ced->set_mode = sh_cmt_clock_event_mode; + ced->suspend = sh_cmt_clock_event_suspend; + ced->resume = sh_cmt_clock_event_resume; dev_info(&p->pdev->dev, "used for clock events\n"); clockevents_register_device(ced); @@ -670,6 +710,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) dev_err(&p->pdev->dev, "registration failed\n"); goto err1; } + p->cs_enabled = false; ret = setup_irq(irq, &p->irqaction); if (ret) { @@ -688,14 +729,17 @@ err0: static int __devinit sh_cmt_probe(struct platform_device *pdev) { struct sh_cmt_priv *p = platform_get_drvdata(pdev); + struct sh_timer_config *cfg = pdev->dev.platform_data; int ret; - if (!is_early_platform_device(pdev)) - pm_genpd_dev_always_on(&pdev->dev, true); + if (!is_early_platform_device(pdev)) { + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + } if (p) { dev_info(&pdev->dev, "kept as earlytimer\n"); - return 0; + goto out; } p = kmalloc(sizeof(*p), GFP_KERNEL); @@ -708,8 +752,19 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev) if (ret) { kfree(p); platform_set_drvdata(pdev, NULL); + pm_runtime_idle(&pdev->dev); + return ret; } - return ret; + if (is_early_platform_device(pdev)) + return 0; + + out: + if (cfg->clockevent_rating || cfg->clocksource_rating) + pm_runtime_irq_safe(&pdev->dev); + else + pm_runtime_idle(&pdev->dev); + + return 0; } static int __devexit sh_cmt_remove(struct platform_device *pdev) diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index d9b76ca64a61..c5eea858054a 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -32,6 +32,7 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/pm_domain.h> +#include <linux/pm_runtime.h> struct sh_mtu2_priv { void __iomem *mapbase; @@ -123,6 +124,9 @@ static int sh_mtu2_enable(struct sh_mtu2_priv *p) { int ret; + pm_runtime_get_sync(&p->pdev->dev); + dev_pm_syscore_device(&p->pdev->dev, true); + /* enable clock */ ret = clk_enable(p->clk); if (ret) { @@ -157,6 +161,9 @@ static void sh_mtu2_disable(struct sh_mtu2_priv *p) /* stop clock */ clk_disable(p->clk); + + dev_pm_syscore_device(&p->pdev->dev, false); + pm_runtime_put(&p->pdev->dev); } static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id) @@ -208,6 +215,16 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode, } } +static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced) +{ + pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->pdev->dev); +} + +static 
void sh_mtu2_clock_event_resume(struct clock_event_device *ced) +{ + pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->pdev->dev); +} + static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p, char *name, unsigned long rating) { @@ -221,6 +238,8 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p, ced->rating = rating; ced->cpumask = cpumask_of(0); ced->set_mode = sh_mtu2_clock_event_mode; + ced->suspend = sh_mtu2_clock_event_suspend; + ced->resume = sh_mtu2_clock_event_resume; dev_info(&p->pdev->dev, "used for clock events\n"); clockevents_register_device(ced); @@ -305,14 +324,17 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev) static int __devinit sh_mtu2_probe(struct platform_device *pdev) { struct sh_mtu2_priv *p = platform_get_drvdata(pdev); + struct sh_timer_config *cfg = pdev->dev.platform_data; int ret; - if (!is_early_platform_device(pdev)) - pm_genpd_dev_always_on(&pdev->dev, true); + if (!is_early_platform_device(pdev)) { + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + } if (p) { dev_info(&pdev->dev, "kept as earlytimer\n"); - return 0; + goto out; } p = kmalloc(sizeof(*p), GFP_KERNEL); @@ -325,8 +347,19 @@ static int __devinit sh_mtu2_probe(struct platform_device *pdev) if (ret) { kfree(p); platform_set_drvdata(pdev, NULL); + pm_runtime_idle(&pdev->dev); + return ret; } - return ret; + if (is_early_platform_device(pdev)) + return 0; + + out: + if (cfg->clockevent_rating) + pm_runtime_irq_safe(&pdev->dev); + else + pm_runtime_idle(&pdev->dev); + + return 0; } static int __devexit sh_mtu2_remove(struct platform_device *pdev) diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index c1b51d49d106..0cc4add88279 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/pm_domain.h> +#include <linux/pm_runtime.h> struct sh_tmu_priv { void __iomem *mapbase; @@ -43,6 +44,8 @@ struct sh_tmu_priv { unsigned long periodic; struct clock_event_device ced; struct clocksource cs; + bool cs_enabled; + unsigned int enable_count; }; static DEFINE_RAW_SPINLOCK(sh_tmu_lock); @@ -107,7 +110,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start) raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); } -static int sh_tmu_enable(struct sh_tmu_priv *p) +static int __sh_tmu_enable(struct sh_tmu_priv *p) { int ret; @@ -135,7 +138,18 @@ static int sh_tmu_enable(struct sh_tmu_priv *p) return 0; } -static void sh_tmu_disable(struct sh_tmu_priv *p) +static int sh_tmu_enable(struct sh_tmu_priv *p) +{ + if (p->enable_count++ > 0) + return 0; + + pm_runtime_get_sync(&p->pdev->dev); + dev_pm_syscore_device(&p->pdev->dev, true); + + return __sh_tmu_enable(p); +} + +static void __sh_tmu_disable(struct sh_tmu_priv *p) { /* disable channel */ sh_tmu_start_stop_ch(p, 0); @@ -147,6 +161,20 @@ static void sh_tmu_disable(struct sh_tmu_priv *p) clk_disable(p->clk); } +static void sh_tmu_disable(struct sh_tmu_priv *p) +{ + if (WARN_ON(p->enable_count == 0)) + return; + + if (--p->enable_count > 0) + return; + + __sh_tmu_disable(p); + + dev_pm_syscore_device(&p->pdev->dev, false); + pm_runtime_put(&p->pdev->dev); +} + static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta, int periodic) { @@ -203,15 +231,53 @@ static int sh_tmu_clocksource_enable(struct clocksource *cs) struct sh_tmu_priv *p = cs_to_sh_tmu(cs); int ret; + if (WARN_ON(p->cs_enabled)) + return 0; + ret = sh_tmu_enable(p); - if (!ret) + 
if (!ret) { __clocksource_updatefreq_hz(cs, p->rate); + p->cs_enabled = true; + } + return ret; } static void sh_tmu_clocksource_disable(struct clocksource *cs) { - sh_tmu_disable(cs_to_sh_tmu(cs)); + struct sh_tmu_priv *p = cs_to_sh_tmu(cs); + + if (WARN_ON(!p->cs_enabled)) + return; + + sh_tmu_disable(p); + p->cs_enabled = false; +} + +static void sh_tmu_clocksource_suspend(struct clocksource *cs) +{ + struct sh_tmu_priv *p = cs_to_sh_tmu(cs); + + if (!p->cs_enabled) + return; + + if (--p->enable_count == 0) { + __sh_tmu_disable(p); + pm_genpd_syscore_poweroff(&p->pdev->dev); + } +} + +static void sh_tmu_clocksource_resume(struct clocksource *cs) +{ + struct sh_tmu_priv *p = cs_to_sh_tmu(cs); + + if (!p->cs_enabled) + return; + + if (p->enable_count++ == 0) { + pm_genpd_syscore_poweron(&p->pdev->dev); + __sh_tmu_enable(p); + } } static int sh_tmu_register_clocksource(struct sh_tmu_priv *p, @@ -224,6 +290,8 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p, cs->read = sh_tmu_clocksource_read; cs->enable = sh_tmu_clocksource_enable; cs->disable = sh_tmu_clocksource_disable; + cs->suspend = sh_tmu_clocksource_suspend; + cs->resume = sh_tmu_clocksource_resume; cs->mask = CLOCKSOURCE_MASK(32); cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; @@ -301,6 +369,16 @@ static int sh_tmu_clock_event_next(unsigned long delta, return 0; } +static void sh_tmu_clock_event_suspend(struct clock_event_device *ced) +{ + pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev); +} + +static void sh_tmu_clock_event_resume(struct clock_event_device *ced) +{ + pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev); +} + static void sh_tmu_register_clockevent(struct sh_tmu_priv *p, char *name, unsigned long rating) { @@ -316,6 +394,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p, ced->cpumask = cpumask_of(0); ced->set_next_event = sh_tmu_clock_event_next; ced->set_mode = sh_tmu_clock_event_mode; + ced->suspend = sh_tmu_clock_event_suspend; + ced->resume = sh_tmu_clock_event_resume; dev_info(&p->pdev->dev, "used for clock events\n"); @@ -392,6 +472,8 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev) ret = PTR_ERR(p->clk); goto err1; } + p->cs_enabled = false; + p->enable_count = 0; return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev), cfg->clockevent_rating, @@ -405,14 +487,17 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev) static int __devinit sh_tmu_probe(struct platform_device *pdev) { struct sh_tmu_priv *p = platform_get_drvdata(pdev); + struct sh_timer_config *cfg = pdev->dev.platform_data; int ret; - if (!is_early_platform_device(pdev)) - pm_genpd_dev_always_on(&pdev->dev, true); + if (!is_early_platform_device(pdev)) { + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + } if (p) { dev_info(&pdev->dev, "kept as earlytimer\n"); - return 0; + goto out; } p = kmalloc(sizeof(*p), GFP_KERNEL); @@ -425,8 +510,19 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev) if (ret) { kfree(p); platform_set_drvdata(pdev, NULL); + pm_runtime_idle(&pdev->dev); + return ret; } - return ret; + if (is_early_platform_device(pdev)) + return 0; + + out: + if (cfg->clockevent_rating || cfg->clocksource_rating) + pm_runtime_irq_safe(&pdev->dev); + else + pm_runtime_idle(&pdev->dev); + + return 0; } static int __devexit sh_tmu_remove(struct platform_device *pdev) diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index acba894374a1..8a7096fcb01e 100644 --- a/include/linux/clockchips.h +++ 
b/include/linux/clockchips.h @@ -97,6 +97,8 @@ struct clock_event_device { void (*broadcast)(const struct cpumask *mask); void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *); + void (*suspend)(struct clock_event_device *); + void (*resume)(struct clock_event_device *); unsigned long min_delta_ticks; unsigned long max_delta_ticks; @@ -156,6 +158,9 @@ clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec) freq, minsec); } +extern void clockevents_suspend(void); +extern void clockevents_resume(void); + #ifdef CONFIG_GENERIC_CLOCKEVENTS extern void clockevents_notify(unsigned long reason, void *arg); #else @@ -164,6 +169,9 @@ extern void clockevents_notify(unsigned long reason, void *arg); #else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */ +static inline void clockevents_suspend(void) {} +static inline void clockevents_resume(void) {} + #define clockevents_notify(reason, arg) do { } while (0) #endif diff --git a/include/linux/device.h b/include/linux/device.h index 52a5f15a2223..86529e642d6c 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -772,6 +772,13 @@ static inline void pm_suspend_ignore_children(struct device *dev, bool enable) dev->power.ignore_children = enable; } +static inline void dev_pm_syscore_device(struct device *dev, bool val) +{ +#ifdef CONFIG_PM_SLEEP + dev->power.syscore = val; +#endif +} + static inline void device_lock(struct device *dev) { mutex_lock(&dev->mutex); diff --git a/include/linux/pm.h b/include/linux/pm.h index f067e60a3832..44d1f2307dbc 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -510,12 +510,14 @@ struct dev_pm_info { bool is_prepared:1; /* Owned by the PM core */ bool is_suspended:1; /* Ditto */ bool ignore_children:1; + bool early_init:1; /* Owned by the PM core */ spinlock_t lock; #ifdef CONFIG_PM_SLEEP struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path:1; + bool syscore:1; #else unsigned int should_wakeup:1; #endif diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index a7d6172922d4..08adf8e5a80e 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -114,7 +114,6 @@ struct generic_pm_domain_data { struct mutex lock; unsigned int refcount; bool need_restore; - bool always_on; }; #ifdef CONFIG_PM_GENERIC_DOMAINS @@ -153,7 +152,6 @@ static inline int pm_genpd_of_add_device(struct device_node *genpd_node, extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, struct device *dev); -extern void pm_genpd_dev_always_on(struct device *dev, bool val); extern void pm_genpd_dev_need_restore(struct device *dev, bool val); extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *new_subdomain); @@ -199,7 +197,6 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline void pm_genpd_dev_always_on(struct device *dev, bool val) {} static inline void pm_genpd_dev_need_restore(struct device *dev, bool val) {} static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *new_sd) @@ -258,4 +255,20 @@ static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {} static inline void pm_genpd_poweroff_unused(void) {} #endif +#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP +extern void pm_genpd_syscore_switch(struct device *dev, bool suspend); +#else +static inline void pm_genpd_syscore_switch(struct device *dev, bool suspend) {} +#endif + +static inline void 
pm_genpd_syscore_poweroff(struct device *dev) +{ + pm_genpd_syscore_switch(dev, true); +} + +static inline void pm_genpd_syscore_poweron(struct device *dev) +{ + pm_genpd_syscore_switch(dev, false); +} + #endif /* _LINUX_PM_DOMAIN_H */ diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index a70518c9d82f..5dfdc9ea180b 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -263,6 +263,10 @@ config PM_GENERIC_DOMAINS bool depends on PM +config PM_GENERIC_DOMAINS_SLEEP + def_bool y + depends on PM_SLEEP && PM_GENERIC_DOMAINS + config PM_GENERIC_DOMAINS_RUNTIME def_bool y depends on PM_RUNTIME && PM_GENERIC_DOMAINS diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 7e1ce012a851..30b6de0d977c 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -397,6 +397,30 @@ void clockevents_exchange_device(struct clock_event_device *old, local_irq_restore(flags); } +/** + * clockevents_suspend - suspend clock devices + */ +void clockevents_suspend(void) +{ + struct clock_event_device *dev; + + list_for_each_entry_reverse(dev, &clockevent_devices, list) + if (dev->suspend) + dev->suspend(dev); +} + +/** + * clockevents_resume - resume clock devices + */ +void clockevents_resume(void) +{ + struct clock_event_device *dev; + + list_for_each_entry(dev, &clockevent_devices, list) + if (dev->resume) + dev->resume(dev); +} + #ifdef CONFIG_GENERIC_CLOCKEVENTS /** * clockevents_notify - notification about relevant events diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 34e5eac81424..312a675cb240 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -773,6 +773,7 @@ static void timekeeping_resume(void) read_persistent_clock(&ts); + clockevents_resume(); clocksource_resume(); write_seqlock_irqsave(&tk->lock, flags); @@ -832,6 +833,7 @@ static int timekeeping_suspend(void) clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); clocksource_suspend(); + clockevents_suspend(); return 0; } |
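
One detail of the driver changes above worth calling out: while a channel is actually in use, the drivers now runtime-resume the device and set the new per-device power.syscore flag through dev_pm_syscore_device(), which makes the PM core's prepare/suspend/resume callbacks skip the device entirely; power is then handled at the syscore stage as in the clock-events sketch earlier. A minimal, hypothetical enable/disable pair along the lines of sh_cmt_enable()/sh_cmt_disable() (error handling simplified; only the pm_runtime_*() and dev_pm_syscore_device() calls reflect the merged code):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int my_timer_enable(struct platform_device *pdev, struct clk *clk)
{
	int ret;

	/* Bring the device (and its PM domain) up via runtime PM. */
	pm_runtime_get_sync(&pdev->dev);
	/* Skip the regular system suspend/resume callbacks while the timer runs. */
	dev_pm_syscore_device(&pdev->dev, true);

	ret = clk_enable(clk);
	if (ret) {
		dev_pm_syscore_device(&pdev->dev, false);
		pm_runtime_put(&pdev->dev);
	}
	return ret;
}

static void my_timer_disable(struct platform_device *pdev, struct clk *clk)
{
	clk_disable(clk);

	/* Ordinary system and runtime PM apply again from here on. */
	dev_pm_syscore_device(&pdev->dev, false);
	pm_runtime_put(&pdev->dev);
}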
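
The probe-time counterpart is that runtime PM is only set up for devices instantiated through the normal platform bus; early platform devices merely get pm_runtime_early_init() from early_platform_add_devices(), which initializes just enough state (the power lock and disable_depth) for the runtime PM helpers to be safely callable before the device is fully registered. A hedged sketch of the probe pattern the three timer drivers converge on (struct sh_timer_config is the existing SH timer platform data; the rest of the probe body is elided and my_timer_probe is hypothetical):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>

static int my_timer_probe(struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;

	if (!is_early_platform_device(pdev)) {
		/* The boot code leaves the timer hardware powered and running. */
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	/* ... ioremap registers, clk_get(), request the IRQ, register the timer ... */

	if (is_early_platform_device(pdev))
		return 0;

	if (cfg->clockevent_rating || cfg->clocksource_rating)
		/* Runtime PM calls will come from interrupt context. */
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}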