Diffstat (limited to 'drivers/base')
-rw-r--r-- | drivers/base/Makefile | 1
-rw-r--r-- | drivers/base/base.h | 3
-rw-r--r-- | drivers/base/core.c | 95
-rw-r--r-- | drivers/base/cpu.c | 2
-rw-r--r-- | drivers/base/dd.c | 28
-rw-r--r-- | drivers/base/devres.c | 4
-rw-r--r-- | drivers/base/firmware_class.c | 2
-rw-r--r-- | drivers/base/platform-msi.c | 282
-rw-r--r-- | drivers/base/platform.c | 8
-rw-r--r-- | drivers/base/power/clock_ops.c | 4
-rw-r--r-- | drivers/base/power/domain.c | 424
-rw-r--r-- | drivers/base/power/main.c | 2
-rw-r--r-- | drivers/base/power/opp.c | 1035
-rw-r--r-- | drivers/base/power/power.h | 2
-rw-r--r-- | drivers/base/power/qos.c | 37
-rw-r--r-- | drivers/base/power/sysfs.c | 11
-rw-r--r-- | drivers/base/property.c | 8
17 files changed, 1424 insertions, 524 deletions
diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 527d291706e8..6b2a84e7f2be 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_REGMAP) += regmap/ obj-$(CONFIG_SOC_BUS) += soc.o obj-$(CONFIG_PINCTRL) += pinctrl.o obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o +obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG diff --git a/drivers/base/base.h b/drivers/base/base.h index fd3347d9f153..1782f3aa386e 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -63,7 +63,7 @@ struct driver_private { * binding of drivers which were unable to get all the resources needed by * the device; typically because it depends on another driver getting * probed first. - * @device - pointer back to the struct class that this structure is + * @device - pointer back to the struct device that this structure is * associated with. * * Nothing outside of the driver core should ever touch these fields. @@ -134,6 +134,7 @@ extern int devres_release_all(struct device *dev); /* /sys/devices directory */ extern struct kset *devices_kset; +extern void devices_kset_move_last(struct device *dev); #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS) extern void module_add_driver(struct module *mod, struct device_driver *drv); diff --git a/drivers/base/core.c b/drivers/base/core.c index dafae6d2f7ac..334ec7ef1960 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -534,6 +534,52 @@ static DEVICE_ATTR_RO(dev); struct kset *devices_kset; /** + * devices_kset_move_before - Move device in the devices_kset's list. + * @deva: Device to move. + * @devb: Device @deva should come before. + */ +static void devices_kset_move_before(struct device *deva, struct device *devb) +{ + if (!devices_kset) + return; + pr_debug("devices_kset: Moving %s before %s\n", + dev_name(deva), dev_name(devb)); + spin_lock(&devices_kset->list_lock); + list_move_tail(&deva->kobj.entry, &devb->kobj.entry); + spin_unlock(&devices_kset->list_lock); +} + +/** + * devices_kset_move_after - Move device in the devices_kset's list. + * @deva: Device to move + * @devb: Device @deva should come after. + */ +static void devices_kset_move_after(struct device *deva, struct device *devb) +{ + if (!devices_kset) + return; + pr_debug("devices_kset: Moving %s after %s\n", + dev_name(deva), dev_name(devb)); + spin_lock(&devices_kset->list_lock); + list_move(&deva->kobj.entry, &devb->kobj.entry); + spin_unlock(&devices_kset->list_lock); +} + +/** + * devices_kset_move_last - move the device to the end of devices_kset's list. + * @dev: device to move + */ +void devices_kset_move_last(struct device *dev) +{ + if (!devices_kset) + return; + pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev)); + spin_lock(&devices_kset->list_lock); + list_move_tail(&dev->kobj.entry, &devices_kset->list); + spin_unlock(&devices_kset->list_lock); +} + +/** * device_create_file - create sysfs attribute file for device. * @dev: device. * @attr: device attribute descriptor. 
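The devices_kset_move_before/after/last() helpers added in the hunk above rely on the standard kernel list primitives: list_move() re-links a node immediately after a given position, while list_move_tail() re-links it immediately before one (or, given the list head itself, at the end of the list). The following is a minimal user-space sketch of those semantics only; the types and names (fake_dev, list_init) are stand-ins, not kernel code.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void __list_add(struct list_head *n, struct list_head *prev,
		       struct list_head *next)
{
	next->prev = n;
	n->next = next;
	n->prev = prev;
	prev->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* list_move(): unlink @n, re-add it right after @pos */
static void list_move(struct list_head *n, struct list_head *pos)
{
	list_del(n);
	__list_add(n, pos, pos->next);
}

/* list_move_tail(): unlink @n, re-add it right before @pos */
static void list_move_tail(struct list_head *n, struct list_head *pos)
{
	list_del(n);
	__list_add(n, pos->prev, pos);
}

struct fake_dev { struct list_head entry; const char *name; };

int main(void)
{
	struct list_head devices;
	struct fake_dev parent = { .name = "parent" };
	struct fake_dev child  = { .name = "child"  };

	list_init(&devices);
	__list_add(&parent.entry, devices.prev, &devices); /* append parent */
	__list_add(&child.entry,  devices.prev, &devices); /* append child */

	/* devices_kset_move_after(child, parent): child right after parent */
	list_move(&child.entry, &parent.entry);

	/* devices_kset_move_last(parent): parent goes to the end */
	list_move_tail(&parent.entry, &devices);

	for (struct list_head *p = devices.next; p != &devices; p = p->next) {
		struct fake_dev *d = (struct fake_dev *)
			((char *)p - offsetof(struct fake_dev, entry));
		printf("%s\n", d->name); /* prints: child, then parent */
	}
	return 0;
}

This mirrors why really_probe() can call devices_kset_move_last() recursively: a parent re-probed from a child's probe always ends up ahead of the child in the final list order.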
@@ -662,6 +708,9 @@ void device_initialize(struct device *dev) INIT_LIST_HEAD(&dev->devres_head); device_pm_init(dev); set_dev_node(dev, -1); +#ifdef CONFIG_GENERIC_MSI_IRQ + INIT_LIST_HEAD(&dev->msi_list); +#endif } EXPORT_SYMBOL_GPL(device_initialize); @@ -1252,6 +1301,19 @@ void device_unregister(struct device *dev) } EXPORT_SYMBOL_GPL(device_unregister); +static struct device *prev_device(struct klist_iter *i) +{ + struct klist_node *n = klist_prev(i); + struct device *dev = NULL; + struct device_private *p; + + if (n) { + p = to_device_private_parent(n); + dev = p->device; + } + return dev; +} + static struct device *next_device(struct klist_iter *i) { struct klist_node *n = klist_next(i); @@ -1341,6 +1403,36 @@ int device_for_each_child(struct device *parent, void *data, EXPORT_SYMBOL_GPL(device_for_each_child); /** + * device_for_each_child_reverse - device child iterator in reversed order. + * @parent: parent struct device. + * @fn: function to be called for each device. + * @data: data for the callback. + * + * Iterate over @parent's child devices, and call @fn for each, + * passing it @data. + * + * We check the return of @fn each time. If it returns anything + * other than 0, we break out and return that value. + */ +int device_for_each_child_reverse(struct device *parent, void *data, + int (*fn)(struct device *dev, void *data)) +{ + struct klist_iter i; + struct device *child; + int error = 0; + + if (!parent->p) + return 0; + + klist_iter_init(&parent->p->klist_children, &i); + while ((child = prev_device(&i)) && !error) + error = fn(child, data); + klist_iter_exit(&i); + return error; +} +EXPORT_SYMBOL_GPL(device_for_each_child_reverse); + +/** * device_find_child - device iterator for locating a particular device. * @parent: parent struct device * @match: Callback function to check device @@ -1923,12 +2015,15 @@ int device_move(struct device *dev, struct device *new_parent, break; case DPM_ORDER_DEV_AFTER_PARENT: device_pm_move_after(dev, new_parent); + devices_kset_move_after(dev, new_parent); break; case DPM_ORDER_PARENT_BEFORE_DEV: device_pm_move_before(new_parent, dev); + devices_kset_move_before(new_parent, dev); break; case DPM_ORDER_DEV_LAST: device_pm_move_last(dev); + devices_kset_move_last(dev); break; } diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 78720e706176..91bbb1959d8d 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -41,7 +41,7 @@ static void change_cpu_under_node(struct cpu *cpu, cpu->node_id = to_nid; } -static int __ref cpu_subsys_online(struct device *dev) +static int cpu_subsys_online(struct device *dev) { struct cpu *cpu = container_of(dev, struct cpu, dev); int cpuid = dev->id; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index a638bbb1a27a..be0eb4639128 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -304,6 +304,14 @@ static int really_probe(struct device *dev, struct device_driver *drv) goto probe_failed; } + /* + * Ensure devices are listed in devices_kset in correct order + * It's important to move Dev to the end of devices_kset before + * calling .probe, because it could be recursive and parent Dev + * should always go first + */ + devices_kset_move_last(dev); + if (dev->bus->probe) { ret = dev->bus->probe(dev); if (ret) @@ -399,6 +407,8 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe); * * This function must be called with @dev lock held. When called for a * USB interface, @dev->parent lock must be held as well. + * + * If the device has a parent, runtime-resume the parent before driver probing. 
*/ int driver_probe_device(struct device_driver *drv, struct device *dev) { @@ -410,10 +420,16 @@ int driver_probe_device(struct device_driver *drv, struct device *dev) pr_debug("bus: '%s': %s: matched device %s with driver %s\n", drv->bus->name, __func__, dev_name(dev), drv->name); + if (dev->parent) + pm_runtime_get_sync(dev->parent); + pm_runtime_barrier(dev); ret = really_probe(dev, drv); pm_request_idle(dev); + if (dev->parent) + pm_runtime_put(dev->parent); + return ret; } @@ -507,11 +523,17 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie) device_lock(dev); + if (dev->parent) + pm_runtime_get_sync(dev->parent); + bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver); dev_dbg(dev, "async probe completed\n"); pm_request_idle(dev); + if (dev->parent) + pm_runtime_put(dev->parent); + device_unlock(dev); put_device(dev); @@ -541,6 +563,9 @@ static int __device_attach(struct device *dev, bool allow_async) .want_async = false, }; + if (dev->parent) + pm_runtime_get_sync(dev->parent); + ret = bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver); if (!ret && allow_async && data.have_async) { @@ -557,6 +582,9 @@ static int __device_attach(struct device *dev, bool allow_async) } else { pm_request_idle(dev); } + + if (dev->parent) + pm_runtime_put(dev->parent); } out_unlock: device_unlock(dev); diff --git a/drivers/base/devres.c b/drivers/base/devres.c index c8a53d1e019f..875464690117 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -297,10 +297,10 @@ void * devres_get(struct device *dev, void *new_res, if (!dr) { add_dr(dev, &new_dr->node); dr = new_dr; - new_dr = NULL; + new_res = NULL; } spin_unlock_irqrestore(&dev->devres_lock, flags); - devres_free(new_dr); + devres_free(new_res); return dr->data; } diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 894bda114224..8524450e75bd 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -443,7 +443,7 @@ static int fw_add_devm_name(struct device *dev, const char *name) return -ENOMEM; fwn->name = kstrdup_const(name, GFP_KERNEL); if (!fwn->name) { - kfree(fwn); + devres_free(fwn); return -ENOMEM; } diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c new file mode 100644 index 000000000000..1857a5dd0816 --- /dev/null +++ b/drivers/base/platform-msi.c @@ -0,0 +1,282 @@ +/* + * MSI framework for platform devices + * + * Copyright (C) 2015 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/device.h> +#include <linux/idr.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/msi.h> +#include <linux/slab.h> + +#define DEV_ID_SHIFT 24 + +/* + * Internal data structure containing a (made up, but unique) devid + * and the callback to write the MSI message. 
+ */ +struct platform_msi_priv_data { + irq_write_msi_msg_t write_msg; + int devid; +}; + +/* The devid allocator */ +static DEFINE_IDA(platform_msi_devid_ida); + +#ifdef GENERIC_MSI_DOMAIN_OPS +/* + * Convert an msi_desc to a globaly unique identifier (per-device + * devid + msi_desc position in the msi_list). + */ +static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc) +{ + u32 devid; + + devid = desc->platform.msi_priv_data->devid; + + return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index; +} + +static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + arg->desc = desc; + arg->hwirq = platform_msi_calc_hwirq(desc); +} + +static int platform_msi_init(struct irq_domain *domain, + struct msi_domain_info *info, + unsigned int virq, irq_hw_number_t hwirq, + msi_alloc_info_t *arg) +{ + struct irq_data *data; + + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, + info->chip, info->chip_data); + + /* + * Save the MSI descriptor in handler_data so that the + * irq_write_msi_msg callback can retrieve it (and the + * associated device). + */ + data = irq_domain_get_irq_data(domain, virq); + data->handler_data = arg->desc; + + return 0; +} +#else +#define platform_msi_set_desc NULL +#define platform_msi_init NULL +#endif + +static void platform_msi_update_dom_ops(struct msi_domain_info *info) +{ + struct msi_domain_ops *ops = info->ops; + + BUG_ON(!ops); + + if (ops->msi_init == NULL) + ops->msi_init = platform_msi_init; + if (ops->set_desc == NULL) + ops->set_desc = platform_msi_set_desc; +} + +static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct msi_desc *desc = irq_data_get_irq_handler_data(data); + struct platform_msi_priv_data *priv_data; + + priv_data = desc->platform.msi_priv_data; + + priv_data->write_msg(desc, msg); +} + +static void platform_msi_update_chip_ops(struct msi_domain_info *info) +{ + struct irq_chip *chip = info->chip; + + BUG_ON(!chip); + if (!chip->irq_mask) + chip->irq_mask = irq_chip_mask_parent; + if (!chip->irq_unmask) + chip->irq_unmask = irq_chip_unmask_parent; + if (!chip->irq_eoi) + chip->irq_eoi = irq_chip_eoi_parent; + if (!chip->irq_set_affinity) + chip->irq_set_affinity = msi_domain_set_affinity; + if (!chip->irq_write_msi_msg) + chip->irq_write_msi_msg = platform_msi_write_msg; +} + +static void platform_msi_free_descs(struct device *dev) +{ + struct msi_desc *desc, *tmp; + + list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) { + list_del(&desc->list); + free_msi_entry(desc); + } +} + +static int platform_msi_alloc_descs(struct device *dev, int nvec, + struct platform_msi_priv_data *data) + +{ + int i; + + for (i = 0; i < nvec; i++) { + struct msi_desc *desc; + + desc = alloc_msi_entry(dev); + if (!desc) + break; + + desc->platform.msi_priv_data = data; + desc->platform.msi_index = i; + desc->nvec_used = 1; + + list_add_tail(&desc->list, dev_to_msi_list(dev)); + } + + if (i != nvec) { + /* Clean up the mess */ + platform_msi_free_descs(dev); + + return -ENOMEM; + } + + return 0; +} + +/** + * platform_msi_create_irq_domain - Create a platform MSI interrupt domain + * @np: Optional device-tree node of the interrupt controller + * @info: MSI domain info + * @parent: Parent irq domain + * + * Updates the domain and chip ops and creates a platform MSI + * interrupt domain. + * + * Returns: + * A domain pointer or NULL in case of failure. 
+ */ +struct irq_domain *platform_msi_create_irq_domain(struct device_node *np, + struct msi_domain_info *info, + struct irq_domain *parent) +{ + struct irq_domain *domain; + + if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) + platform_msi_update_dom_ops(info); + if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) + platform_msi_update_chip_ops(info); + + domain = msi_create_irq_domain(np, info, parent); + if (domain) + domain->bus_token = DOMAIN_BUS_PLATFORM_MSI; + + return domain; +} + +/** + * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev + * @dev: The device for which to allocate interrupts + * @nvec: The number of interrupts to allocate + * @write_msi_msg: Callback to write an interrupt message for @dev + * + * Returns: + * Zero for success, or an error code in case of failure + */ +int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, + irq_write_msi_msg_t write_msi_msg) +{ + struct platform_msi_priv_data *priv_data; + int err; + + /* + * Limit the number of interrupts to 256 per device. Should we + * need to bump this up, DEV_ID_SHIFT should be adjusted + * accordingly (which would impact the max number of MSI + * capable devices). + */ + if (!dev->msi_domain || !write_msi_msg || !nvec || + nvec > (1 << (32 - DEV_ID_SHIFT))) + return -EINVAL; + + if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) { + dev_err(dev, "Incompatible msi_domain, giving up\n"); + return -EINVAL; + } + + /* Already had a helping of MSI? Greed... */ + if (!list_empty(dev_to_msi_list(dev))) + return -EBUSY; + + priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL); + if (!priv_data) + return -ENOMEM; + + priv_data->devid = ida_simple_get(&platform_msi_devid_ida, + 0, 1 << DEV_ID_SHIFT, GFP_KERNEL); + if (priv_data->devid < 0) { + err = priv_data->devid; + goto out_free_data; + } + + priv_data->write_msg = write_msi_msg; + + err = platform_msi_alloc_descs(dev, nvec, priv_data); + if (err) + goto out_free_id; + + err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec); + if (err) + goto out_free_desc; + + return 0; + +out_free_desc: + platform_msi_free_descs(dev); +out_free_id: + ida_simple_remove(&platform_msi_devid_ida, priv_data->devid); +out_free_data: + kfree(priv_data); + + return err; +} + +/** + * platform_msi_domain_free_irqs - Free MSI interrupts for @dev + * @dev: The device for which to free interrupts + */ +void platform_msi_domain_free_irqs(struct device *dev) +{ + struct msi_desc *desc; + + desc = first_msi_entry(dev); + if (desc) { + struct platform_msi_priv_data *data; + + data = desc->platform.msi_priv_data; + + ida_simple_remove(&platform_msi_devid_ida, data->devid); + kfree(data); + } + + msi_domain_free_irqs(dev->msi_domain, dev); + platform_msi_free_descs(dev); +} diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 063f0ab15259..f80aaaf9f610 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -375,9 +375,7 @@ int platform_device_add(struct platform_device *pdev) while (--i >= 0) { struct resource *r = &pdev->resource[i]; - unsigned long type = resource_type(r); - - if (type == IORESOURCE_MEM || type == IORESOURCE_IO) + if (r->parent) release_resource(r); } @@ -408,9 +406,7 @@ void platform_device_del(struct platform_device *pdev) for (i = 0; i < pdev->num_resources; i++) { struct resource *r = &pdev->resource[i]; - unsigned long type = resource_type(r); - - if (type == IORESOURCE_MEM || type == IORESOURCE_IO) + if (r->parent) release_resource(r); } } diff --git a/drivers/base/power/clock_ops.c 
b/drivers/base/power/clock_ops.c index acef9f9f759a..652b5a367c1f 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -38,7 +38,7 @@ struct pm_clock_entry { * @dev: The device for the given clock * @ce: PM clock entry corresponding to the clock. */ -static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce) +static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce) { int ret; @@ -50,8 +50,6 @@ static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce) dev_err(dev, "%s: failed to enable clk %p, error %d\n", __func__, ce->clk, ret); } - - return ret; } /** diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 0ee43c1056e0..16550c63d611 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -114,8 +114,12 @@ static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) stop_latency_ns, "stop"); } -static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) +static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev, + bool timed) { + if (!timed) + return GENPD_DEV_CALLBACK(genpd, int, start, dev); + return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, start_latency_ns, "start"); } @@ -136,41 +140,6 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) smp_mb__after_atomic(); } -static void genpd_acquire_lock(struct generic_pm_domain *genpd) -{ - DEFINE_WAIT(wait); - - mutex_lock(&genpd->lock); - /* - * Wait for the domain to transition into either the active, - * or the power off state. - */ - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - if (genpd->status == GPD_STATE_ACTIVE - || genpd->status == GPD_STATE_POWER_OFF) - break; - mutex_unlock(&genpd->lock); - - schedule(); - - mutex_lock(&genpd->lock); - } - finish_wait(&genpd->status_wait_queue, &wait); -} - -static void genpd_release_lock(struct generic_pm_domain *genpd) -{ - mutex_unlock(&genpd->lock); -} - -static void genpd_set_active(struct generic_pm_domain *genpd) -{ - if (genpd->resume_count == 0) - genpd->status = GPD_STATE_ACTIVE; -} - static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) { s64 usecs64; @@ -244,6 +213,18 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed) } /** + * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). + * @genpd: PM domait to power off. + * + * Queue up the execution of pm_genpd_poweroff() unless it's already been done + * before. + */ +static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) +{ + queue_work(pm_wq, &genpd->power_off_work); +} + +/** * __pm_genpd_poweron - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. * @@ -251,35 +232,14 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed) * resume a device belonging to it. */ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) { struct gpd_link *link; - DEFINE_WAIT(wait); int ret = 0; - /* If the domain's master is being waited for, we have to wait too. 
*/ - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - if (genpd->status != GPD_STATE_WAIT_MASTER) - break; - mutex_unlock(&genpd->lock); - - schedule(); - - mutex_lock(&genpd->lock); - } - finish_wait(&genpd->status_wait_queue, &wait); - if (genpd->status == GPD_STATE_ACTIVE || (genpd->prepared_count > 0 && genpd->suspend_power_off)) return 0; - if (genpd->status != GPD_STATE_POWER_OFF) { - genpd_set_active(genpd); - return 0; - } - if (genpd->cpuidle_data) { cpuidle_pause_and_lock(); genpd->cpuidle_data->idle_state->disabled = true; @@ -294,20 +254,8 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) */ list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_inc(link->master); - genpd->status = GPD_STATE_WAIT_MASTER; - - mutex_unlock(&genpd->lock); ret = pm_genpd_poweron(link->master); - - mutex_lock(&genpd->lock); - - /* - * The "wait for parent" status is guaranteed not to change - * while the master is powering on. - */ - genpd->status = GPD_STATE_POWER_OFF; - wake_up_all(&genpd->status_wait_queue); if (ret) { genpd_sd_counter_dec(link->master); goto err; @@ -319,13 +267,16 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) goto err; out: - genpd_set_active(genpd); - + genpd->status = GPD_STATE_ACTIVE; return 0; err: - list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node) + list_for_each_entry_continue_reverse(link, + &genpd->slave_links, + slave_node) { genpd_sd_counter_dec(link->master); + genpd_queue_power_off_work(link->master); + } return ret; } @@ -356,20 +307,18 @@ int pm_genpd_name_poweron(const char *domain_name) return genpd ? pm_genpd_poweron(genpd) : -EINVAL; } -static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, - struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, start, dev); -} - static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, save_state_latency_ns, "state save"); } -static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) +static int genpd_restore_dev(struct generic_pm_domain *genpd, + struct device *dev, bool timed) { + if (!timed) + return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev); + return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, restore_state_latency_ns, "state restore"); @@ -416,133 +365,30 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, } /** - * __pm_genpd_save_device - Save the pre-suspend state of a device. - * @pdd: Domain data of the device to save the state of. - * @genpd: PM domain the device belongs to. - */ -static int __pm_genpd_save_device(struct pm_domain_data *pdd, - struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) -{ - struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); - struct device *dev = pdd->dev; - int ret = 0; - - if (gpd_data->need_restore > 0) - return 0; - - /* - * If the value of the need_restore flag is still unknown at this point, - * we trust that pm_genpd_poweroff() has verified that the device is - * already runtime PM suspended. 
- */ - if (gpd_data->need_restore < 0) { - gpd_data->need_restore = 1; - return 0; - } - - mutex_unlock(&genpd->lock); - - genpd_start_dev(genpd, dev); - ret = genpd_save_dev(genpd, dev); - genpd_stop_dev(genpd, dev); - - mutex_lock(&genpd->lock); - - if (!ret) - gpd_data->need_restore = 1; - - return ret; -} - -/** - * __pm_genpd_restore_device - Restore the pre-suspend state of a device. - * @pdd: Domain data of the device to restore the state of. - * @genpd: PM domain the device belongs to. - */ -static void __pm_genpd_restore_device(struct pm_domain_data *pdd, - struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) -{ - struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); - struct device *dev = pdd->dev; - int need_restore = gpd_data->need_restore; - - gpd_data->need_restore = 0; - mutex_unlock(&genpd->lock); - - genpd_start_dev(genpd, dev); - - /* - * Call genpd_restore_dev() for recently added devices too (need_restore - * is negative then). - */ - if (need_restore) - genpd_restore_dev(genpd, dev); - - mutex_lock(&genpd->lock); -} - -/** - * genpd_abort_poweroff - Check if a PM domain power off should be aborted. - * @genpd: PM domain to check. - * - * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during - * a "power off" operation, which means that a "power on" has occured in the - * meantime, or if its resume_count field is different from zero, which means - * that one of its devices has been resumed in the meantime. - */ -static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) -{ - return genpd->status == GPD_STATE_WAIT_MASTER - || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; -} - -/** - * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). - * @genpd: PM domait to power off. - * - * Queue up the execution of pm_genpd_poweroff() unless it's already been done - * before. - */ -static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) -{ - queue_work(pm_wq, &genpd->power_off_work); -} - -/** * pm_genpd_poweroff - Remove power from a given PM domain. * @genpd: PM domain to power down. * * If all of the @genpd's devices have been suspended and all of its subdomains - * have been powered down, run the runtime suspend callbacks provided by all of - * the @genpd's devices' drivers and remove power from @genpd. + * have been powered down, remove power from @genpd. */ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) { struct pm_domain_data *pdd; struct gpd_link *link; - unsigned int not_suspended; - int ret = 0; + unsigned int not_suspended = 0; - start: /* * Do not try to power off the domain in the following situations: * (1) The domain is already in the "power off" state. - * (2) The domain is waiting for its master to power up. - * (3) One of the domain's devices is being resumed right now. - * (4) System suspend is in progress. + * (2) System suspend is in progress. 
*/ if (genpd->status == GPD_STATE_POWER_OFF - || genpd->status == GPD_STATE_WAIT_MASTER - || genpd->resume_count > 0 || genpd->prepared_count > 0) + || genpd->prepared_count > 0) return 0; if (atomic_read(&genpd->sd_count) > 0) return -EBUSY; - not_suspended = 0; list_for_each_entry(pdd, &genpd->dev_list, list_node) { enum pm_qos_flags_status stat; @@ -560,41 +406,11 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) if (not_suspended > genpd->in_progress) return -EBUSY; - if (genpd->poweroff_task) { - /* - * Another instance of pm_genpd_poweroff() is executing - * callbacks, so tell it to start over and return. - */ - genpd->status = GPD_STATE_REPEAT; - return 0; - } - if (genpd->gov && genpd->gov->power_down_ok) { if (!genpd->gov->power_down_ok(&genpd->domain)) return -EAGAIN; } - genpd->status = GPD_STATE_BUSY; - genpd->poweroff_task = current; - - list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { - ret = atomic_read(&genpd->sd_count) == 0 ? - __pm_genpd_save_device(pdd, genpd) : -EBUSY; - - if (genpd_abort_poweroff(genpd)) - goto out; - - if (ret) { - genpd_set_active(genpd); - goto out; - } - - if (genpd->status == GPD_STATE_REPEAT) { - genpd->poweroff_task = NULL; - goto start; - } - } - if (genpd->cpuidle_data) { /* * If cpuidle_data is set, cpuidle should turn the domain off @@ -607,14 +423,14 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) cpuidle_pause_and_lock(); genpd->cpuidle_data->idle_state->disabled = false; cpuidle_resume_and_unlock(); - goto out; + return 0; } if (genpd->power_off) { - if (atomic_read(&genpd->sd_count) > 0) { - ret = -EBUSY; - goto out; - } + int ret; + + if (atomic_read(&genpd->sd_count) > 0) + return -EBUSY; /* * If sd_count > 0 at this point, one of the subdomains hasn't @@ -625,10 +441,8 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) * happen very often). */ ret = genpd_power_off(genpd, true); - if (ret == -EBUSY) { - genpd_set_active(genpd); - goto out; - } + if (ret) + return ret; } genpd->status = GPD_STATE_POWER_OFF; @@ -638,10 +452,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) genpd_queue_power_off_work(link->master); } - out: - genpd->poweroff_task = NULL; - wake_up_all(&genpd->status_wait_queue); - return ret; + return 0; } /** @@ -654,9 +465,9 @@ static void genpd_power_off_work_fn(struct work_struct *work) genpd = container_of(work, struct generic_pm_domain, power_off_work); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); pm_genpd_poweroff(genpd); - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); } /** @@ -670,7 +481,6 @@ static void genpd_power_off_work_fn(struct work_struct *work) static int pm_genpd_runtime_suspend(struct device *dev) { struct generic_pm_domain *genpd; - struct generic_pm_domain_data *gpd_data; bool (*stop_ok)(struct device *__dev); int ret; @@ -684,10 +494,16 @@ static int pm_genpd_runtime_suspend(struct device *dev) if (stop_ok && !stop_ok(dev)) return -EBUSY; - ret = genpd_stop_dev(genpd, dev); + ret = genpd_save_dev(genpd, dev); if (ret) return ret; + ret = genpd_stop_dev(genpd, dev); + if (ret) { + genpd_restore_dev(genpd, dev, true); + return ret; + } + /* * If power.irq_safe is set, this routine will be run with interrupts * off, so it can't use mutexes. @@ -696,16 +512,6 @@ static int pm_genpd_runtime_suspend(struct device *dev) return 0; mutex_lock(&genpd->lock); - - /* - * If we have an unknown state of the need_restore flag, it means none - * of the runtime PM callbacks has been invoked yet. 
Let's update the - * flag to reflect that the current state is active. - */ - gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); - if (gpd_data->need_restore < 0) - gpd_data->need_restore = 0; - genpd->in_progress++; pm_genpd_poweroff(genpd); genpd->in_progress--; @@ -725,8 +531,8 @@ static int pm_genpd_runtime_suspend(struct device *dev) static int pm_genpd_runtime_resume(struct device *dev) { struct generic_pm_domain *genpd; - DEFINE_WAIT(wait); int ret; + bool timed = true; dev_dbg(dev, "%s()\n", __func__); @@ -735,39 +541,21 @@ static int pm_genpd_runtime_resume(struct device *dev) return -EINVAL; /* If power.irq_safe, the PM domain is never powered off. */ - if (dev->power.irq_safe) - return genpd_start_dev_no_timing(genpd, dev); + if (dev->power.irq_safe) { + timed = false; + goto out; + } mutex_lock(&genpd->lock); ret = __pm_genpd_poweron(genpd); - if (ret) { - mutex_unlock(&genpd->lock); - return ret; - } - genpd->status = GPD_STATE_BUSY; - genpd->resume_count++; - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - /* - * If current is the powering off task, we have been called - * reentrantly from one of the device callbacks, so we should - * not wait. - */ - if (!genpd->poweroff_task || genpd->poweroff_task == current) - break; - mutex_unlock(&genpd->lock); + mutex_unlock(&genpd->lock); - schedule(); + if (ret) + return ret; - mutex_lock(&genpd->lock); - } - finish_wait(&genpd->status_wait_queue, &wait); - __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); - genpd->resume_count--; - genpd_set_active(genpd); - wake_up_all(&genpd->status_wait_queue); - mutex_unlock(&genpd->lock); + out: + genpd_start_dev(genpd, dev, timed); + genpd_restore_dev(genpd, dev, timed); return 0; } @@ -883,7 +671,7 @@ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd, { struct gpd_link *link; - if (genpd->status != GPD_STATE_POWER_OFF) + if (genpd->status == GPD_STATE_ACTIVE) return; list_for_each_entry(link, &genpd->slave_links, slave_node) { @@ -960,14 +748,14 @@ static int pm_genpd_prepare(struct device *dev) if (resume_needed(dev, genpd)) pm_runtime_resume(dev); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count++ == 0) { genpd->suspended_count = 0; genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; } - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); if (genpd->suspend_power_off) { pm_runtime_put_noidle(dev); @@ -1102,7 +890,7 @@ static int pm_genpd_resume_noirq(struct device *dev) pm_genpd_sync_poweron(genpd, true); genpd->suspended_count--; - return genpd_start_dev(genpd, dev); + return genpd_start_dev(genpd, dev, true); } /** @@ -1230,7 +1018,7 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev); + return genpd->suspend_power_off ? 
0 : genpd_start_dev(genpd, dev, true); } /** @@ -1324,7 +1112,7 @@ static int pm_genpd_restore_noirq(struct device *dev) pm_genpd_sync_poweron(genpd, true); - return genpd_start_dev(genpd, dev); + return genpd_start_dev(genpd, dev, true); } /** @@ -1440,7 +1228,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, gpd_data->td = *td; gpd_data->base.dev = dev; - gpd_data->need_restore = -1; gpd_data->td.constraint_changed = true; gpd_data->td.effective_constraint_ns = -1; gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; @@ -1502,7 +1289,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (IS_ERR(gpd_data)) return PTR_ERR(gpd_data); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count > 0) { ret = -EAGAIN; @@ -1519,7 +1306,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); if (ret) genpd_free_dev_data(dev, gpd_data); @@ -1563,7 +1350,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, gpd_data = to_gpd_data(pdd); dev_pm_qos_remove_notifier(dev, &gpd_data->nb); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count > 0) { ret = -EAGAIN; @@ -1578,14 +1365,14 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, list_del_init(&pdd->list_node); - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); genpd_free_dev_data(dev, gpd_data); return 0; out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); dev_pm_qos_add_notifier(dev, &gpd_data->nb); return ret; @@ -1606,17 +1393,9 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, || genpd == subdomain) return -EINVAL; - start: - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); - if (subdomain->status != GPD_STATE_POWER_OFF - && subdomain->status != GPD_STATE_ACTIVE) { - mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); - goto start; - } - if (genpd->status == GPD_STATE_POWER_OFF && subdomain->status != GPD_STATE_POWER_OFF) { ret = -EINVAL; @@ -1644,7 +1423,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, out: mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return ret; } @@ -1692,8 +1471,14 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) return -EINVAL; - start: - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); + + if (!list_empty(&subdomain->slave_links) || subdomain->device_count) { + pr_warn("%s: unable to remove subdomain %s\n", genpd->name, + subdomain->name); + ret = -EBUSY; + goto out; + } list_for_each_entry(link, &genpd->master_links, master_node) { if (link->slave != subdomain) @@ -1701,13 +1486,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); - if (subdomain->status != GPD_STATE_POWER_OFF - && subdomain->status != GPD_STATE_ACTIVE) { - mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); - goto start; - } - list_del(&link->master_node); list_del(&link->slave_node); kfree(link); @@ -1720,7 +1498,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, break; } - genpd_release_lock(genpd); +out: + mutex_unlock(&genpd->lock); return ret; } @@ -1744,7 +1523,7 @@ int pm_genpd_attach_cpuidle(struct 
generic_pm_domain *genpd, int state) if (IS_ERR_OR_NULL(genpd) || state < 0) return -EINVAL; - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->cpuidle_data) { ret = -EEXIST; @@ -1775,7 +1554,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) genpd_recalc_cpu_exit_latency(genpd); out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return ret; err: @@ -1812,7 +1591,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) if (IS_ERR_OR_NULL(genpd)) return -EINVAL; - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); cpuidle_data = genpd->cpuidle_data; if (!cpuidle_data) { @@ -1830,7 +1609,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) kfree(cpuidle_data); out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return ret; } @@ -1912,9 +1691,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->in_progress = 0; atomic_set(&genpd->sd_count, 0); genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; - init_waitqueue_head(&genpd->status_wait_queue); - genpd->poweroff_task = NULL; - genpd->resume_count = 0; genpd->device_count = 0; genpd->max_off_time_ns = -1; genpd->max_off_time_changed = true; @@ -1952,6 +1728,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd, list_add(&genpd->gpd_list_node, &gpd_list); mutex_unlock(&gpd_list_lock); } +EXPORT_SYMBOL_GPL(pm_genpd_init); #ifdef CONFIG_PM_GENERIC_DOMAINS_OF /* @@ -2125,7 +1902,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider); /** * genpd_dev_pm_detach - Detach a device from its PM domain. - * @dev: Device to attach. + * @dev: Device to detach. * @power_off: Currently not used * * Try to locate a corresponding generic PM domain, which the device was @@ -2183,7 +1960,10 @@ static void genpd_dev_pm_sync(struct device *dev) * Both generic and legacy Samsung-specific DT bindings are supported to keep * backwards compatibility with existing DTBs. * - * Returns 0 on successfully attached PM domain or negative error code. + * Returns 0 on successfully attached PM domain or negative error code. Note + * that if a power-domain exists for the device, but it cannot be found or + * turned on, then return -EPROBE_DEFER to ensure that the device is not + * probed and to re-try again later. */ int genpd_dev_pm_attach(struct device *dev) { @@ -2220,7 +2000,7 @@ int genpd_dev_pm_attach(struct device *dev) dev_dbg(dev, "%s() failed to find PM domain: %ld\n", __func__, PTR_ERR(pd)); of_node_put(dev->of_node); - return PTR_ERR(pd); + return -EPROBE_DEFER; } dev_dbg(dev, "adding to PM domain %s\n", pd->name); @@ -2238,14 +2018,15 @@ int genpd_dev_pm_attach(struct device *dev) dev_err(dev, "failed to add to PM domain %s: %d", pd->name, ret); of_node_put(dev->of_node); - return ret; + goto out; } dev->pm_domain->detach = genpd_dev_pm_detach; dev->pm_domain->sync = genpd_dev_pm_sync; - pm_genpd_poweron(pd); + ret = pm_genpd_poweron(pd); - return 0; +out: + return ret ? 
-EPROBE_DEFER : 0; } EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ @@ -2293,9 +2074,6 @@ static int pm_genpd_summary_one(struct seq_file *s, { static const char * const status_lookup[] = { [GPD_STATE_ACTIVE] = "on", - [GPD_STATE_WAIT_MASTER] = "wait-master", - [GPD_STATE_BUSY] = "busy", - [GPD_STATE_REPEAT] = "off-in-progress", [GPD_STATE_POWER_OFF] = "off" }; struct pm_domain_data *pm_data; @@ -2309,7 +2087,7 @@ static int pm_genpd_summary_one(struct seq_file *s, if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) goto exit; - seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]); + seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]); /* * Modifications on the list require holding locks on both @@ -2344,8 +2122,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data) struct generic_pm_domain *genpd; int ret = 0; - seq_puts(s, " domain status slaves\n"); - seq_puts(s, " /device runtime status\n"); + seq_puts(s, "domain status slaves\n"); + seq_puts(s, " /device runtime status\n"); seq_puts(s, "----------------------------------------------------------------------\n"); ret = mutex_lock_interruptible(&gpd_list_lock); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 30b7bbfdc558..1710c26ba097 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1377,7 +1377,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->power.direct_complete) { if (pm_runtime_status_suspended(dev)) { pm_runtime_disable(dev); - if (pm_runtime_suspended_if_enabled(dev)) + if (pm_runtime_status_suspended(dev)) goto Complete; pm_runtime_enable(dev); diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 677fb2843553..28cd75c535b0 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -11,6 +11,7 @@ * published by the Free Software Foundation. */ +#include <linux/cpu.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/err.h> @@ -51,10 +52,17 @@ * order. * @dynamic: not-created from static DT entries. * @available: true/false - marks if this OPP as available or not + * @turbo: true if turbo (boost) OPP * @rate: Frequency in hertz - * @u_volt: Nominal voltage in microvolts corresponding to this OPP + * @u_volt: Target voltage in microvolts corresponding to this OPP + * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP + * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP + * @u_amp: Maximum current drawn by the device in microamperes + * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's + * frequency from any other OPP's frequency. * @dev_opp: points back to the device_opp struct this opp belongs to * @rcu_head: RCU callback head used for deferred freeing + * @np: OPP's device node. * * This structure stores the OPP information for a given device. 
*/ @@ -63,11 +71,34 @@ struct dev_pm_opp { bool available; bool dynamic; + bool turbo; unsigned long rate; + unsigned long u_volt; + unsigned long u_volt_min; + unsigned long u_volt_max; + unsigned long u_amp; + unsigned long clock_latency_ns; struct device_opp *dev_opp; struct rcu_head rcu_head; + + struct device_node *np; +}; + +/** + * struct device_list_opp - devices managed by 'struct device_opp' + * @node: list node + * @dev: device to which the struct object belongs + * @rcu_head: RCU callback head used for deferred freeing + * + * This is an internal data structure maintaining the list of devices that are + * managed by 'struct device_opp'. + */ +struct device_list_opp { + struct list_head node; + const struct device *dev; + struct rcu_head rcu_head; }; /** @@ -77,10 +108,12 @@ struct dev_pm_opp { * list. * RCU usage: nodes are not modified in the list of device_opp, * however addition is possible and is secured by dev_opp_list_lock - * @dev: device pointer * @srcu_head: notifier head to notify the OPP availability changes. * @rcu_head: RCU callback head used for deferred freeing + * @dev_list: list of devices that share these OPPs * @opp_list: list of opps + * @np: struct device_node pointer for opp's DT node. + * @shared_opp: OPP is shared between multiple devices. * * This is an internal data structure maintaining the link to opps attached to * a device. This structure is not meant to be shared to users as it is @@ -93,10 +126,15 @@ struct dev_pm_opp { struct device_opp { struct list_head node; - struct device *dev; struct srcu_notifier_head srcu_head; struct rcu_head rcu_head; + struct list_head dev_list; struct list_head opp_list; + + struct device_node *np; + unsigned long clock_latency_ns_max; + bool shared_opp; + struct dev_pm_opp *suspend_opp; }; /* @@ -110,12 +148,44 @@ static DEFINE_MUTEX(dev_opp_list_lock); #define opp_rcu_lockdep_assert() \ do { \ - rcu_lockdep_assert(rcu_read_lock_held() || \ - lockdep_is_held(&dev_opp_list_lock), \ + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + !lockdep_is_held(&dev_opp_list_lock), \ "Missing rcu_read_lock() or " \ "dev_opp_list_lock protection"); \ } while (0) +static struct device_list_opp *_find_list_dev(const struct device *dev, + struct device_opp *dev_opp) +{ + struct device_list_opp *list_dev; + + list_for_each_entry(list_dev, &dev_opp->dev_list, node) + if (list_dev->dev == dev) + return list_dev; + + return NULL; +} + +static struct device_opp *_managed_opp(const struct device_node *np) +{ + struct device_opp *dev_opp; + + list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) { + if (dev_opp->np == np) { + /* + * Multiple devices can point to the same OPP table and + * so will have same node-pointer, np. + * + * But the OPPs will be considered as shared only if the + * OPP table contains a "opp-shared" property. + */ + return dev_opp->shared_opp ? 
dev_opp : NULL; + } + } + + return NULL; +} + /** * _find_device_opp() - find device_opp struct using device pointer * @dev: device pointer used to lookup device OPPs @@ -132,21 +202,18 @@ do { \ */ static struct device_opp *_find_device_opp(struct device *dev) { - struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); + struct device_opp *dev_opp; - if (unlikely(IS_ERR_OR_NULL(dev))) { + if (IS_ERR_OR_NULL(dev)) { pr_err("%s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } - list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) { - if (tmp_dev_opp->dev == dev) { - dev_opp = tmp_dev_opp; - break; - } - } + list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) + if (_find_list_dev(dev, dev_opp)) + return dev_opp; - return dev_opp; + return ERR_PTR(-ENODEV); } /** @@ -172,7 +239,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) opp_rcu_lockdep_assert(); tmp_opp = rcu_dereference(opp); - if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) + if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) pr_err("%s: Invalid parameters\n", __func__); else v = tmp_opp->u_volt; @@ -204,7 +271,7 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) opp_rcu_lockdep_assert(); tmp_opp = rcu_dereference(opp); - if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) + if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) pr_err("%s: Invalid parameters\n", __func__); else f = tmp_opp->rate; @@ -214,6 +281,94 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); /** + * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not + * @opp: opp for which turbo mode is being verified + * + * Turbo OPPs are not for normal use, and can be enabled (under certain + * conditions) for short duration of times to finish high throughput work + * quickly. Running on them for longer times may overheat the chip. + * + * Return: true if opp is turbo opp, else false. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. This means that opp which could have been fetched by + * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are + * under RCU lock. The pointer returned by the opp_find_freq family must be + * used in the same section as the usage of this function with the pointer + * prior to unlocking with rcu_read_unlock() to maintain the integrity of the + * pointer. + */ +bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) +{ + struct dev_pm_opp *tmp_opp; + + opp_rcu_lockdep_assert(); + + tmp_opp = rcu_dereference(opp); + if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) { + pr_err("%s: Invalid parameters\n", __func__); + return false; + } + + return tmp_opp->turbo; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); + +/** + * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds + * @dev: device for which we do this operation + * + * Return: This function returns the max clock latency in nanoseconds. + * + * Locking: This function takes rcu_read_lock(). 
+ */ +unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) +{ + struct device_opp *dev_opp; + unsigned long clock_latency_ns; + + rcu_read_lock(); + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) + clock_latency_ns = 0; + else + clock_latency_ns = dev_opp->clock_latency_ns_max; + + rcu_read_unlock(); + return clock_latency_ns; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); + +/** + * dev_pm_opp_get_suspend_opp() - Get suspend opp + * @dev: device for which we do this operation + * + * Return: This function returns pointer to the suspend opp if it is + * defined and available, otherwise it returns NULL. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer. + */ +struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) +{ + struct device_opp *dev_opp; + + opp_rcu_lockdep_assert(); + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp) || !dev_opp->suspend_opp || + !dev_opp->suspend_opp->available) + return NULL; + + return dev_opp->suspend_opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp); + +/** * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list * @dev: device for which we do this operation * @@ -407,18 +562,57 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, } EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); +/* List-dev Helpers */ +static void _kfree_list_dev_rcu(struct rcu_head *head) +{ + struct device_list_opp *list_dev; + + list_dev = container_of(head, struct device_list_opp, rcu_head); + kfree_rcu(list_dev, rcu_head); +} + +static void _remove_list_dev(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ + list_del(&list_dev->node); + call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head, + _kfree_list_dev_rcu); +} + +static struct device_list_opp *_add_list_dev(const struct device *dev, + struct device_opp *dev_opp) +{ + struct device_list_opp *list_dev; + + list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL); + if (!list_dev) + return NULL; + + /* Initialize list-dev */ + list_dev->dev = dev; + list_add_rcu(&list_dev->node, &dev_opp->dev_list); + + return list_dev; +} + /** - * _add_device_opp() - Allocate a new device OPP table + * _add_device_opp() - Find device OPP table or allocate a new one * @dev: device for which we do this operation * - * New device node which uses OPPs - used when multiple devices with OPP tables - * are maintained. + * It tries to find an existing table first, if it couldn't find one, it + * allocates a new OPP table and returns that. * * Return: valid device_opp pointer if success, else NULL. */ static struct device_opp *_add_device_opp(struct device *dev) { struct device_opp *dev_opp; + struct device_list_opp *list_dev; + + /* Check for existing list for 'dev' first */ + dev_opp = _find_device_opp(dev); + if (!IS_ERR(dev_opp)) + return dev_opp; /* * Allocate a new device OPP table. 
In the infrequent case where a new @@ -428,7 +622,14 @@ static struct device_opp *_add_device_opp(struct device *dev) if (!dev_opp) return NULL; - dev_opp->dev = dev; + INIT_LIST_HEAD(&dev_opp->dev_list); + + list_dev = _add_list_dev(dev, dev_opp); + if (!list_dev) { + kfree(dev_opp); + return NULL; + } + srcu_init_notifier_head(&dev_opp->srcu_head); INIT_LIST_HEAD(&dev_opp->opp_list); @@ -438,6 +639,185 @@ static struct device_opp *_add_device_opp(struct device *dev) } /** + * _kfree_device_rcu() - Free device_opp RCU handler + * @head: RCU head + */ +static void _kfree_device_rcu(struct rcu_head *head) +{ + struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head); + + kfree_rcu(device_opp, rcu_head); +} + +/** + * _remove_device_opp() - Removes a device OPP table + * @dev_opp: device OPP table to be removed. + * + * Removes/frees device OPP table it it doesn't contain any OPPs. + */ +static void _remove_device_opp(struct device_opp *dev_opp) +{ + struct device_list_opp *list_dev; + + if (!list_empty(&dev_opp->opp_list)) + return; + + list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp, + node); + + _remove_list_dev(list_dev, dev_opp); + + /* dev_list must be empty now */ + WARN_ON(!list_empty(&dev_opp->dev_list)); + + list_del_rcu(&dev_opp->node); + call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head, + _kfree_device_rcu); +} + +/** + * _kfree_opp_rcu() - Free OPP RCU handler + * @head: RCU head + */ +static void _kfree_opp_rcu(struct rcu_head *head) +{ + struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head); + + kfree_rcu(opp, rcu_head); +} + +/** + * _opp_remove() - Remove an OPP from a table definition + * @dev_opp: points back to the device_opp struct this opp belongs to + * @opp: pointer to the OPP to remove + * @notify: OPP_EVENT_REMOVE notification should be sent or not + * + * This function removes an opp definition from the opp list. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * It is assumed that the caller holds required mutex for an RCU updater + * strategy. + */ +static void _opp_remove(struct device_opp *dev_opp, + struct dev_pm_opp *opp, bool notify) +{ + /* + * Notify the changes in the availability of the operable + * frequency/voltage list. + */ + if (notify) + srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); + list_del_rcu(&opp->node); + call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); + + _remove_device_opp(dev_opp); +} + +/** + * dev_pm_opp_remove() - Remove an OPP from OPP list + * @dev: device for which we do this operation + * @freq: OPP to remove with matching 'freq' + * + * This function removes an opp from the opp list. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. 
+ */ +void dev_pm_opp_remove(struct device *dev, unsigned long freq) +{ + struct dev_pm_opp *opp; + struct device_opp *dev_opp; + bool found = false; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) + goto unlock; + + list_for_each_entry(opp, &dev_opp->opp_list, node) { + if (opp->rate == freq) { + found = true; + break; + } + } + + if (!found) { + dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", + __func__, freq); + goto unlock; + } + + _opp_remove(dev_opp, opp, true); +unlock: + mutex_unlock(&dev_opp_list_lock); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_remove); + +static struct dev_pm_opp *_allocate_opp(struct device *dev, + struct device_opp **dev_opp) +{ + struct dev_pm_opp *opp; + + /* allocate new OPP node */ + opp = kzalloc(sizeof(*opp), GFP_KERNEL); + if (!opp) + return NULL; + + INIT_LIST_HEAD(&opp->node); + + *dev_opp = _add_device_opp(dev); + if (!*dev_opp) { + kfree(opp); + return NULL; + } + + return opp; +} + +static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, + struct device_opp *dev_opp) +{ + struct dev_pm_opp *opp; + struct list_head *head = &dev_opp->opp_list; + + /* + * Insert new OPP in order of increasing frequency and discard if + * already present. + * + * Need to use &dev_opp->opp_list in the condition part of the 'for' + * loop, don't replace it with head otherwise it will become an infinite + * loop. + */ + list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { + if (new_opp->rate > opp->rate) { + head = &opp->node; + continue; + } + + if (new_opp->rate < opp->rate) + break; + + /* Duplicate OPPs */ + dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", + __func__, opp->rate, opp->u_volt, opp->available, + new_opp->rate, new_opp->u_volt, new_opp->available); + + return opp->available && new_opp->u_volt == opp->u_volt ? + 0 : -EEXIST; + } + + new_opp->dev_opp = dev_opp; + list_add_rcu(&new_opp->node, head); + + return 0; +} + +/** * _opp_add_dynamic() - Allocate a dynamic OPP. * @dev: device for which we do this operation * @freq: Frequency in Hz for this OPP @@ -467,64 +847,29 @@ static struct device_opp *_add_device_opp(struct device *dev) static int _opp_add_dynamic(struct device *dev, unsigned long freq, long u_volt, bool dynamic) { - struct device_opp *dev_opp = NULL; - struct dev_pm_opp *opp, *new_opp; - struct list_head *head; + struct device_opp *dev_opp; + struct dev_pm_opp *new_opp; int ret; - /* allocate new OPP node */ - new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL); - if (!new_opp) - return -ENOMEM; - /* Hold our list modification lock here */ mutex_lock(&dev_opp_list_lock); + new_opp = _allocate_opp(dev, &dev_opp); + if (!new_opp) { + ret = -ENOMEM; + goto unlock; + } + /* populate the opp table */ new_opp->rate = freq; new_opp->u_volt = u_volt; new_opp->available = true; new_opp->dynamic = dynamic; - /* Check for existing list for 'dev' */ - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) { - dev_opp = _add_device_opp(dev); - if (!dev_opp) { - ret = -ENOMEM; - goto free_opp; - } - - head = &dev_opp->opp_list; - goto list_add; - } - - /* - * Insert new OPP in order of increasing frequency - * and discard if already present - */ - head = &dev_opp->opp_list; - list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { - if (new_opp->rate <= opp->rate) - break; - else - head = &opp->node; - } - - /* Duplicate OPPs ? 
*/ - if (new_opp->rate == opp->rate) { - ret = opp->available && new_opp->u_volt == opp->u_volt ? - 0 : -EEXIST; - - dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", - __func__, opp->rate, opp->u_volt, opp->available, - new_opp->rate, new_opp->u_volt, new_opp->available); + ret = _opp_add(dev, new_opp, dev_opp); + if (ret) goto free_opp; - } -list_add: - new_opp->dev_opp = dev_opp; - list_add_rcu(&new_opp->node, head); mutex_unlock(&dev_opp_list_lock); /* @@ -535,20 +880,52 @@ list_add: return 0; free_opp: + _opp_remove(dev_opp, new_opp, false); +unlock: mutex_unlock(&dev_opp_list_lock); - kfree(new_opp); return ret; } +/* TODO: Support multiple regulators */ +static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev) +{ + u32 microvolt[3] = {0}; + int count, ret; + + count = of_property_count_u32_elems(opp->np, "opp-microvolt"); + if (!count) + return 0; + + /* There can be one or three elements here */ + if (count != 1 && count != 3) { + dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", + __func__, count); + return -EINVAL; + } + + ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt, + count); + if (ret) { + dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__, + ret); + return -EINVAL; + } + + opp->u_volt = microvolt[0]; + opp->u_volt_min = microvolt[1]; + opp->u_volt_max = microvolt[2]; + + return 0; +} + /** - * dev_pm_opp_add() - Add an OPP table from a table definitions + * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) * @dev: device for which we do this operation - * @freq: Frequency in Hz for this OPP - * @u_volt: Voltage in uVolts for this OPP + * @np: device node * - * This function adds an opp definition to the opp list and returns status. - * The opp is made available by default and it can be controlled using - * dev_pm_opp_enable/disable functions. + * This function adds an opp definition to the opp list and returns status. The + * opp can be controlled using dev_pm_opp_enable/disable functions and may be + * removed by dev_pm_opp_remove. * * Locking: The internal device_opp and opp structures are RCU protected. 
* Hence this function internally uses RCU updater strategy with mutex locks @@ -562,108 +939,119 @@ free_opp: * -EEXIST Freq are same and volt are different OR * Duplicate OPPs (both freq and volt are same) and !opp->available * -ENOMEM Memory allocation failure + * -EINVAL Failed parsing the OPP node */ -int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) +static int _opp_add_static_v2(struct device *dev, struct device_node *np) { - return _opp_add_dynamic(dev, freq, u_volt, true); -} -EXPORT_SYMBOL_GPL(dev_pm_opp_add); + struct device_opp *dev_opp; + struct dev_pm_opp *new_opp; + u64 rate; + u32 val; + int ret; -/** - * _kfree_opp_rcu() - Free OPP RCU handler - * @head: RCU head - */ -static void _kfree_opp_rcu(struct rcu_head *head) -{ - struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head); + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); - kfree_rcu(opp, rcu_head); -} + new_opp = _allocate_opp(dev, &dev_opp); + if (!new_opp) { + ret = -ENOMEM; + goto unlock; + } -/** - * _kfree_device_rcu() - Free device_opp RCU handler - * @head: RCU head - */ -static void _kfree_device_rcu(struct rcu_head *head) -{ - struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head); + ret = of_property_read_u64(np, "opp-hz", &rate); + if (ret < 0) { + dev_err(dev, "%s: opp-hz not found\n", __func__); + goto free_opp; + } - kfree_rcu(device_opp, rcu_head); -} + /* + * Rate is defined as an unsigned long in clk API, and so casting + * explicitly to its type. Must be fixed once rate is 64 bit + * guaranteed in clk API. + */ + new_opp->rate = (unsigned long)rate; + new_opp->turbo = of_property_read_bool(np, "turbo-mode"); + + new_opp->np = np; + new_opp->dynamic = false; + new_opp->available = true; + + if (!of_property_read_u32(np, "clock-latency-ns", &val)) + new_opp->clock_latency_ns = val; + + ret = opp_get_microvolt(new_opp, dev); + if (ret) + goto free_opp; + + if (!of_property_read_u32(new_opp->np, "opp-microamp", &val)) + new_opp->u_amp = val; + + ret = _opp_add(dev, new_opp, dev_opp); + if (ret) + goto free_opp; + + /* OPP to select on device suspend */ + if (of_property_read_bool(np, "opp-suspend")) { + if (dev_opp->suspend_opp) + dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n", + __func__, dev_opp->suspend_opp->rate, + new_opp->rate); + else + dev_opp->suspend_opp = new_opp; + } + + if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max) + dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns; + + mutex_unlock(&dev_opp_list_lock); + + pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", + __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt, + new_opp->u_volt_min, new_opp->u_volt_max, + new_opp->clock_latency_ns); -/** - * _opp_remove() - Remove an OPP from a table definition - * @dev_opp: points back to the device_opp struct this opp belongs to - * @opp: pointer to the OPP to remove - * - * This function removes an opp definition from the opp list. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * It is assumed that the caller holds required mutex for an RCU updater - * strategy. - */ -static void _opp_remove(struct device_opp *dev_opp, - struct dev_pm_opp *opp) -{ /* * Notify the changes in the availability of the operable * frequency/voltage list. 
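	 *
	 * Listeners subscribe through the head returned by
	 * dev_pm_opp_get_notifier() and receive OPP_EVENT_ADD from here; a
	 * minimal sketch ("my_nb" is a hypothetical, illustrative
	 * notifier_block):
	 *
	 *	struct srcu_notifier_head *nh = dev_pm_opp_get_notifier(dev);
	 *
	 *	if (!IS_ERR(nh))
	 *		srcu_notifier_chain_register(nh, &my_nb);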
 */
-	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
-	list_del_rcu(&opp->node);
-	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+	return 0;
 
-	if (list_empty(&dev_opp->opp_list)) {
-		list_del_rcu(&dev_opp->node);
-		call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
-			  _kfree_device_rcu);
-	}
+free_opp:
+	_opp_remove(dev_opp, new_opp, false);
+unlock:
+	mutex_unlock(&dev_opp_list_lock);
+	return ret;
 }
 
 /**
- * dev_pm_opp_remove() - Remove an OPP from OPP list
+ * dev_pm_opp_add() - Add an OPP table from a table definition
  * @dev: device for which we do this operation
- * @freq: OPP to remove with matching 'freq'
+ * @freq: Frequency in Hz for this OPP
+ * @u_volt: Voltage in uVolts for this OPP
  *
- * This function removes an opp from the opp list.
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * dev_pm_opp_enable/disable functions.
  *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
+ *
+ * Return:
+ * 0		On success OR
+ *		Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST	Freq are same and volt are different OR
+ *		Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM	Memory allocation failure
 */
-void dev_pm_opp_remove(struct device *dev, unsigned long freq)
+int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 {
-	struct dev_pm_opp *opp;
-	struct device_opp *dev_opp;
-	bool found = false;
-
-	/* Hold our list modification lock here */
-	mutex_lock(&dev_opp_list_lock);
-
-	dev_opp = _find_device_opp(dev);
-	if (IS_ERR(dev_opp))
-		goto unlock;
-
-	list_for_each_entry(opp, &dev_opp->opp_list, node) {
-		if (opp->rate == freq) {
-			found = true;
-			break;
-		}
-	}
-
-	if (!found) {
-		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
-			 __func__, freq);
-		goto unlock;
-	}
-
-	_opp_remove(dev_opp, opp);
-unlock:
-	mutex_unlock(&dev_opp_list_lock);
+	return _opp_add_dynamic(dev, freq, u_volt, true);
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+EXPORT_SYMBOL_GPL(dev_pm_opp_add);
 
 /**
  * _opp_set_availability() - helper to set the availability of an opp
@@ -825,28 +1213,179 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
 
 #ifdef CONFIG_OF
 /**
- * of_init_opp_table() - Initialize opp table from device tree
+ * of_free_opp_table() - Free OPP table entries created from static DT entries
  * @dev: device pointer used to lookup device OPPs.
  *
- * Register the initial OPP table with the OPP library for given device.
+ * Free OPPs created using static entries present in DT.
  *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
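 *
 * A minimal sketch of the expected pairing (illustrative; error handling
 * elided):
 *
 *	ret = of_init_opp_table(dev);	(at probe)
 *	...
 *	of_free_opp_table(dev);		(at remove, or on a probe error path)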
- * - * Return: - * 0 On success OR - * Duplicate OPPs (both freq and volt are same) and opp->available - * -EEXIST Freq are same and volt are different OR - * Duplicate OPPs (both freq and volt are same) and !opp->available - * -ENOMEM Memory allocation failure - * -ENODEV when 'operating-points' property is not found or is invalid data - * in device node. - * -ENODATA when empty 'operating-points' property is found */ -int of_init_opp_table(struct device *dev) +void of_free_opp_table(struct device *dev) +{ + struct device_opp *dev_opp; + struct dev_pm_opp *opp, *tmp; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + /* Check for existing list for 'dev' */ + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) { + int error = PTR_ERR(dev_opp); + + if (error != -ENODEV) + WARN(1, "%s: dev_opp: %d\n", + IS_ERR_OR_NULL(dev) ? + "Invalid device" : dev_name(dev), + error); + goto unlock; + } + + /* Find if dev_opp manages a single device */ + if (list_is_singular(&dev_opp->dev_list)) { + /* Free static OPPs */ + list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) { + if (!opp->dynamic) + _opp_remove(dev_opp, opp, true); + } + } else { + _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp); + } + +unlock: + mutex_unlock(&dev_opp_list_lock); +} +EXPORT_SYMBOL_GPL(of_free_opp_table); + +void of_cpumask_free_opp_table(cpumask_var_t cpumask) +{ + struct device *cpu_dev; + int cpu; + + WARN_ON(cpumask_empty(cpumask)); + + for_each_cpu(cpu, cpumask) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) { + pr_err("%s: failed to get cpu%d device\n", __func__, + cpu); + continue; + } + + of_free_opp_table(cpu_dev); + } +} +EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table); + +/* Returns opp descriptor node from its phandle. Caller must do of_node_put() */ +static struct device_node * +_of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop) +{ + struct device_node *opp_np; + + opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value)); + if (!opp_np) { + dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n", + __func__, prop->name); + return ERR_PTR(-EINVAL); + } + + return opp_np; +} + +/* Returns opp descriptor node for a device. Caller must do of_node_put() */ +static struct device_node *_of_get_opp_desc_node(struct device *dev) +{ + const struct property *prop; + + prop = of_find_property(dev->of_node, "operating-points-v2", NULL); + if (!prop) + return ERR_PTR(-ENODEV); + if (!prop->value) + return ERR_PTR(-ENODATA); + + /* + * TODO: Support for multiple OPP tables. + * + * There should be only ONE phandle present in "operating-points-v2" + * property. 
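+	 *
+	 * i.e. a consumer node of this (illustrative) shape, where the
+	 * "cpu0_opp_table" label is hypothetical:
+	 *
+	 *	cpu@0 {
+	 *		operating-points-v2 = <&cpu0_opp_table>;
+	 *	};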
+	 */
+	if (prop->length != sizeof(__be32)) {
+		dev_err(dev, "%s: Invalid opp desc phandle\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return _of_get_opp_desc_node_from_prop(dev, prop);
+}
+
+/* Initializes OPP tables based on new bindings */
+static int _of_init_opp_table_v2(struct device *dev,
+				 const struct property *prop)
+{
+	struct device_node *opp_np, *np;
+	struct device_opp *dev_opp;
+	int ret = 0, count = 0;
+
+	if (!prop->value)
+		return -ENODATA;
+
+	/* Get opp node */
+	opp_np = _of_get_opp_desc_node_from_prop(dev, prop);
+	if (IS_ERR(opp_np))
+		return PTR_ERR(opp_np);
+
+	dev_opp = _managed_opp(opp_np);
+	if (dev_opp) {
+		/* OPPs are already managed */
+		if (!_add_list_dev(dev, dev_opp))
+			ret = -ENOMEM;
+		goto put_opp_np;
+	}
+
+	/* We have opp-list node now, iterate over it and add OPPs */
+	for_each_available_child_of_node(opp_np, np) {
+		count++;
+
+		ret = _opp_add_static_v2(dev, np);
+		if (ret) {
+			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+				ret);
+			goto free_table;
+		}
+	}
+
+	/* There should be one or more OPPs defined */
+	if (WARN_ON(!count)) {
+		ret = -ENOENT;
+		goto put_opp_np;
+	}
+
+	dev_opp = _find_device_opp(dev);
+	if (WARN_ON(IS_ERR(dev_opp))) {
+		ret = PTR_ERR(dev_opp);
+		goto free_table;
+	}
+
+	dev_opp->np = opp_np;
+	dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+
+	of_node_put(opp_np);
+	return 0;
+
+free_table:
+	of_free_opp_table(dev);
+put_opp_np:
+	of_node_put(opp_np);
+
+	return ret;
+}
+
+/* Initializes OPP tables based on the old, deprecated bindings */
+static int _of_init_opp_table_v1(struct device *dev)
 {
 	const struct property *prop;
 	const __be32 *val;
@@ -881,47 +1420,177 @@ int of_init_opp_table(struct device *dev)
 	return 0;
 }
-EXPORT_SYMBOL_GPL(of_init_opp_table);
 
 /**
- * of_free_opp_table() - Free OPP table entries created from static DT entries
+ * of_init_opp_table() - Initialize opp table from device tree
  * @dev: device pointer used to lookup device OPPs.
  *
- * Free OPPs created using static entries present in DT.
+ * Register the initial OPP table with the OPP library for given device.
  *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
+ *
+ * Return:
+ * 0		On success OR
+ *		Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST	Freq are same and volt are different OR
+ *		Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM	Memory allocation failure
+ * -ENODEV	when 'operating-points' property is not found or is invalid data
+ *		in device node.
+ * -ENODATA	when empty 'operating-points' property is found
+ * -EINVAL	when invalid entries are found in opp-v2 table
 */
-void of_free_opp_table(struct device *dev)
+int of_init_opp_table(struct device *dev)
 {
+	const struct property *prop;
+
+	/*
+	 * OPPs have two versions of bindings now. The older one is deprecated,
+	 * so try the new binding first.
+	 */
+	prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
+	if (!prop) {
+		/*
+		 * Try the old, deprecated bindings for backward compatibility
+		 * with older dtbs.
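+		 *
+		 * v1 entries are flat <kHz uV> pairs, e.g. an (illustrative)
+		 * node containing:
+		 *
+		 *	operating-points = <1000000 975000>, <800000 900000>;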
+		 */
+		return _of_init_opp_table_v1(dev);
+	}
+
+	return _of_init_opp_table_v2(dev, prop);
+}
+EXPORT_SYMBOL_GPL(of_init_opp_table);
+
+int of_cpumask_init_opp_table(cpumask_var_t cpumask)
+{
+	struct device *cpu_dev;
+	int cpu, ret = 0;
+
+	WARN_ON(cpumask_empty(cpumask));
+
+	for_each_cpu(cpu, cpumask) {
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			pr_err("%s: failed to get cpu%d device\n", __func__,
+			       cpu);
+			continue;
+		}
+
+		ret = of_init_opp_table(cpu_dev);
+		if (ret) {
+			pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
+			       __func__, cpu, ret);
+
+			/* Free all other OPPs */
+			of_cpumask_free_opp_table(cpumask);
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table);
+
+/* Required only for V1 bindings, as v2 can manage it from DT itself */
+int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+	struct device_list_opp *list_dev;
 	struct device_opp *dev_opp;
-	struct dev_pm_opp *opp, *tmp;
+	struct device *dev;
+	int cpu, ret = 0;
 
-	/* Check for existing list for 'dev' */
-	dev_opp = _find_device_opp(dev);
+	rcu_read_lock();
+
+	dev_opp = _find_device_opp(cpu_dev);
 	if (IS_ERR(dev_opp)) {
-		int error = PTR_ERR(dev_opp);
-		if (error != -ENODEV)
-			WARN(1, "%s: dev_opp: %d\n",
-			     IS_ERR_OR_NULL(dev) ?
-					"Invalid device" : dev_name(dev),
-			     error);
-		return;
+		ret = -EINVAL;
+		goto out_rcu_read_unlock;
 	}
 
-	/* Hold our list modification lock here */
-	mutex_lock(&dev_opp_list_lock);
+	for_each_cpu(cpu, cpumask) {
+		if (cpu == cpu_dev->id)
+			continue;
 
-	/* Free static OPPs */
-	list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
-		if (!opp->dynamic)
-			_opp_remove(dev_opp, opp);
+		dev = get_cpu_device(cpu);
+		if (!dev) {
+			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+				__func__, cpu);
+			continue;
+		}
+
+		list_dev = _add_list_dev(dev, dev_opp);
+		if (!list_dev) {
+			dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+				__func__, cpu);
+			continue;
+		}
 	}
+out_rcu_read_unlock:
+	rcu_read_unlock();
 
-	mutex_unlock(&dev_opp_list_lock);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(of_free_opp_table);
+EXPORT_SYMBOL_GPL(set_cpus_sharing_opps);
+
+/*
+ * Works only for OPP v2 bindings.
+ *
+ * cpumask should be already set to mask of cpu_dev->id.
+ * Returns -ENOENT if operating-points-v2 bindings aren't supported.
+ */
+int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+	struct device_node *np, *tmp_np;
+	struct device *tcpu_dev;
+	int cpu, ret = 0;
+
+	/* Get OPP descriptor node */
+	np = _of_get_opp_desc_node(cpu_dev);
+	if (IS_ERR(np)) {
+		dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__,
+			PTR_ERR(np));
+		return -ENOENT;
+	}
+
+	/* OPPs are shared?
*/ + if (!of_property_read_bool(np, "opp-shared")) + goto put_cpu_node; + + for_each_possible_cpu(cpu) { + if (cpu == cpu_dev->id) + continue; + + tcpu_dev = get_cpu_device(cpu); + if (!tcpu_dev) { + dev_err(cpu_dev, "%s: failed to get cpu%d device\n", + __func__, cpu); + ret = -ENODEV; + goto put_cpu_node; + } + + /* Get OPP descriptor node */ + tmp_np = _of_get_opp_desc_node(tcpu_dev); + if (IS_ERR(tmp_np)) { + dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n", + __func__, PTR_ERR(tmp_np)); + ret = PTR_ERR(tmp_np); + goto put_cpu_node; + } + + /* CPUs are sharing opp node */ + if (np == tmp_np) + cpumask_set_cpu(cpu, cpumask); + + of_node_put(tmp_np); + } + +put_cpu_node: + of_node_put(np); + return ret; +} +EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps); #endif diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index f1a5d95e7b20..998fa6b23084 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -73,6 +73,8 @@ extern int pm_qos_sysfs_add_resume_latency(struct device *dev); extern void pm_qos_sysfs_remove_resume_latency(struct device *dev); extern int pm_qos_sysfs_add_flags(struct device *dev); extern void pm_qos_sysfs_remove_flags(struct device *dev); +extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev); +extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev); #else /* CONFIG_PM */ diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index e56d538d039e..7f3646e459cb 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -883,3 +883,40 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) mutex_unlock(&dev_pm_qos_mtx); return ret; } + +/** + * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace + * @dev: Device whose latency tolerance to expose + */ +int dev_pm_qos_expose_latency_tolerance(struct device *dev) +{ + int ret; + + if (!dev->power.set_latency_tolerance) + return -EINVAL; + + mutex_lock(&dev_pm_qos_sysfs_mtx); + ret = pm_qos_sysfs_add_latency_tolerance(dev); + mutex_unlock(&dev_pm_qos_sysfs_mtx); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance); + +/** + * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace + * @dev: Device whose latency tolerance to hide + */ +void dev_pm_qos_hide_latency_tolerance(struct device *dev) +{ + mutex_lock(&dev_pm_qos_sysfs_mtx); + pm_qos_sysfs_remove_latency_tolerance(dev); + mutex_unlock(&dev_pm_qos_sysfs_mtx); + + /* Remove the request from user space now */ + pm_runtime_get_sync(dev); + dev_pm_qos_update_user_latency_tolerance(dev, + PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT); + pm_runtime_put(dev); +} +EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index d2be3f9c211c..a7b46798c81d 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -738,6 +738,17 @@ void pm_qos_sysfs_remove_flags(struct device *dev) sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); } +int pm_qos_sysfs_add_latency_tolerance(struct device *dev) +{ + return sysfs_merge_group(&dev->kobj, + &pm_qos_latency_tolerance_attr_group); +} + +void pm_qos_sysfs_remove_latency_tolerance(struct device *dev) +{ + sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); +} + void rpm_sysfs_remove(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); diff --git a/drivers/base/property.c b/drivers/base/property.c index f3f6d167f3f1..841b15c5c058 100644 --- 
a/drivers/base/property.c +++ b/drivers/base/property.c
@@ -27,9 +27,10 @@
  */
 void device_add_property_set(struct device *dev, struct property_set *pset)
 {
-	if (pset)
-		pset->fwnode.type = FWNODE_PDATA;
+	if (!pset)
+		return;
+	pset->fwnode.type = FWNODE_PDATA;
 	set_secondary_fwnode(dev, &pset->fwnode);
 }
 EXPORT_SYMBOL_GPL(device_add_property_set);
@@ -461,7 +462,8 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
 		return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
 					  DEV_PROP_STRING, val, 1);
 
-	return -ENXIO;
+	return pset_prop_read_array(to_pset(fwnode), propname,
+				    DEV_PROP_STRING, val, 1);
 }
 EXPORT_SYMBOL_GPL(fwnode_property_read_string);