Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/cpu.c                     |   2
-rw-r--r--  drivers/base/firmware_class.c          |   5
-rw-r--r--  drivers/base/memory.c                  |  12
-rw-r--r--  drivers/base/platform-msi.c            |   2
-rw-r--r--  drivers/base/platform.c                |  10
-rw-r--r--  drivers/base/power/domain.c            | 284
-rw-r--r--  drivers/base/power/opp/core.c          |   3
-rw-r--r--  drivers/base/power/qos.c               |  55
-rw-r--r--  drivers/base/power/runtime.c           |  11
-rw-r--r--  drivers/base/power/wakeirq.c           |  22
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c  |   7
-rw-r--r--  drivers/base/regmap/regcache.c         |  20
-rw-r--r--  drivers/base/regmap/regmap-irq.c       |  62
-rw-r--r--  drivers/base/regmap/regmap.c           | 129
14 files changed, 322 insertions, 302 deletions
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4c28e1a09786..2c3b359b3536 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -17,6 +17,7 @@
 #include <linux/of.h>
 #include <linux/cpufeature.h>
 #include <linux/tick.h>
+#include <linux/pm_qos.h>
 
 #include "base.h"
 
@@ -376,6 +377,7 @@ int register_cpu(struct cpu *cpu, int num)
 
 	per_cpu(cpu_sys_devices, num) = &cpu->dev;
 	register_cpu_under_node(num, cpu_to_node(num));
+	dev_pm_qos_expose_latency_limit(&cpu->dev, 0);
 
 	return 0;
 }
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4497d263209f..ac350c518e0c 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 	struct firmware_buf *buf = fw_priv->buf;
 
 	__fw_load_abort(buf);
-
-	/* avoid user action after loading abort */
-	fw_priv->buf = NULL;
 }
 
 static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,
 
 	mutex_lock(&fw_lock);
 	fw_buf = fw_priv->buf;
-	if (!fw_buf)
+	if (fw_state_is_aborted(&fw_buf->fw_st))
 		goto out;
 
 	switch (loading) {
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index dacb6a8418aa..fa26ffd25fa6 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
 {
 	struct memory_block *mem = to_memory_block(dev);
 	unsigned long start_pfn, end_pfn;
+	unsigned long valid_start, valid_end, valid_pages;
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-	struct page *first_page;
 	struct zone *zone;
 	int zone_shift = 0;
 
 	start_pfn = section_nr_to_pfn(mem->start_section_nr);
 	end_pfn = start_pfn + nr_pages;
-	first_page = pfn_to_page(start_pfn);
 
 	/* The block contains more than one zone can not be offlined. */
-	if (!test_pages_in_a_zone(start_pfn, end_pfn))
+	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
 		return sprintf(buf, "none\n");
 
-	zone = page_zone(first_page);
+	zone = page_zone(pfn_to_page(valid_start));
+	valid_pages = valid_end - valid_start;
 
 	/* MMOP_ONLINE_KEEP */
 	sprintf(buf, "%s", zone->name);
 
 	/* MMOP_ONLINE_KERNEL */
-	zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
+	zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);
 	}
 
 	/* MMOP_ONLINE_MOVABLE */
-	zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
+	zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index be6a599bc0c1..0fc7c4da7756 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -206,7 +206,7 @@ platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
 	struct platform_msi_priv_data *datap;
 	/*
-	 * Limit the number of interrupts to 256 per device. Should we
+	 * Limit the number of interrupts to 2048 per device. Should we
	 * need to bump this up, DEV_ID_SHIFT should be adjusted
	 * accordingly (which would impact the max number of MSI
	 * capable devices).
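[Note: the 2048 in the corrected comment falls straight out of DEV_ID_SHIFT. platform-msi packs a device ID and a per-device MSI index into one 32-bit hwirq; with DEV_ID_SHIFT at 21, the low 32 - 21 = 11 bits carry the index, so each device can have at most 1 << 11 = 2048 MSIs. A minimal standalone sketch of that packing — calc_hwirq below is an illustrative stand-in, not the kernel's internal helper:]

#include <assert.h>
#include <stdint.h>

#define DEV_ID_SHIFT	21	/* bits of the hwirq reserved for the device ID */
#define MAX_DEV_MSIS	(1 << (32 - DEV_ID_SHIFT))	/* 1 << 11 == 2048 */

/* Pack a device ID and a per-device MSI index into a single 32-bit hwirq. */
static uint32_t calc_hwirq(uint32_t devid, uint32_t msi_index)
{
	assert(devid < (1u << DEV_ID_SHIFT));	/* bounded count of MSI-capable devices */
	assert(msi_index < MAX_DEV_MSIS);	/* at most 2048 MSIs per device */

	return (devid << (32 - DEV_ID_SHIFT)) | msi_index;
}

[Raising the per-device limit therefore takes bits away from the device ID — the trade-off the comment warns about.]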
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index c4af00385502..647e4761dbf3 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -102,6 +102,16 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
 	}
 
 	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+	if (has_acpi_companion(&dev->dev)) {
+		if (r && r->flags & IORESOURCE_DISABLED) {
+			int ret;
+
+			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
+			if (ret)
+				return ret;
+		}
+	}
+
 	/*
 	 * The resources may pass trigger flags to the irqs that need
 	 * to be set up. It so happens that the trigger flags for
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 2997026b4dfb..e697dec9d25b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -130,7 +130,7 @@ static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
 
 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
 
-	/* Warn once for each IRQ safe dev in no sleep domain */
+	/* Warn once if IRQ safe dev in no sleep domain */
 	if (ret)
 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
 				genpd->name);
@@ -201,7 +201,7 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
 	smp_mb__after_atomic();
 }
 
-static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
+static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 {
 	unsigned int state_idx = genpd->state_idx;
 	ktime_t time_start;
@@ -231,7 +231,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 	return ret;
 }
 
-static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
+static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 {
 	unsigned int state_idx = genpd->state_idx;
 	ktime_t time_start;
@@ -262,10 +262,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 }
 
 /**
- * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
+ * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
  * @genpd: PM domain to power off.
  *
- * Queue up the execution of genpd_poweroff() unless it's already been done
+ * Queue up the execution of genpd_power_off() unless it's already been done
  * before.
  */
 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
@@ -274,14 +274,101 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 }
 
 /**
- * genpd_poweron - Restore power to a given PM domain and its masters.
+ * genpd_power_off - Remove power from a given PM domain.
+ * @genpd: PM domain to power down.
+ * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
+ * RPM status of the releated device is in an intermediate state, not yet turned
+ * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
+ * be RPM_SUSPENDED, while it tries to power off the PM domain.
+ *
+ * If all of the @genpd's devices have been suspended and all of its subdomains
+ * have been powered down, remove power from @genpd.
+ */
+static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
+			   unsigned int depth)
+{
+	struct pm_domain_data *pdd;
+	struct gpd_link *link;
+	unsigned int not_suspended = 0;
+
+	/*
+	 * Do not try to power off the domain in the following situations:
+	 * (1) The domain is already in the "power off" state.
+	 * (2) System suspend is in progress.
+	 */
+	if (genpd->status == GPD_STATE_POWER_OFF
+	    || genpd->prepared_count > 0)
+		return 0;
+
+	if (atomic_read(&genpd->sd_count) > 0)
+		return -EBUSY;
+
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		enum pm_qos_flags_status stat;
+
+		stat = dev_pm_qos_flags(pdd->dev,
+					PM_QOS_FLAG_NO_POWER_OFF
+						| PM_QOS_FLAG_REMOTE_WAKEUP);
+		if (stat > PM_QOS_FLAGS_NONE)
+			return -EBUSY;
+
+		/*
+		 * Do not allow PM domain to be powered off, when an IRQ safe
+		 * device is part of a non-IRQ safe domain.
+		 */
+		if (!pm_runtime_suspended(pdd->dev) ||
+			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
+			not_suspended++;
+	}
+
+	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
+		return -EBUSY;
+
+	if (genpd->gov && genpd->gov->power_down_ok) {
+		if (!genpd->gov->power_down_ok(&genpd->domain))
+			return -EAGAIN;
+	}
+
+	if (genpd->power_off) {
+		int ret;
+
+		if (atomic_read(&genpd->sd_count) > 0)
+			return -EBUSY;
+
+		/*
+		 * If sd_count > 0 at this point, one of the subdomains hasn't
+		 * managed to call genpd_power_on() for the master yet after
+		 * incrementing it. In that case genpd_power_on() will wait
+		 * for us to drop the lock, so we can call .power_off() and let
+		 * the genpd_power_on() restore power for us (this shouldn't
+		 * happen very often).
+		 */
+		ret = _genpd_power_off(genpd, true);
+		if (ret)
+			return ret;
+	}
+
+	genpd->status = GPD_STATE_POWER_OFF;
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		genpd_lock_nested(link->master, depth + 1);
+		genpd_power_off(link->master, false, depth + 1);
+		genpd_unlock(link->master);
+	}
+
+	return 0;
+}
+
+/**
+ * genpd_power_on - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
  * @depth: nesting count for lockdep.
  *
  * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
+static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
 {
 	struct gpd_link *link;
 	int ret = 0;
@@ -300,7 +387,7 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
 		genpd_sd_counter_inc(master);
 
 		genpd_lock_nested(master, depth + 1);
-		ret = genpd_poweron(master, depth + 1);
+		ret = genpd_power_on(master, depth + 1);
 		genpd_unlock(master);
 
 		if (ret) {
@@ -309,7 +396,7 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
 		}
 	}
 
-	ret = genpd_power_on(genpd, true);
+	ret = _genpd_power_on(genpd, true);
 	if (ret)
 		goto err;
 
@@ -321,7 +408,9 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
 				 &genpd->slave_links,
 				 slave_node) {
 		genpd_sd_counter_dec(link->master);
-		genpd_queue_power_off_work(link->master);
+		genpd_lock_nested(link->master, depth + 1);
+		genpd_power_off(link->master, false, depth + 1);
+		genpd_unlock(link->master);
 	}
 
 	return ret;
@@ -368,87 +457,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 }
 
 /**
- * genpd_poweroff - Remove power from a given PM domain.
- * @genpd: PM domain to power down.
- * @is_async: PM domain is powered down from a scheduled work
- *
- * If all of the @genpd's devices have been suspended and all of its subdomains
- * have been powered down, remove power from @genpd.
- */
-static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
-{
-	struct pm_domain_data *pdd;
-	struct gpd_link *link;
-	unsigned int not_suspended = 0;
-
-	/*
-	 * Do not try to power off the domain in the following situations:
-	 * (1) The domain is already in the "power off" state.
-	 * (2) System suspend is in progress.
-	 */
-	if (genpd->status == GPD_STATE_POWER_OFF
-	    || genpd->prepared_count > 0)
-		return 0;
-
-	if (atomic_read(&genpd->sd_count) > 0)
-		return -EBUSY;
-
-	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
-		enum pm_qos_flags_status stat;
-
-		stat = dev_pm_qos_flags(pdd->dev,
-					PM_QOS_FLAG_NO_POWER_OFF
-						| PM_QOS_FLAG_REMOTE_WAKEUP);
-		if (stat > PM_QOS_FLAGS_NONE)
-			return -EBUSY;
-
-		/*
-		 * Do not allow PM domain to be powered off, when an IRQ safe
-		 * device is part of a non-IRQ safe domain.
-		 */
-		if (!pm_runtime_suspended(pdd->dev) ||
-			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
-			not_suspended++;
-	}
-
-	if (not_suspended > 1 || (not_suspended == 1 && is_async))
-		return -EBUSY;
-
-	if (genpd->gov && genpd->gov->power_down_ok) {
-		if (!genpd->gov->power_down_ok(&genpd->domain))
-			return -EAGAIN;
-	}
-
-	if (genpd->power_off) {
-		int ret;
-
-		if (atomic_read(&genpd->sd_count) > 0)
-			return -EBUSY;
-
-		/*
-		 * If sd_count > 0 at this point, one of the subdomains hasn't
-		 * managed to call genpd_poweron() for the master yet after
-		 * incrementing it. In that case genpd_poweron() will wait
-		 * for us to drop the lock, so we can call .power_off() and let
-		 * the genpd_poweron() restore power for us (this shouldn't
-		 * happen very often).
-		 */
-		ret = genpd_power_off(genpd, true);
-		if (ret)
-			return ret;
-	}
-
-	genpd->status = GPD_STATE_POWER_OFF;
-
-	list_for_each_entry(link, &genpd->slave_links, slave_node) {
-		genpd_sd_counter_dec(link->master);
-		genpd_queue_power_off_work(link->master);
-	}
-
-	return 0;
-}
-
-/**
  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
  * @work: Work structure used for scheduling the execution of this function.
  */
@@ -459,7 +467,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
 
 	genpd_lock(genpd);
-	genpd_poweroff(genpd, true);
+	genpd_power_off(genpd, false, 0);
 	genpd_unlock(genpd);
 }
 
@@ -578,7 +586,7 @@ static int genpd_runtime_suspend(struct device *dev)
 		return 0;
 
 	genpd_lock(genpd);
-	genpd_poweroff(genpd, false);
+	genpd_power_off(genpd, true, 0);
 	genpd_unlock(genpd);
 
 	return 0;
@@ -618,7 +626,7 @@ static int genpd_runtime_resume(struct device *dev)
 	}
 
 	genpd_lock(genpd);
-	ret = genpd_poweron(genpd, 0);
+	ret = genpd_power_on(genpd, 0);
 	genpd_unlock(genpd);
 
 	if (ret)
@@ -658,7 +666,7 @@ err_poweroff:
 	if (!pm_runtime_is_irq_safe(dev) ||
 		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
 		genpd_lock(genpd);
-		genpd_poweroff(genpd, 0);
+		genpd_power_off(genpd, true, 0);
 		genpd_unlock(genpd);
 	}
 
@@ -674,9 +682,9 @@ static int __init pd_ignore_unused_setup(char *__unused)
 __setup("pd_ignore_unused", pd_ignore_unused_setup);
 
 /**
- * genpd_poweroff_unused - Power off all PM domains with no devices in use.
+ * genpd_power_off_unused - Power off all PM domains with no devices in use.
  */
-static int __init genpd_poweroff_unused(void)
+static int __init genpd_power_off_unused(void)
 {
 	struct generic_pm_domain *genpd;
 
@@ -694,7 +702,7 @@ static int __init genpd_poweroff_unused(void)
 	return 0;
 }
-late_initcall(genpd_poweroff_unused);
+late_initcall(genpd_power_off_unused);
 
 #if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
 
@@ -727,18 +735,20 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
 }
 
 /**
- * genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
+ * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
+ * @use_lock: use the lock.
+ * @depth: nesting count for lockdep.
  *
  * Check if the given PM domain can be powered off (during system suspend or
  * hibernation) and do that if so. Also, in that case propagate to its masters.
  *
  * This function is only called in "noirq" and "syscore" stages of system power
- * transitions, so it need not acquire locks (all of the "noirq" callbacks are
- * executed sequentially, so it is guaranteed that it will never run twice in
- * parallel).
+ * transitions. The "noirq" callbacks may be executed asynchronously, thus in
+ * these cases the lock must be held.
  */
-static void genpd_sync_poweroff(struct generic_pm_domain *genpd)
+static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
+				 unsigned int depth)
 {
 	struct gpd_link *link;
 
@@ -751,26 +761,35 @@
 	/* Choose the deepest state when suspending */
 	genpd->state_idx = genpd->state_count - 1;
-	genpd_power_off(genpd, false);
+	_genpd_power_off(genpd, false);
 
 	genpd->status = GPD_STATE_POWER_OFF;
 
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 		genpd_sd_counter_dec(link->master);
-		genpd_sync_poweroff(link->master);
+
+		if (use_lock)
+			genpd_lock_nested(link->master, depth + 1);
+
+		genpd_sync_power_off(link->master, use_lock, depth + 1);
+
+		if (use_lock)
+			genpd_unlock(link->master);
 	}
 }
 
 /**
- * genpd_sync_poweron - Synchronously power on a PM domain and its masters.
+ * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
  * @genpd: PM domain to power on.
+ * @use_lock: use the lock.
+ * @depth: nesting count for lockdep.
  *
  * This function is only called in "noirq" and "syscore" stages of system power
- * transitions, so it need not acquire locks (all of the "noirq" callbacks are
- * executed sequentially, so it is guaranteed that it will never run twice in
- * parallel).
+ * transitions. The "noirq" callbacks may be executed asynchronously, thus in
+ * these cases the lock must be held.
  */
-static void genpd_sync_poweron(struct generic_pm_domain *genpd)
+static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
+				unsigned int depth)
 {
 	struct gpd_link *link;
 
@@ -778,11 +797,18 @@
 		return;
 
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
-		genpd_sync_poweron(link->master);
 		genpd_sd_counter_inc(link->master);
+
+		if (use_lock)
+			genpd_lock_nested(link->master, depth + 1);
+
+		genpd_sync_power_on(link->master, use_lock, depth + 1);
+
+		if (use_lock)
+			genpd_unlock(link->master);
 	}
 
-	genpd_power_on(genpd, false);
+	_genpd_power_on(genpd, false);
 
 	genpd->status = GPD_STATE_ACTIVE;
 }
@@ -888,13 +914,10 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 			return ret;
 	}
 
-	/*
-	 * Since all of the "noirq" callbacks are executed sequentially, it is
-	 * guaranteed that this function will never run twice in parallel for
-	 * the same PM domain, so it is not necessary to use locking here.
-	 */
+	genpd_lock(genpd);
 	genpd->suspended_count++;
-	genpd_sync_poweroff(genpd);
+	genpd_sync_power_off(genpd, true, 0);
+	genpd_unlock(genpd);
 
 	return 0;
 }
@@ -919,13 +942,10 @@ static int pm_genpd_resume_noirq(struct device *dev)
 	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
 		return 0;
 
-	/*
-	 * Since all of the "noirq" callbacks are executed sequentially, it is
-	 * guaranteed that this function will never run twice in parallel for
-	 * the same PM domain, so it is not necessary to use locking here.
-	 */
-	genpd_sync_poweron(genpd);
+	genpd_lock(genpd);
+	genpd_sync_power_on(genpd, true, 0);
 	genpd->suspended_count--;
+	genpd_unlock(genpd);
 
 	if (genpd->dev_ops.stop && genpd->dev_ops.start)
 		ret = pm_runtime_force_resume(dev);
@@ -1002,22 +1022,20 @@ static int pm_genpd_restore_noirq(struct device *dev)
 		return -EINVAL;
 
 	/*
-	 * Since all of the "noirq" callbacks are executed sequentially, it is
-	 * guaranteed that this function will never run twice in parallel for
-	 * the same PM domain, so it is not necessary to use locking here.
-	 *
 	 * At this point suspended_count == 0 means we are being run for the
 	 * first time for the given domain in the present cycle.
	 */
+	genpd_lock(genpd);
 	if (genpd->suspended_count++ == 0)
 		/*
 		 * The boot kernel might put the domain into arbitrary state,
-		 * so make it appear as powered off to genpd_sync_poweron(),
+		 * so make it appear as powered off to genpd_sync_power_on(),
 		 * so that it tries to power it on in case it was really off.
 		 */
 		genpd->status = GPD_STATE_POWER_OFF;
 
-	genpd_sync_poweron(genpd);
+	genpd_sync_power_on(genpd, true, 0);
+	genpd_unlock(genpd);
 
 	if (genpd->dev_ops.stop && genpd->dev_ops.start)
 		ret = pm_runtime_force_resume(dev);
@@ -1072,9 +1090,9 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
 
 	if (suspend) {
 		genpd->suspended_count++;
-		genpd_sync_poweroff(genpd);
+		genpd_sync_power_off(genpd, false, 0);
 	} else {
-		genpd_sync_poweron(genpd);
+		genpd_sync_power_on(genpd, false, 0);
 		genpd->suspended_count--;
 	}
 }
@@ -2043,7 +2061,7 @@ int genpd_dev_pm_attach(struct device *dev)
 	dev->pm_domain->sync = genpd_dev_pm_sync;
 
 	genpd_lock(pd);
-	ret = genpd_poweron(pd, 0);
+	ret = genpd_power_on(pd, 0);
 	genpd_unlock(pd);
 out:
 	return ret ? -EPROBE_DEFER : 0;
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 91ec3232d630..dae61720b314 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -231,7 +231,8 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 	 * The caller needs to ensure that opp_table (and hence the regulator)
 	 * isn't freed, while we are executing this routine.
 	 */
-	for (i = 0; reg = regulators[i], i < count; i++) {
+	for (i = 0; i < count; i++) {
+		reg = regulators[i];
 		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
 		if (ret > 0)
 			latency_ns += ret * 1000;
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 58fcc758334e..f850daeffba4 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -17,12 +17,9 @@
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
- * Watchers can register different types of notification callbacks:
- *  . a per-device notification callback using the dev_pm_qos_*_notifier API.
- *    The notification chain data is stored in the per-device constraint
- *    data struct.
- *  . a system-wide notification callback using the dev_pm_qos_*_global_notifier
- *    API. The notification chain data is stored in a static variable.
+ * Watchers can register a per-device notification callback using the
+ * dev_pm_qos_*_notifier API. The notification chain data is stored in the
+ * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is tored into the device
@@ -49,8 +46,6 @@
 static DEFINE_MUTEX(dev_pm_qos_mtx);
 static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
 
-static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
-
 /**
  * __dev_pm_qos_flags - Check PM QoS flags for a given device.
  * @dev: Device to check the PM QoS flags for.
@@ -108,8 +103,7 @@ s32 __dev_pm_qos_read_value(struct device *dev)
 {
 	lockdep_assert_held(&dev->power.lock);
 
-	return IS_ERR_OR_NULL(dev->power.qos) ?
-		0 : pm_qos_read_value(&dev->power.qos->resume_latency);
+	return dev_pm_qos_raw_read_value(dev);
 }
 
 /**
@@ -135,8 +129,7 @@ s32 dev_pm_qos_read_value(struct device *dev)
  * @value: Value to assign to the QoS request.
  *
  * Internal function to update the constraints list using the PM QoS core
- * code and if needed call the per-device and the global notification
- * callbacks
+ * code and if needed call the per-device callbacks.
  */
 static int apply_constraint(struct dev_pm_qos_request *req,
 			    enum pm_qos_req_action action, s32 value)
@@ -148,12 +141,6 @@ static int apply_constraint(struct dev_pm_qos_request *req,
 	case DEV_PM_QOS_RESUME_LATENCY:
 		ret = pm_qos_update_target(&qos->resume_latency,
 					   &req->data.pnode, action, value);
-		if (ret) {
-			value = pm_qos_read_value(&qos->resume_latency);
-			blocking_notifier_call_chain(&dev_pm_notifiers,
-						     (unsigned long)value,
-						     req);
-		}
 		break;
 	case DEV_PM_QOS_LATENCY_TOLERANCE:
 		ret = pm_qos_update_target(&qos->latency_tolerance,
@@ -281,7 +268,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	dev->power.qos = ERR_PTR(-ENODEV);
 	spin_unlock_irq(&dev->power.lock);
 
-	kfree(c->notifiers);
+	kfree(qos->resume_latency.notifiers);
 	kfree(qos);
 
  out:
@@ -536,36 +523,6 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
 
 /**
- * dev_pm_qos_add_global_notifier - sets notification entry for changes to
- * target value of the PM QoS constraints for any device
- *
- * @notifier: notifier block managed by caller.
- *
- * Will register the notifier into a notification chain that gets called
- * upon changes to the target value for any device.
- */
-int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
-{
-	return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
-
-/**
- * dev_pm_qos_remove_global_notifier - deletes notification for changes to
- * target value of PM QoS constraints for any device
- *
- * @notifier: notifier block to be removed.
- *
- * Will remove the notifier from the notification chain that gets called
- * upon changes to the target value for any device.
- */
-int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
-{
-	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
-
-/**
  * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
  * @dev: Device whose ancestor to add the request for.
  * @req: Pointer to the preallocated handle.
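[Note: with the global notifier chain removed above, watchers track a specific device through the per-device API this file keeps. A minimal sketch of that pattern, assuming the two-argument dev_pm_qos_add_notifier()/dev_pm_qos_remove_notifier() signatures of this era; the my_* names are illustrative, not kernel symbols:]

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>

/* Called with the new effective resume-latency constraint, in microseconds. */
static int my_latency_notify(struct notifier_block *nb, unsigned long value,
			     void *unused)
{
	pr_info("resume latency constraint changed to %lu us\n", value);
	return NOTIFY_OK;
}

static struct notifier_block my_latency_nb = {
	.notifier_call = my_latency_notify,
};

/* Watch one device; the chain lives in its per-device constraint data. */
static int my_watch_device(struct device *dev)
{
	return dev_pm_qos_add_notifier(dev, &my_latency_nb);
}

/* Stop watching again, e.g. from a remove() path. */
static void my_unwatch_device(struct device *dev)
{
	dev_pm_qos_remove_notifier(dev, &my_latency_nb);
}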
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 872eac4cb1df..a14fac6a01d3 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -966,13 +966,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_idle(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -998,13 +998,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_suspend(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1029,7 +1029,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+			dev->power.runtime_status != RPM_ACTIVE);
 
 	if (rpmflags & RPM_GET_PUT)
 		atomic_inc(&dev->power.usage_count);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 404d94c6c8bc..ae0429827f31 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -141,6 +141,13 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
 	struct wake_irq *wirq = _wirq;
 	int res;
 
+	/* Maybe abort suspend? */
+	if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
+		pm_wakeup_event(wirq->dev, 0);
+
+		return IRQ_HANDLED;
+	}
+
 	/* We don't want RPM_ASYNC or RPM_NOWAIT here */
 	res = pm_runtime_resume(wirq->dev);
 	if (res < 0)
@@ -183,6 +190,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 	wirq->irq = irq;
 	irq_set_status_flags(irq, IRQ_NOAUTOEN);
 
+	/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
+	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
 	/*
 	 * Consumer device may need to power up and restore state
 	 * so we use a threaded irq.
@@ -312,8 +322,12 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
 	if (!wirq)
 		return;
 
-	if (device_may_wakeup(wirq->dev))
+	if (device_may_wakeup(wirq->dev)) {
+		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
+			enable_irq(wirq->irq);
+
 		enable_irq_wake(wirq->irq);
+	}
 }
 
 /**
@@ -328,6 +342,10 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
 	if (!wirq)
 		return;
 
-	if (device_may_wakeup(wirq->dev))
+	if (device_may_wakeup(wirq->dev)) {
 		disable_irq_wake(wirq->irq);
+
+		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
+			disable_irq_nosync(wirq->irq);
+	}
 }
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index b11af3f2c1db..b1e9aae9a5d0 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -81,7 +81,7 @@ static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
 
 	node = rbtree_ctx->root.rb_node;
 	while (node) {
-		rbnode = container_of(node, struct regcache_rbtree_node, node);
+		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
 		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
 						 &top_reg);
 		if (reg >= base_reg && reg <= top_reg) {
@@ -108,8 +108,7 @@ static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
 	parent = NULL;
 	new = &root->rb_node;
 	while (*new) {
-		rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
-					  node);
+		rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
 		/* base and top registers of the current rbnode */
 		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
 						 &top_reg_tmp);
@@ -152,7 +151,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
 
 	for (node = rb_first(&rbtree_ctx->root); node != NULL;
 	     node = rb_next(node)) {
-		n = container_of(node, struct regcache_rbtree_node, node);
+		n = rb_entry(node, struct regcache_rbtree_node, node);
 		mem_size += sizeof(*n);
 		mem_size += (n->blklen * map->cache_word_size);
 		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 4e582561e1e7..b0a0dcf32fb7 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -224,7 +224,7 @@ void regcache_exit(struct regmap *map)
 }
 
 /**
- * regcache_read: Fetch the value of a given register from the cache.
+ * regcache_read - Fetch the value of a given register from the cache.
  *
  * @map: map to configure.
  * @reg: The register index.
@@ -255,7 +255,7 @@ int regcache_read(struct regmap *map,
 }
 
 /**
- * regcache_write: Set the value of a given register in the cache.
+ * regcache_write - Set the value of a given register in the cache.
  *
  * @map: map to configure.
  * @reg: The register index.
@@ -328,7 +328,7 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
 }
 
 /**
- * regcache_sync: Sync the register cache with the hardware.
+ * regcache_sync - Sync the register cache with the hardware.
  *
  * @map: map to configure.
 *
@@ -396,7 +396,7 @@ out:
 EXPORT_SYMBOL_GPL(regcache_sync);
 
 /**
- * regcache_sync_region: Sync part of the register cache with the hardware.
+ * regcache_sync_region - Sync part of the register cache with the hardware.
  *
  * @map: map to sync.
  * @min: first register to sync
@@ -452,7 +452,7 @@ out:
 EXPORT_SYMBOL_GPL(regcache_sync_region);
 
 /**
- * regcache_drop_region: Discard part of the register cache
+ * regcache_drop_region - Discard part of the register cache
  *
  * @map: map to operate on
  * @min: first register to discard
@@ -483,10 +483,10 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
 EXPORT_SYMBOL_GPL(regcache_drop_region);
 
 /**
- * regcache_cache_only: Put a register map into cache only mode
+ * regcache_cache_only - Put a register map into cache only mode
  *
  * @map: map to configure
- * @cache_only: flag if changes should be written to the hardware
+ * @enable: flag if changes should be written to the hardware
  *
  * When a register map is marked as cache only writes to the register
  * map API will only update the register cache, they will not cause
@@ -505,7 +505,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
 EXPORT_SYMBOL_GPL(regcache_cache_only);
 
 /**
- * regcache_mark_dirty: Indicate that HW registers were reset to default values
+ * regcache_mark_dirty - Indicate that HW registers were reset to default values
  *
  * @map: map to mark
 *
@@ -527,10 +527,10 @@ void regcache_mark_dirty(struct regmap *map)
 EXPORT_SYMBOL_GPL(regcache_mark_dirty);
 
 /**
- * regcache_cache_bypass: Put a register map into cache bypass mode
+ * regcache_cache_bypass - Put a register map into cache bypass mode
  *
  * @map: map to configure
- * @cache_bypass: flag if changes should not be written to the cache
+ * @enable: flag if changes should not be written to the cache
  *
  * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not the
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index ec262476d043..cd54189f2b1d 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -398,13 +398,14 @@ static const struct irq_domain_ops regmap_domain_ops = {
 };
 
 /**
- * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
+ * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
  *
- * map:       The regmap for the device.
- * irq:       The IRQ the device uses to signal interrupts
- * irq_flags: The IRQF_ flags to use for the primary interrupt.
- * chip:      Configuration for the interrupt controller.
- * data:      Runtime data structure for the controller, allocated on success
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
@@ -659,12 +660,12 @@ err_alloc:
 EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
 
 /**
- * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
+ * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
  *
  * @irq: Primary IRQ for the device
- * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
  *
- * This function also dispose all mapped irq on chip.
+ * This function also disposes of all mapped IRQs on the chip.
 */
 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
 {
@@ -723,18 +724,19 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
 }
 
 /**
- * devm_regmap_add_irq_chip(): Resource manager regmap_add_irq_chip()
+ * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
  *
- * @dev:       The device pointer on which irq_chip belongs to.
- * @map:       The regmap for the device.
- * @irq:       The IRQ the device uses to signal interrupts
+ * @dev: The device pointer on which irq_chip belongs to.
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts
  * @irq_flags: The IRQF_ flags to use for the primary interrupt.
- * @chip:      Configuration for the interrupt controller.
- * @data:      Runtime data structure for the controller, allocated on success
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
- * The regmap_irq_chip data automatically be released when the device is
+ * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
 int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
@@ -765,11 +767,13 @@ int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
 EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
 
 /**
- * devm_regmap_del_irq_chip(): Resource managed regmap_del_irq_chip()
+ * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
  *
  * @dev: Device for which which resource was allocated.
- * @irq: Primary IRQ for the device
- * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ * @irq: Primary IRQ for the device.
+ * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
+ *
+ * A resource managed version of regmap_del_irq_chip().
 */
 void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
@@ -786,11 +790,11 @@ void devm_regmap_del_irq_chip(struct device *dev, int irq,
 EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
 
 /**
- * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
+ * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
  *
- * Useful for drivers to request their own IRQs.
+ * @data: regmap irq controller to operate on.
  *
- * @data: regmap_irq controller to operate on.
+ * Useful for drivers to request their own IRQs.
 */
 int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
 {
@@ -800,12 +804,12 @@ int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
 EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
 
 /**
- * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
+ * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
  *
- * Useful for drivers to request their own IRQs.
+ * @data: regmap irq controller to operate on.
+ * @irq: index of the interrupt requested in the chip IRQs.
  *
- * @data: regmap_irq controller to operate on.
- * @irq: index of the interrupt requested in the chip IRQs
+ * Useful for drivers to request their own IRQs.
 */
 int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
 {
@@ -818,14 +822,14 @@ int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
 EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
 
 /**
- * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
+ * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
+ *
+ * @data: regmap_irq controller to operate on.
  *
  * Useful for drivers to request their own IRQs and for integration
  * with subsystems. For ease of integration NULL is accepted as a
  * domain, allowing devices to just call this even if no domain is
  * allocated.
- *
- * @data: regmap_irq controller to operate on.
 */
 struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
 {
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index ae63bb0875ea..b9a779a4a739 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -459,7 +459,7 @@ static bool _regmap_range_add(struct regmap *map,
 
 	while (*new) {
 		struct regmap_range_node *this =
-			container_of(*new, struct regmap_range_node, node);
+			rb_entry(*new, struct regmap_range_node, node);
 
 		parent = *new;
 		if (data->range_max < this->range_min)
@@ -483,7 +483,7 @@ static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
 
 	while (node) {
 		struct regmap_range_node *this =
-			container_of(node, struct regmap_range_node, node);
+			rb_entry(node, struct regmap_range_node, node);
 
 		if (reg < this->range_min)
 			node = node->rb_left;
@@ -1091,8 +1091,7 @@ static void regmap_field_init(struct regmap_field *rm_field,
 }
 
 /**
- * devm_regmap_field_alloc(): Allocate and initialise a register field
- * in a register map.
+ * devm_regmap_field_alloc() - Allocate and initialise a register field.
  *
  * @dev: Device that will be interacted with
  * @regmap: regmap bank in which this register field is located.
@@ -1118,13 +1117,15 @@ struct regmap_field *devm_regmap_field_alloc(struct device *dev,
 EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
 
 /**
- * devm_regmap_field_free(): Free register field allocated using
- * devm_regmap_field_alloc. Usally drivers need not call this function,
- * as the memory allocated via devm will be freed as per device-driver
- * life-cyle.
+ * devm_regmap_field_free() - Free a register field allocated using
+ *                            devm_regmap_field_alloc.
  *
  * @dev: Device that will be interacted with
  * @field: regmap field which should be freed.
+ *
+ * Free register field allocated using devm_regmap_field_alloc(). Usually
+ * drivers need not call this function, as the memory allocated via devm
+ * will be freed as per device-driver life-cyle.
 */
 void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
@@ -1134,8 +1135,7 @@ void devm_regmap_field_free(struct device *dev,
 EXPORT_SYMBOL_GPL(devm_regmap_field_free);
 
 /**
- * regmap_field_alloc(): Allocate and initialise a register field
- * in a register map.
+ * regmap_field_alloc() - Allocate and initialise a register field.
  *
  * @regmap: regmap bank in which this register field is located.
  * @reg_field: Register field with in the bank.
@@ -1159,7 +1159,8 @@ struct regmap_field *regmap_field_alloc(struct regmap *regmap,
 EXPORT_SYMBOL_GPL(regmap_field_alloc);
 
 /**
- * regmap_field_free(): Free register field allocated using regmap_field_alloc
+ * regmap_field_free() - Free register field allocated using
+ *                       regmap_field_alloc.
  *
  * @field: regmap field which should be freed.
 */
@@ -1170,7 +1171,7 @@ void regmap_field_free(struct regmap_field *field)
 EXPORT_SYMBOL_GPL(regmap_field_free);
 
 /**
- * regmap_reinit_cache(): Reinitialise the current register cache
+ * regmap_reinit_cache() - Reinitialise the current register cache
  *
  * @map: Register map to operate on.
  * @config: New configuration. Only the cache data will be used.
@@ -1205,7 +1206,9 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
 EXPORT_SYMBOL_GPL(regmap_reinit_cache);
 
 /**
- * regmap_exit(): Free a previously allocated register map
+ * regmap_exit() - Free a previously allocated register map
+ *
+ * @map: Register map to operate on.
 */
 void regmap_exit(struct regmap *map)
 {
@@ -1245,7 +1248,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
 }
 
 /**
- * dev_get_regmap(): Obtain the regmap (if any) for a device
+ * dev_get_regmap() - Obtain the regmap (if any) for a device
  *
  * @dev: Device to retrieve the map for
  * @name: Optional name for the register map, usually NULL.
@@ -1268,7 +1271,7 @@ struct regmap *dev_get_regmap(struct device *dev, const char *name)
 EXPORT_SYMBOL_GPL(dev_get_regmap);
 
 /**
- * regmap_get_device(): Obtain the device from a regmap
+ * regmap_get_device() - Obtain the device from a regmap
  *
  * @map: Register map to operate on.
 *
@@ -1654,7 +1657,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
 }
 
 /**
- * regmap_write(): Write a value to a single register
+ * regmap_write() - Write a value to a single register
  *
  * @map: Register map to write to
  * @reg: Register to write to
@@ -1681,7 +1684,7 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
 EXPORT_SYMBOL_GPL(regmap_write);
 
 /**
- * regmap_write_async(): Write a value to a single register asynchronously
+ * regmap_write_async() - Write a value to a single register asynchronously
  *
  * @map: Register map to write to
  * @reg: Register to write to
@@ -1712,7 +1715,7 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
 EXPORT_SYMBOL_GPL(regmap_write_async);
 
 /**
- * regmap_raw_write(): Write raw values to one or more registers
+ * regmap_raw_write() - Write raw values to one or more registers
  *
  * @map: Register map to write to
  * @reg: Initial register to write to
@@ -1750,9 +1753,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 EXPORT_SYMBOL_GPL(regmap_raw_write);
 
 /**
- * regmap_field_update_bits_base():
- *	Perform a read/modify/write cycle on the register field
- *	with change, async, force option
+ * regmap_field_update_bits_base() - Perform a read/modify/write cycle a
+ *                                   register field.
  *
  * @field: Register field to write to
  * @mask: Bitmask to change
@@ -1761,6 +1763,9 @@ EXPORT_SYMBOL_GPL(regmap_raw_write);
  * @async: Boolean indicating asynchronously
  * @force: Boolean indicating use force update
  *
+ * Perform a read/modify/write cycle on the register field with change,
+ * async, force option.
+ *
  * A value of zero will be returned on success, a negative errno will
  * be returned in error cases.
 */
@@ -1777,9 +1782,8 @@ int regmap_field_update_bits_base(struct regmap_field *field,
 EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
 
 /**
- * regmap_fields_update_bits_base():
- *	Perform a read/modify/write cycle on the register field
- *	with change, async, force option
+ * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
+ *                                    register field with port ID
  *
  * @field: Register field to write to
  * @id: port ID
@@ -1808,8 +1812,8 @@ int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
 }
 EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
 
-/*
- * regmap_bulk_write(): Write multiple registers to the device
+/**
+ * regmap_bulk_write() - Write multiple registers to the device
  *
  * @map: Register map to write to
  * @reg: First register to be write from
@@ -2174,18 +2178,18 @@ static int _regmap_multi_reg_write(struct regmap *map,
 	return _regmap_raw_multi_reg_write(map, regs, num_regs);
 }
 
-/*
- * regmap_multi_reg_write(): Write multiple registers to the device
- *
- * where the set of register,value pairs are supplied in any order,
- * possibly not all in a single range.
+/**
+ * regmap_multi_reg_write() - Write multiple registers to the device
  *
  * @map: Register map to write to
  * @regs: Array of structures containing register,value to be written
  * @num_regs: Number of registers to write
  *
+ * Write multiple registers to the device where the set of register, value
+ * pairs are supplied in any order, possibly not all in a single range.
+ *
  * The 'normal' block write mode will send ultimately send data on the
- * target bus as R,V1,V2,V3,..,Vn where successively higer registers are
+ * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
@@ -2208,16 +2212,17 @@ int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
 }
 EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
 
-/*
- * regmap_multi_reg_write_bypassed(): Write multiple registers to the
- *                                    device but not the cache
- *
- * where the set of register are supplied in any order
+/**
+ * regmap_multi_reg_write_bypassed() - Write multiple registers to the
+ *                                     device but not the cache
  *
  * @map: Register map to write to
  * @regs: Array of structures containing register,value to be written
  * @num_regs: Number of registers to write
  *
+ * Write multiple registers to the device but not the cache where the set
+ * of register are supplied in any order.
+ *
  * This function is intended to be used for writing a large block of data
 * atomically to the device in single transfer for those I2C client devices
 * that implement this alternative block write mode.
@@ -2248,8 +2253,8 @@ int regmap_multi_reg_write_bypassed(struct regmap *map,
 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
 
 /**
- * regmap_raw_write_async(): Write raw values to one or more registers
- *                           asynchronously
+ * regmap_raw_write_async() - Write raw values to one or more registers
+ *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
@@ -2385,7 +2390,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
 }
 
 /**
- * regmap_read(): Read a value from a single register
+ * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
@@ -2412,7 +2417,7 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
 EXPORT_SYMBOL_GPL(regmap_read);
 
 /**
- * regmap_raw_read(): Read raw data from the device
+ * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
@@ -2477,7 +2482,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 EXPORT_SYMBOL_GPL(regmap_raw_read);
 
 /**
- * regmap_field_read(): Read a value to a single register field
+ * regmap_field_read() - Read a value to a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
@@ -2502,7 +2507,7 @@ int regmap_field_read(struct regmap_field *field, unsigned int *val)
 EXPORT_SYMBOL_GPL(regmap_field_read);
 
 /**
- * regmap_fields_read(): Read a value to a single register field with port ID
+ * regmap_fields_read() - Read a value to a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
@@ -2535,7 +2540,7 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
 EXPORT_SYMBOL_GPL(regmap_fields_read);
 
 /**
- * regmap_bulk_read(): Read multiple registers from the device
+ * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
@@ -2692,9 +2697,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 }
 
 /**
- * regmap_update_bits_base:
- *	Perform a read/modify/write cycle on the
- *	register map with change, async, force option
+ * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
@@ -2704,10 +2707,14 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 * @async: Boolean indicating asynchronously
 * @force: Boolean indicating use force update
 *
- * if async was true,
- * With most buses the read must be done synchronously so this is most
- * useful for devices with a cache which do not need to interact with
- * the hardware to determine the current register value.
+ * Perform a read/modify/write cycle on a register map with change, async, force
+ * options.
+ *
+ * If async is true:
+ *
+ * With most buses the read must be done synchronously so this is most useful
+ * for devices with a cache which do not need to interact with the hardware to
+ * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
@@ -2765,7 +2772,7 @@ static int regmap_async_is_done(struct regmap *map)
 }
 
 /**
- * regmap_async_complete: Ensure all asynchronous I/O has completed.
+ * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
@@ -2797,8 +2804,8 @@ int regmap_async_complete(struct regmap *map)
 EXPORT_SYMBOL_GPL(regmap_async_complete);
 
 /**
- * regmap_register_patch: Register and apply register updates to be applied
- *                        on device initialistion
+ * regmap_register_patch - Register and apply register updates to be applied
+ *                         on device initialistion
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
@@ -2855,8 +2862,10 @@ int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
 }
 EXPORT_SYMBOL_GPL(regmap_register_patch);
 
-/*
- * regmap_get_val_bytes(): Report the size of a register value
+/**
+ * regmap_get_val_bytes() - Report the size of a register value
+ *
+ * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended to for use by
 * generic infrastructure built on top of regmap.
@@ -2871,7 +2880,9 @@ int regmap_get_val_bytes(struct regmap *map)
 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
 
 /**
- * regmap_get_max_register(): Report the max register value
+ * regmap_get_max_register() - Report the max register value
+ *
+ * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended to for use by
 * generic infrastructure built on top of regmap.
@@ -2883,7 +2894,9 @@ int regmap_get_max_register(struct regmap *map)
 EXPORT_SYMBOL_GPL(regmap_get_max_register);
 
 /**
- * regmap_get_reg_stride(): Report the register address stride
+ * regmap_get_reg_stride() - Report the register address stride
+ *
+ * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended to for use by
 * generic infrastructure built on top of regmap.
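[Note: as a usage aside for the regmap field API whose kernel-doc is cleaned up above, a driver describes a bitfield once with REG_FIELD() and then reads or updates it without hand-rolled shift/mask arithmetic. A brief sketch against a hypothetical device whose enable bit sits at bit 0 of register 0x04 — the my_* names are illustrative, not kernel symbols:]

#include <linux/device.h>
#include <linux/regmap.h>

/* Hypothetical layout: an enable bit occupying bit 0 of register 0x04. */
static const struct reg_field my_enable_field = REG_FIELD(0x04, 0, 0);

static int my_enable_device(struct device *dev, struct regmap *map)
{
	struct regmap_field *f;

	/* Freed automatically on driver detach, per devm_regmap_field_free(). */
	f = devm_regmap_field_alloc(dev, map, my_enable_field);
	if (IS_ERR(f))
		return PTR_ERR(f);

	/* Read/modify/write of just this field, as update_bits_base describes. */
	return regmap_field_write(f, 1);
}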