Diffstat (limited to 'drivers/base')
-rw-r--r-- | drivers/base/devres.c                 |  10
-rw-r--r-- | drivers/base/power/domain.c           | 343
-rw-r--r-- | drivers/base/power/runtime.c          |  63
-rw-r--r-- | drivers/base/regmap/regcache-rbtree.c |  12
-rw-r--r-- | drivers/base/regmap/regmap-debugfs.c  |  12
-rw-r--r-- | drivers/base/regmap/regmap-irq.c      | 142
6 files changed, 342 insertions(+), 240 deletions(-)
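The largest hunk below (drivers/base/power/domain.c) reworks how genpd aggregates and propagates performance-state votes across a domain hierarchy. As a reading aid, here is a minimal standalone sketch of the aggregation step performed by _genpd_reeval_performance_state(); the struct dev_vote type and the helper name are hypothetical stand-ins, and the real code additionally takes votes from subdomain links while holding the genpd lock:

/* Hypothetical, simplified model of one genpd member device's vote. */
struct dev_vote {
	unsigned int performance_state;
	struct dev_vote *next;
};

/*
 * Effective domain state = max(new request, every member's vote).
 * Mirrors the early-outs in _genpd_reeval_performance_state(): a
 * request at or above the current aggregate wins without a re-scan.
 */
static unsigned int reeval_performance_state(struct dev_vote *devs,
					     unsigned int cur_aggregate,
					     unsigned int request)
{
	struct dev_vote *v;
	unsigned int state = request;

	if (state >= cur_aggregate)
		return state;

	/* Dropping below the aggregate: another vote may still hold it up. */
	for (v = devs; v; v = v->next)
		if (v->performance_state > state)
			state = v->performance_state;

	return state;
}

The design choice visible in the diff is the same: propagation to master domains only recurses (with nested locking and rollback on failure) when this aggregate actually changes.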
diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 4aaf00d2098b..e038e2b3b7ea 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -26,8 +26,14 @@ struct devres_node { struct devres { struct devres_node node; - /* -- 3 pointers */ - unsigned long long data[]; /* guarantee ull alignment */ + /* + * Some archs want to perform DMA into kmalloc caches + * and need a guaranteed alignment larger than + * the alignment of a 64-bit integer. + * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same + * buffer alignment as if it was allocated by plain kmalloc(). + */ + u8 __aligned(ARCH_KMALLOC_MINALIGN) data[]; }; struct devres_group { diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 7f38a92b444a..500de1dee967 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -239,6 +239,127 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd) static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} #endif +static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd, + unsigned int state) +{ + struct generic_pm_domain_data *pd_data; + struct pm_domain_data *pdd; + struct gpd_link *link; + + /* New requested state is same as Max requested state */ + if (state == genpd->performance_state) + return state; + + /* New requested state is higher than Max requested state */ + if (state > genpd->performance_state) + return state; + + /* Traverse all devices within the domain */ + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + pd_data = to_gpd_data(pdd); + + if (pd_data->performance_state > state) + state = pd_data->performance_state; + } + + /* + * Traverse all sub-domains within the domain. This can be + * done without any additional locking as the link->performance_state + * field is protected by the master genpd->lock, which is already taken. + * + * Also note that link->performance_state (subdomain's performance state + * requirement to master domain) is different from + * link->slave->performance_state (current performance state requirement + * of the devices/sub-domains of the subdomain) and so can have a + * different value. + * + * Note that we also take vote from powered-off sub-domains into account + * as the same is done for devices right now. 
+ */ + list_for_each_entry(link, &genpd->master_links, master_node) { + if (link->performance_state > state) + state = link->performance_state; + } + + return state; +} + +static int _genpd_set_performance_state(struct generic_pm_domain *genpd, + unsigned int state, int depth) +{ + struct generic_pm_domain *master; + struct gpd_link *link; + int master_state, ret; + + if (state == genpd->performance_state) + return 0; + + /* Propagate to masters of genpd */ + list_for_each_entry(link, &genpd->slave_links, slave_node) { + master = link->master; + + if (!master->set_performance_state) + continue; + + /* Find master's performance state */ + ret = dev_pm_opp_xlate_performance_state(genpd->opp_table, + master->opp_table, + state); + if (unlikely(ret < 0)) + goto err; + + master_state = ret; + + genpd_lock_nested(master, depth + 1); + + link->prev_performance_state = link->performance_state; + link->performance_state = master_state; + master_state = _genpd_reeval_performance_state(master, + master_state); + ret = _genpd_set_performance_state(master, master_state, depth + 1); + if (ret) + link->performance_state = link->prev_performance_state; + + genpd_unlock(master); + + if (ret) + goto err; + } + + ret = genpd->set_performance_state(genpd, state); + if (ret) + goto err; + + genpd->performance_state = state; + return 0; + +err: + /* Encountered an error, lets rollback */ + list_for_each_entry_continue_reverse(link, &genpd->slave_links, + slave_node) { + master = link->master; + + if (!master->set_performance_state) + continue; + + genpd_lock_nested(master, depth + 1); + + master_state = link->prev_performance_state; + link->performance_state = master_state; + + master_state = _genpd_reeval_performance_state(master, + master_state); + if (_genpd_set_performance_state(master, master_state, depth + 1)) { + pr_err("%s: Failed to roll back to %d performance state\n", + master->name, master_state); + } + + genpd_unlock(master); + } + + return ret; +} + /** * dev_pm_genpd_set_performance_state- Set performance state of device's power * domain. @@ -257,10 +378,9 @@ static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) { struct generic_pm_domain *genpd; - struct generic_pm_domain_data *gpd_data, *pd_data; - struct pm_domain_data *pdd; + struct generic_pm_domain_data *gpd_data; unsigned int prev; - int ret = 0; + int ret; genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) @@ -281,47 +401,11 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) prev = gpd_data->performance_state; gpd_data->performance_state = state; - /* New requested state is same as Max requested state */ - if (state == genpd->performance_state) - goto unlock; - - /* New requested state is higher than Max requested state */ - if (state > genpd->performance_state) - goto update_state; - - /* Traverse all devices within the domain */ - list_for_each_entry(pdd, &genpd->dev_list, list_node) { - pd_data = to_gpd_data(pdd); - - if (pd_data->performance_state > state) - state = pd_data->performance_state; - } - - if (state == genpd->performance_state) - goto unlock; - - /* - * We aren't propagating performance state changes of a subdomain to its - * masters as we don't have hardware that needs it. Over that, the - * performance states of subdomain and its masters may not have - * one-to-one mapping and would require additional information. We can - * get back to this once we have hardware that needs it. 
For that - * reason, we don't have to consider performance state of the subdomains - * of genpd here. - */ - -update_state: - if (genpd_status_on(genpd)) { - ret = genpd->set_performance_state(genpd, state); - if (ret) { - gpd_data->performance_state = prev; - goto unlock; - } - } - - genpd->performance_state = state; + state = _genpd_reeval_performance_state(genpd, state); + ret = _genpd_set_performance_state(genpd, state, 0); + if (ret) + gpd_data->performance_state = prev; -unlock: genpd_unlock(genpd); return ret; @@ -347,15 +431,6 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) return ret; elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); - - if (unlikely(genpd->set_performance_state)) { - ret = genpd->set_performance_state(genpd, genpd->performance_state); - if (ret) { - pr_warn("%s: Failed to set performance state %d (%d)\n", - genpd->name, genpd->performance_state, ret); - } - } - if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns) return ret; @@ -1907,12 +1982,21 @@ int of_genpd_add_provider_simple(struct device_node *np, ret); goto unlock; } + + /* + * Save table for faster processing while setting performance + * state. + */ + genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); + WARN_ON(!genpd->opp_table); } ret = genpd_add_provider(np, genpd_xlate_simple, genpd); if (ret) { - if (genpd->set_performance_state) + if (genpd->set_performance_state) { + dev_pm_opp_put_opp_table(genpd->opp_table); dev_pm_opp_of_remove_table(&genpd->dev); + } goto unlock; } @@ -1965,6 +2049,13 @@ int of_genpd_add_provider_onecell(struct device_node *np, i, ret); goto error; } + + /* + * Save table for faster processing while setting + * performance state. + */ + genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i); + WARN_ON(!genpd->opp_table); } genpd->provider = &np->fwnode; @@ -1989,8 +2080,10 @@ error: genpd->provider = NULL; genpd->has_provider = false; - if (genpd->set_performance_state) + if (genpd->set_performance_state) { + dev_pm_opp_put_opp_table(genpd->opp_table); dev_pm_opp_of_remove_table(&genpd->dev); + } } mutex_unlock(&gpd_list_lock); @@ -2024,6 +2117,7 @@ void of_genpd_del_provider(struct device_node *np) if (!gpd->set_performance_state) continue; + dev_pm_opp_put_opp_table(gpd->opp_table); dev_pm_opp_of_remove_table(&gpd->dev); } } @@ -2338,7 +2432,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); struct device *genpd_dev_pm_attach_by_id(struct device *dev, unsigned int index) { - struct device *genpd_dev; + struct device *virt_dev; int num_domains; int ret; @@ -2352,31 +2446,31 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev, return NULL; /* Allocate and register device on the genpd bus. */ - genpd_dev = kzalloc(sizeof(*genpd_dev), GFP_KERNEL); - if (!genpd_dev) + virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL); + if (!virt_dev) return ERR_PTR(-ENOMEM); - dev_set_name(genpd_dev, "genpd:%u:%s", index, dev_name(dev)); - genpd_dev->bus = &genpd_bus_type; - genpd_dev->release = genpd_release_dev; + dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev)); + virt_dev->bus = &genpd_bus_type; + virt_dev->release = genpd_release_dev; - ret = device_register(genpd_dev); + ret = device_register(virt_dev); if (ret) { - kfree(genpd_dev); + kfree(virt_dev); return ERR_PTR(ret); } /* Try to attach the device to the PM domain at the specified index. 
*/ - ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false); + ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false); if (ret < 1) { - device_unregister(genpd_dev); + device_unregister(virt_dev); return ret ? ERR_PTR(ret) : NULL; } - pm_runtime_enable(genpd_dev); - genpd_queue_power_off_work(dev_to_genpd(genpd_dev)); + pm_runtime_enable(virt_dev); + genpd_queue_power_off_work(dev_to_genpd(virt_dev)); - return genpd_dev; + return virt_dev; } EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); @@ -2521,52 +2615,36 @@ int of_genpd_parse_idle_states(struct device_node *dn, EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); /** - * of_genpd_opp_to_performance_state- Gets performance state of device's - * power domain corresponding to a DT node's "required-opps" property. + * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node. * - * @dev: Device for which the performance-state needs to be found. - * @np: DT node where the "required-opps" property is present. This can be - * the device node itself (if it doesn't have an OPP table) or a node - * within the OPP table of a device (if device has an OPP table). + * @genpd_dev: Genpd's device for which the performance-state needs to be found. + * @opp: struct dev_pm_opp of the OPP for which we need to find performance + * state. * - * Returns performance state corresponding to the "required-opps" property of - * a DT node. This calls platform specific genpd->opp_to_performance_state() - * callback to translate power domain OPP to performance state. + * Returns performance state encoded in the OPP of the genpd. This calls + * platform specific genpd->opp_to_performance_state() callback to translate + * power domain OPP to performance state. * * Returns performance state on success and 0 on failure. 
*/ -unsigned int of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *np) +unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev, + struct dev_pm_opp *opp) { - struct generic_pm_domain *genpd; - struct dev_pm_opp *opp; - int state = 0; + struct generic_pm_domain *genpd = NULL; + int state; - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return 0; + genpd = container_of(genpd_dev, struct generic_pm_domain, dev); - if (unlikely(!genpd->set_performance_state)) + if (unlikely(!genpd->opp_to_performance_state)) return 0; genpd_lock(genpd); - - opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np); - if (IS_ERR(opp)) { - dev_err(dev, "Failed to find required OPP: %ld\n", - PTR_ERR(opp)); - goto unlock; - } - state = genpd->opp_to_performance_state(genpd, opp); - dev_pm_opp_put(opp); - -unlock: genpd_unlock(genpd); return state; } -EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state); +EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state); static int __init genpd_bus_init(void) { @@ -2671,7 +2749,7 @@ exit: return 0; } -static int genpd_summary_show(struct seq_file *s, void *data) +static int summary_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd; int ret = 0; @@ -2694,7 +2772,7 @@ static int genpd_summary_show(struct seq_file *s, void *data) return ret; } -static int genpd_status_show(struct seq_file *s, void *data) +static int status_show(struct seq_file *s, void *data) { static const char * const status_lookup[] = { [GPD_STATE_ACTIVE] = "on", @@ -2721,7 +2799,7 @@ exit: return ret; } -static int genpd_sub_domains_show(struct seq_file *s, void *data) +static int sub_domains_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; struct gpd_link *link; @@ -2738,7 +2816,7 @@ static int genpd_sub_domains_show(struct seq_file *s, void *data) return ret; } -static int genpd_idle_states_show(struct seq_file *s, void *data) +static int idle_states_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; unsigned int i; @@ -2767,7 +2845,7 @@ static int genpd_idle_states_show(struct seq_file *s, void *data) return ret; } -static int genpd_active_time_show(struct seq_file *s, void *data) +static int active_time_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; ktime_t delta = 0; @@ -2787,7 +2865,7 @@ static int genpd_active_time_show(struct seq_file *s, void *data) return ret; } -static int genpd_total_idle_time_show(struct seq_file *s, void *data) +static int total_idle_time_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; ktime_t delta = 0, total = 0; @@ -2815,7 +2893,7 @@ static int genpd_total_idle_time_show(struct seq_file *s, void *data) } -static int genpd_devices_show(struct seq_file *s, void *data) +static int devices_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; struct pm_domain_data *pm_data; @@ -2841,7 +2919,7 @@ static int genpd_devices_show(struct seq_file *s, void *data) return ret; } -static int genpd_perf_state_show(struct seq_file *s, void *data) +static int perf_state_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; @@ -2854,37 +2932,14 @@ static int genpd_perf_state_show(struct seq_file *s, void *data) return 0; } -#define define_genpd_open_function(name) \ -static int genpd_##name##_open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, genpd_##name##_show, inode->i_private); \ -} - 
-define_genpd_open_function(summary); -define_genpd_open_function(status); -define_genpd_open_function(sub_domains); -define_genpd_open_function(idle_states); -define_genpd_open_function(active_time); -define_genpd_open_function(total_idle_time); -define_genpd_open_function(devices); -define_genpd_open_function(perf_state); - -#define define_genpd_debugfs_fops(name) \ -static const struct file_operations genpd_##name##_fops = { \ - .open = genpd_##name##_open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release, \ -} - -define_genpd_debugfs_fops(summary); -define_genpd_debugfs_fops(status); -define_genpd_debugfs_fops(sub_domains); -define_genpd_debugfs_fops(idle_states); -define_genpd_debugfs_fops(active_time); -define_genpd_debugfs_fops(total_idle_time); -define_genpd_debugfs_fops(devices); -define_genpd_debugfs_fops(perf_state); +DEFINE_SHOW_ATTRIBUTE(summary); +DEFINE_SHOW_ATTRIBUTE(status); +DEFINE_SHOW_ATTRIBUTE(sub_domains); +DEFINE_SHOW_ATTRIBUTE(idle_states); +DEFINE_SHOW_ATTRIBUTE(active_time); +DEFINE_SHOW_ATTRIBUTE(total_idle_time); +DEFINE_SHOW_ATTRIBUTE(devices); +DEFINE_SHOW_ATTRIBUTE(perf_state); static int __init genpd_debug_init(void) { @@ -2897,7 +2952,7 @@ static int __init genpd_debug_init(void) return -ENOMEM; d = debugfs_create_file("pm_genpd_summary", S_IRUGO, - genpd_debugfs_dir, NULL, &genpd_summary_fops); + genpd_debugfs_dir, NULL, &summary_fops); if (!d) return -ENOMEM; @@ -2907,20 +2962,20 @@ static int __init genpd_debug_init(void) return -ENOMEM; debugfs_create_file("current_state", 0444, - d, genpd, &genpd_status_fops); + d, genpd, &status_fops); debugfs_create_file("sub_domains", 0444, - d, genpd, &genpd_sub_domains_fops); + d, genpd, &sub_domains_fops); debugfs_create_file("idle_states", 0444, - d, genpd, &genpd_idle_states_fops); + d, genpd, &idle_states_fops); debugfs_create_file("active_time", 0444, - d, genpd, &genpd_active_time_fops); + d, genpd, &active_time_fops); debugfs_create_file("total_idle_time", 0444, - d, genpd, &genpd_total_idle_time_fops); + d, genpd, &total_idle_time_fops); debugfs_create_file("devices", 0444, - d, genpd, &genpd_devices_fops); + d, genpd, &devices_fops); if (genpd->set_performance_state) debugfs_create_file("perf_state", 0444, - d, genpd, &genpd_perf_state_fops); + d, genpd, &perf_state_fops); } return 0; diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index beb85c31f3fa..70624695b6d5 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -8,6 +8,8 @@ */ #include <linux/sched/mm.h> +#include <linux/ktime.h> +#include <linux/hrtimer.h> #include <linux/export.h> #include <linux/pm_runtime.h> #include <linux/pm_wakeirq.h> @@ -93,7 +95,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status) static void pm_runtime_deactivate_timer(struct device *dev) { if (dev->power.timer_expires > 0) { - del_timer(&dev->power.suspend_timer); + hrtimer_cancel(&dev->power.suspend_timer); dev->power.timer_expires = 0; } } @@ -124,12 +126,11 @@ static void pm_runtime_cancel_pending(struct device *dev) * This function may be called either with or without dev->power.lock held. * Either way it can be racy, since power.last_busy may be updated at any time. 
*/ -unsigned long pm_runtime_autosuspend_expiration(struct device *dev) +u64 pm_runtime_autosuspend_expiration(struct device *dev) { int autosuspend_delay; - long elapsed; - unsigned long last_busy; - unsigned long expires = 0; + u64 last_busy, expires = 0; + u64 now = ktime_to_ns(ktime_get()); if (!dev->power.use_autosuspend) goto out; @@ -139,19 +140,9 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev) goto out; last_busy = READ_ONCE(dev->power.last_busy); - elapsed = jiffies - last_busy; - if (elapsed < 0) - goto out; /* jiffies has wrapped around. */ - /* - * If the autosuspend_delay is >= 1 second, align the timer by rounding - * up to the nearest second. - */ - expires = last_busy + msecs_to_jiffies(autosuspend_delay); - if (autosuspend_delay >= 1000) - expires = round_jiffies(expires); - expires += !expires; - if (elapsed >= expires - last_busy) + expires = last_busy + autosuspend_delay * NSEC_PER_MSEC; + if (expires <= now) expires = 0; /* Already expired. */ out: @@ -515,7 +506,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) /* If the autosuspend_delay time hasn't expired yet, reschedule. */ if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) { - unsigned long expires = pm_runtime_autosuspend_expiration(dev); + u64 expires = pm_runtime_autosuspend_expiration(dev); if (expires != 0) { /* Pending requests need to be canceled. */ @@ -528,10 +519,20 @@ static int rpm_suspend(struct device *dev, int rpmflags) * expire; pm_suspend_timer_fn() will take care of the * rest. */ - if (!(dev->power.timer_expires && time_before_eq( - dev->power.timer_expires, expires))) { + if (!(dev->power.timer_expires && + dev->power.timer_expires <= expires)) { + /* + * We add a slack of 25% to gather wakeups + * without sacrificing the granularity. + */ + u64 slack = READ_ONCE(dev->power.autosuspend_delay) * + (NSEC_PER_MSEC >> 2); + dev->power.timer_expires = expires; - mod_timer(&dev->power.suspend_timer, expires); + hrtimer_start_range_ns(&dev->power.suspend_timer, + ns_to_ktime(expires), + slack, + HRTIMER_MODE_ABS); } dev->power.timer_autosuspends = 1; goto out; @@ -895,23 +896,25 @@ static void pm_runtime_work(struct work_struct *work) * * Check if the time is right and queue a suspend request. */ -static void pm_suspend_timer_fn(struct timer_list *t) +static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) { - struct device *dev = from_timer(dev, t, power.suspend_timer); + struct device *dev = container_of(timer, struct device, power.suspend_timer); unsigned long flags; - unsigned long expires; + u64 expires; spin_lock_irqsave(&dev->power.lock, flags); expires = dev->power.timer_expires; /* If 'expire' is after 'jiffies' we've been called too early. */ - if (expires > 0 && !time_after(expires, jiffies)) { + if (expires > 0 && expires < ktime_to_ns(ktime_get())) { dev->power.timer_expires = 0; rpm_suspend(dev, dev->power.timer_autosuspends ? (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); } spin_unlock_irqrestore(&dev->power.lock, flags); + + return HRTIMER_NORESTART; } /** @@ -922,6 +925,7 @@ static void pm_suspend_timer_fn(struct timer_list *t) int pm_schedule_suspend(struct device *dev, unsigned int delay) { unsigned long flags; + ktime_t expires; int retval; spin_lock_irqsave(&dev->power.lock, flags); @@ -938,10 +942,10 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) /* Other scheduled or pending requests need to be canceled. 
*/ pm_runtime_cancel_pending(dev); - dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); - dev->power.timer_expires += !dev->power.timer_expires; + expires = ktime_add(ktime_get(), ms_to_ktime(delay)); + dev->power.timer_expires = ktime_to_ns(expires); dev->power.timer_autosuspends = 0; - mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); + hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); out: spin_unlock_irqrestore(&dev->power.lock, flags); @@ -1491,7 +1495,8 @@ void pm_runtime_init(struct device *dev) INIT_WORK(&dev->power.work, pm_runtime_work); dev->power.timer_expires = 0; - timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0); + hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + dev->power.suspend_timer.function = pm_suspend_timer_fn; init_waitqueue_head(&dev->power.wait_queue); } diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index b1e9aae9a5d0..2e8f0144f9ab 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -177,17 +177,7 @@ static int rbtree_show(struct seq_file *s, void *ignored) return 0; } -static int rbtree_open(struct inode *inode, struct file *file) -{ - return single_open(file, rbtree_show, inode->i_private); -} - -static const struct file_operations rbtree_fops = { - .open = rbtree_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(rbtree); static void rbtree_debugfs_init(struct regmap *map) { diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 87b562e49a43..19eb454f26c3 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -435,17 +435,7 @@ static int regmap_access_show(struct seq_file *s, void *ignored) return 0; } -static int access_open(struct inode *inode, struct file *file) -{ - return single_open(file, regmap_access_show, inode->i_private); -} - -static const struct file_operations regmap_access_fops = { - .open = access_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(regmap_access); static ssize_t regmap_cache_only_write_file(struct file *file, const char __user *user_buf, diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 429ca8ed7e51..1bd1145ad8b5 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -44,6 +44,8 @@ struct regmap_irq_chip_data { unsigned int irq_reg_stride; unsigned int type_reg_stride; + + bool clear_status:1; }; static inline const @@ -77,6 +79,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data) int i, ret; u32 reg; u32 unmask_offset; + u32 val; if (d->chip->runtime_pm) { ret = pm_runtime_get_sync(map->dev); @@ -85,6 +88,20 @@ static void regmap_irq_sync_unlock(struct irq_data *data) ret); } + if (d->clear_status) { + for (i = 0; i < d->chip->num_regs; i++) { + reg = d->chip->status_base + + (i * map->reg_stride * d->irq_reg_stride); + + ret = regmap_read(map, reg, &val); + if (ret) + dev_err(d->map->dev, + "Failed to clear the interrupt status bits\n"); + } + + d->clear_status = false; + } + /* * If there's been a change in the mask write it back to the * hardware. 
We rely on the use of the regmap core cache to @@ -157,20 +174,23 @@ static void regmap_irq_sync_unlock(struct irq_data *data) } } - for (i = 0; i < d->chip->num_type_reg; i++) { - if (!d->type_buf_def[i]) - continue; - reg = d->chip->type_base + - (i * map->reg_stride * d->type_reg_stride); - if (d->chip->type_invert) - ret = regmap_irq_update_bits(d, reg, - d->type_buf_def[i], ~d->type_buf[i]); - else - ret = regmap_irq_update_bits(d, reg, - d->type_buf_def[i], d->type_buf[i]); - if (ret != 0) - dev_err(d->map->dev, "Failed to sync type in %x\n", - reg); + /* Don't update the type bits if we're using mask bits for irq type. */ + if (!d->chip->type_in_mask) { + for (i = 0; i < d->chip->num_type_reg; i++) { + if (!d->type_buf_def[i]) + continue; + reg = d->chip->type_base + + (i * map->reg_stride * d->type_reg_stride); + if (d->chip->type_invert) + ret = regmap_irq_update_bits(d, reg, + d->type_buf_def[i], ~d->type_buf[i]); + else + ret = regmap_irq_update_bits(d, reg, + d->type_buf_def[i], d->type_buf[i]); + if (ret != 0) + dev_err(d->map->dev, "Failed to sync type in %x\n", + reg); + } } if (d->chip->runtime_pm) @@ -194,8 +214,30 @@ static void regmap_irq_enable(struct irq_data *data) struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); struct regmap *map = d->map; const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); + unsigned int mask, type; + + type = irq_data->type.type_falling_val | irq_data->type.type_rising_val; - d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask; + /* + * The type_in_mask flag means that the underlying hardware uses + * separate mask bits for rising and falling edge interrupts, but + * we want to make them into a single virtual interrupt with + * configurable edge. + * + * If the interrupt we're enabling defines the falling or rising + * masks then instead of using the regular mask bits for this + * interrupt, use the value previously written to the type buffer + * at the corresponding offset in regmap_irq_set_type(). 
+ */ + if (d->chip->type_in_mask && type) + mask = d->type_buf[irq_data->reg_offset / map->reg_stride]; + else + mask = irq_data->mask; + + if (d->chip->clear_on_unmask) + d->clear_status = true; + + d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask; } static void regmap_irq_disable(struct irq_data *data) @@ -212,27 +254,42 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type) struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); struct regmap *map = d->map; const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); - int reg = irq_data->type_reg_offset / map->reg_stride; + int reg; + const struct regmap_irq_type *t = &irq_data->type; - if (!(irq_data->type_rising_mask | irq_data->type_falling_mask)) - return 0; + if ((t->types_supported & type) != type) + return -ENOTSUPP; - d->type_buf[reg] &= ~(irq_data->type_falling_mask | - irq_data->type_rising_mask); + reg = t->type_reg_offset / map->reg_stride; + + if (t->type_reg_mask) + d->type_buf[reg] &= ~t->type_reg_mask; + else + d->type_buf[reg] &= ~(t->type_falling_val | + t->type_rising_val | + t->type_level_low_val | + t->type_level_high_val); switch (type) { case IRQ_TYPE_EDGE_FALLING: - d->type_buf[reg] |= irq_data->type_falling_mask; + d->type_buf[reg] |= t->type_falling_val; break; case IRQ_TYPE_EDGE_RISING: - d->type_buf[reg] |= irq_data->type_rising_mask; + d->type_buf[reg] |= t->type_rising_val; break; case IRQ_TYPE_EDGE_BOTH: - d->type_buf[reg] |= (irq_data->type_falling_mask | - irq_data->type_rising_mask); + d->type_buf[reg] |= (t->type_falling_val | + t->type_rising_val); break; + case IRQ_TYPE_LEVEL_HIGH: + d->type_buf[reg] |= t->type_level_high_val; + break; + + case IRQ_TYPE_LEVEL_LOW: + d->type_buf[reg] |= t->type_level_low_val; + break; default: return -EINVAL; } @@ -430,12 +487,16 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, struct regmap_irq_chip_data *d; int i; int ret = -ENOMEM; + int num_type_reg; u32 reg; u32 unmask_offset; if (chip->num_regs <= 0) return -EINVAL; + if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack)) + return -EINVAL; + for (i = 0; i < chip->num_irqs; i++) { if (chip->irqs[i].reg_offset % map->reg_stride) return -EINVAL; @@ -479,13 +540,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, goto err_alloc; } - if (chip->num_type_reg) { - d->type_buf_def = kcalloc(chip->num_type_reg, - sizeof(unsigned int), GFP_KERNEL); + num_type_reg = chip->type_in_mask ? 
chip->num_regs : chip->num_type_reg; + if (num_type_reg) { + d->type_buf_def = kcalloc(num_type_reg, + sizeof(unsigned int), GFP_KERNEL); if (!d->type_buf_def) goto err_alloc; - d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int), + d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int), GFP_KERNEL); if (!d->type_buf) goto err_alloc; @@ -600,27 +662,21 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, } } - if (chip->num_type_reg) { - for (i = 0; i < chip->num_irqs; i++) { - reg = chip->irqs[i].type_reg_offset / map->reg_stride; - d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask | - chip->irqs[i].type_falling_mask; - } + if (chip->num_type_reg && !chip->type_in_mask) { for (i = 0; i < chip->num_type_reg; ++i) { if (!d->type_buf_def[i]) continue; reg = chip->type_base + (i * map->reg_stride * d->type_reg_stride); - if (chip->type_invert) - ret = regmap_irq_update_bits(d, reg, - d->type_buf_def[i], 0xFF); - else - ret = regmap_irq_update_bits(d, reg, - d->type_buf_def[i], 0x0); - if (ret != 0) { - dev_err(map->dev, - "Failed to set type in 0x%x: %x\n", + + ret = regmap_read(map, reg, &d->type_buf_def[i]); + + if (d->chip->type_invert) + d->type_buf_def[i] = ~d->type_buf_def[i]; + + if (ret) { + dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n", reg, ret); goto err_alloc; } |
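For reference, the arithmetic behind the new hrtimer-based autosuspend path in drivers/base/power/runtime.c above, as a standalone sketch in plain C: the expiry is last_busy plus the delay converted to nanoseconds, and rpm_suspend() arms the timer with a 25% slack window so nearby expirations can be coalesced without sacrificing granularity. The two helper names here are illustrative stand-ins mirroring the kernel functions, not the kernel code itself:

#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL

/*
 * Mirrors pm_runtime_autosuspend_expiration(): returns the absolute
 * expiry in ns, or 0 if the deadline has already passed (or if
 * autosuspend is disabled via a negative delay).
 */
static uint64_t autosuspend_expiration(uint64_t now_ns, uint64_t last_busy_ns,
				       int autosuspend_delay_ms)
{
	uint64_t expires;

	if (autosuspend_delay_ms < 0)
		return 0;

	expires = last_busy_ns + (uint64_t)autosuspend_delay_ms * NSEC_PER_MSEC;
	return expires > now_ns ? expires : 0;
}

/*
 * Slack used when arming the hrtimer in rpm_suspend(): 25% of the
 * delay, since NSEC_PER_MSEC >> 2 is a quarter-millisecond in ns
 * per millisecond of delay.
 */
static uint64_t autosuspend_slack(int autosuspend_delay_ms)
{
	return (uint64_t)autosuspend_delay_ms * (NSEC_PER_MSEC >> 2);
}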