Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Makefile                 |   2
-rw-r--r--  drivers/base/memory.c                 |  38
-rw-r--r--  drivers/base/platform-msi.c           |   6
-rw-r--r--  drivers/base/power/domain.c           | 180
-rw-r--r--  drivers/base/power/domain_governor.c  |   3
-rw-r--r--  drivers/base/power/runtime.c          | 145
-rw-r--r--  drivers/base/property.c               |   4
-rw-r--r--  drivers/base/regmap/Kconfig           |   6
-rw-r--r--  drivers/base/regmap/Makefile          |   1
-rw-r--r--  drivers/base/regmap/regmap-fsi.c      | 231
-rw-r--r--  drivers/base/regmap/regmap-irq.c      |  59
-rw-r--r--  drivers/base/regmap/regmap.c          |  13
12 files changed, 504 insertions, 184 deletions
diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 83217d243c25..3079bfe53d04 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -22,7 +22,7 @@ obj-$(CONFIG_REGMAP) += regmap/ obj-$(CONFIG_SOC_BUS) += soc.o obj-$(CONFIG_PINCTRL) += pinctrl.o obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o -obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o +obj-$(CONFIG_GENERIC_MSI_IRQ) += platform-msi.o obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += arch_topology.o obj-$(CONFIG_GENERIC_ARCH_NUMA) += arch_numa.o obj-$(CONFIG_ACPI) += physical_location.o diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 9aa0da991cfb..fe98fb8d94e5 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -175,6 +175,15 @@ int memory_notify(unsigned long val, void *v) return blocking_notifier_call_chain(&memory_chain, val, v); } +#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG) +static unsigned long memblk_nr_poison(struct memory_block *mem); +#else +static inline unsigned long memblk_nr_poison(struct memory_block *mem) +{ + return 0; +} +#endif + static int memory_block_online(struct memory_block *mem) { unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); @@ -183,6 +192,9 @@ static int memory_block_online(struct memory_block *mem) struct zone *zone; int ret; + if (memblk_nr_poison(mem)) + return -EHWPOISON; + zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group, start_pfn, nr_pages); @@ -864,6 +876,7 @@ void remove_memory_block_devices(unsigned long start, unsigned long size) mem = find_memory_block_by_id(block_id); if (WARN_ON_ONCE(!mem)) continue; + num_poisoned_pages_sub(-1UL, memblk_nr_poison(mem)); unregister_memory_block_under_nodes(mem); remove_memory_block(mem); } @@ -1164,3 +1177,28 @@ int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func, } return ret; } + +#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG) +void memblk_nr_poison_inc(unsigned long pfn) +{ + const unsigned long block_id = pfn_to_block_id(pfn); + struct memory_block *mem = find_memory_block_by_id(block_id); + + if (mem) + atomic_long_inc(&mem->nr_hwpoison); +} + +void memblk_nr_poison_sub(unsigned long pfn, long i) +{ + const unsigned long block_id = pfn_to_block_id(pfn); + struct memory_block *mem = find_memory_block_by_id(block_id); + + if (mem) + atomic_long_sub(i, &mem->nr_hwpoison); +} + +static unsigned long memblk_nr_poison(struct memory_block *mem) +{ + return atomic_long_read(&mem->nr_hwpoison); +} +#endif diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 12b044151298..5883e7634a2b 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -213,7 +213,7 @@ int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, if (err) return err; - err = msi_domain_alloc_irqs(dev->msi.domain, dev, nvec); + err = msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1); if (err) platform_msi_free_priv_data(dev); @@ -227,7 +227,7 @@ EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs); */ void platform_msi_domain_free_irqs(struct device *dev) { - msi_domain_free_irqs(dev->msi.domain, dev); + msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN); platform_msi_free_priv_data(dev); } EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs); @@ -325,7 +325,7 @@ void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int vir msi_lock_descs(data->dev); irq_domain_free_irqs_common(domain, virq, nr_irqs); - msi_free_msi_descs_range(data->dev, MSI_DESC_ALL, virq, virq 
+ nr_irqs - 1); + msi_free_msi_descs_range(data->dev, virq, virq + nr_irqs - 1); msi_unlock_descs(data->dev); } diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index ead135c7044c..967bcf9d415e 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -494,6 +494,31 @@ void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next) } EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup); +/** + * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd + * @dev: A device that is attached to the genpd. + * + * This routine should typically be called for a device, at the point of when a + * GENPD_NOTIFY_PRE_OFF notification has been sent for it. + * + * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no + * valid value have been set. + */ +ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev) +{ + struct generic_pm_domain *genpd; + + genpd = dev_to_genpd_safe(dev); + if (!genpd) + return KTIME_MAX; + + if (genpd->gd) + return genpd->gd->next_hrtimer; + + return KTIME_MAX; +} +EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer); + static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) { unsigned int state_idx = genpd->state_idx; @@ -939,8 +964,8 @@ static int genpd_runtime_suspend(struct device *dev) return 0; genpd_lock(genpd); - gpd_data->rpm_pstate = genpd_drop_performance_state(dev); genpd_power_off(genpd, true, 0); + gpd_data->rpm_pstate = genpd_drop_performance_state(dev); genpd_unlock(genpd); return 0; @@ -978,9 +1003,8 @@ static int genpd_runtime_resume(struct device *dev) goto out; genpd_lock(genpd); + genpd_restore_performance_state(dev, gpd_data->rpm_pstate); ret = genpd_power_on(genpd, 0); - if (!ret) - genpd_restore_performance_state(dev, gpd_data->rpm_pstate); genpd_unlock(genpd); if (ret) @@ -1018,8 +1042,8 @@ err_stop: err_poweroff: if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) { genpd_lock(genpd); - gpd_data->rpm_pstate = genpd_drop_performance_state(dev); genpd_power_off(genpd, true, 0); + gpd_data->rpm_pstate = genpd_drop_performance_state(dev); genpd_unlock(genpd); } @@ -1189,12 +1213,15 @@ static int genpd_prepare(struct device *dev) * genpd_finish_suspend - Completion of suspend or hibernation of device in an * I/O pm domain. * @dev: Device to suspend. - * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback. + * @suspend_noirq: Generic suspend_noirq callback. + * @resume_noirq: Generic resume_noirq callback. * * Stop the device and remove power from the domain if all devices in it have * been stopped. 
*/ -static int genpd_finish_suspend(struct device *dev, bool poweroff) +static int genpd_finish_suspend(struct device *dev, + int (*suspend_noirq)(struct device *dev), + int (*resume_noirq)(struct device *dev)) { struct generic_pm_domain *genpd; int ret = 0; @@ -1203,10 +1230,7 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff) if (IS_ERR(genpd)) return -EINVAL; - if (poweroff) - ret = pm_generic_poweroff_noirq(dev); - else - ret = pm_generic_suspend_noirq(dev); + ret = suspend_noirq(dev); if (ret) return ret; @@ -1217,10 +1241,7 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff) !pm_runtime_status_suspended(dev)) { ret = genpd_stop_dev(genpd, dev); if (ret) { - if (poweroff) - pm_generic_restore_noirq(dev); - else - pm_generic_resume_noirq(dev); + resume_noirq(dev); return ret; } } @@ -1244,16 +1265,20 @@ static int genpd_suspend_noirq(struct device *dev) { dev_dbg(dev, "%s()\n", __func__); - return genpd_finish_suspend(dev, false); + return genpd_finish_suspend(dev, + pm_generic_suspend_noirq, + pm_generic_resume_noirq); } /** - * genpd_resume_noirq - Start of resume of device in an I/O PM domain. + * genpd_finish_resume - Completion of resume of device in an I/O PM domain. * @dev: Device to resume. + * @resume_noirq: Generic resume_noirq callback. * * Restore power to the device's PM domain, if necessary, and start the device. */ -static int genpd_resume_noirq(struct device *dev) +static int genpd_finish_resume(struct device *dev, + int (*resume_noirq)(struct device *dev)) { struct generic_pm_domain *genpd; int ret; @@ -1265,7 +1290,7 @@ static int genpd_resume_noirq(struct device *dev) return -EINVAL; if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd)) - return pm_generic_resume_noirq(dev); + return resume_noirq(dev); genpd_lock(genpd); genpd_sync_power_on(genpd, true, 0); @@ -1283,6 +1308,19 @@ static int genpd_resume_noirq(struct device *dev) } /** + * genpd_resume_noirq - Start of resume of device in an I/O PM domain. + * @dev: Device to resume. + * + * Restore power to the device's PM domain, if necessary, and start the device. + */ +static int genpd_resume_noirq(struct device *dev) +{ + dev_dbg(dev, "%s()\n", __func__); + + return genpd_finish_resume(dev, pm_generic_resume_noirq); +} + +/** * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain. * @dev: Device to freeze. 
* @@ -1293,24 +1331,11 @@ static int genpd_resume_noirq(struct device *dev) */ static int genpd_freeze_noirq(struct device *dev) { - const struct generic_pm_domain *genpd; - int ret = 0; - dev_dbg(dev, "%s()\n", __func__); - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - ret = pm_generic_freeze_noirq(dev); - if (ret) - return ret; - - if (genpd->dev_ops.stop && genpd->dev_ops.start && - !pm_runtime_status_suspended(dev)) - ret = genpd_stop_dev(genpd, dev); - - return ret; + return genpd_finish_suspend(dev, + pm_generic_freeze_noirq, + pm_generic_thaw_noirq); } /** @@ -1322,23 +1347,9 @@ static int genpd_freeze_noirq(struct device *dev) */ static int genpd_thaw_noirq(struct device *dev) { - const struct generic_pm_domain *genpd; - int ret = 0; - dev_dbg(dev, "%s()\n", __func__); - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - if (genpd->dev_ops.stop && genpd->dev_ops.start && - !pm_runtime_status_suspended(dev)) { - ret = genpd_start_dev(genpd, dev); - if (ret) - return ret; - } - - return pm_generic_thaw_noirq(dev); + return genpd_finish_resume(dev, pm_generic_thaw_noirq); } /** @@ -1353,7 +1364,9 @@ static int genpd_poweroff_noirq(struct device *dev) { dev_dbg(dev, "%s()\n", __func__); - return genpd_finish_suspend(dev, true); + return genpd_finish_suspend(dev, + pm_generic_poweroff_noirq, + pm_generic_restore_noirq); } /** @@ -1365,40 +1378,9 @@ static int genpd_poweroff_noirq(struct device *dev) */ static int genpd_restore_noirq(struct device *dev) { - struct generic_pm_domain *genpd; - int ret = 0; - dev_dbg(dev, "%s()\n", __func__); - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - /* - * At this point suspended_count == 0 means we are being run for the - * first time for the given domain in the present cycle. - */ - genpd_lock(genpd); - if (genpd->suspended_count++ == 0) { - /* - * The boot kernel might put the domain into arbitrary state, - * so make it appear as powered off to genpd_sync_power_on(), - * so that it tries to power it on in case it was really off. 
- */ - genpd->status = GENPD_STATE_OFF; - } - - genpd_sync_power_on(genpd, true, 0); - genpd_unlock(genpd); - - if (genpd->dev_ops.stop && genpd->dev_ops.start && - !pm_runtime_status_suspended(dev)) { - ret = genpd_start_dev(genpd, dev); - if (ret) - return ret; - } - - return pm_generic_restore_noirq(dev); + return genpd_finish_resume(dev, pm_generic_restore_noirq); } /** @@ -1994,6 +1976,7 @@ static int genpd_alloc_data(struct generic_pm_domain *genpd) gd->max_off_time_ns = -1; gd->max_off_time_changed = true; gd->next_wakeup = KTIME_MAX; + gd->next_hrtimer = KTIME_MAX; } /* Use only one "off" state if there were no states declared */ @@ -2749,17 +2732,6 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev, dev->pm_domain->detach = genpd_dev_pm_detach; dev->pm_domain->sync = genpd_dev_pm_sync; - if (power_on) { - genpd_lock(pd); - ret = genpd_power_on(pd, 0); - genpd_unlock(pd); - } - - if (ret) { - genpd_remove_device(pd, dev); - return -EPROBE_DEFER; - } - /* Set the default performance state */ pstate = of_get_required_opp_performance_state(dev->of_node, index); if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) { @@ -2771,6 +2743,24 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev, goto err; dev_gpd_data(dev)->default_pstate = pstate; } + + if (power_on) { + genpd_lock(pd); + ret = genpd_power_on(pd, 0); + genpd_unlock(pd); + } + + if (ret) { + /* Drop the default performance state */ + if (dev_gpd_data(dev)->default_pstate) { + dev_pm_genpd_set_performance_state(dev, 0); + dev_gpd_data(dev)->default_pstate = 0; + } + + genpd_remove_device(pd, dev); + return -EPROBE_DEFER; + } + return 1; err: @@ -2952,6 +2942,10 @@ static int genpd_iterate_idle_states(struct device_node *dn, np = it.node; if (!of_match_node(idle_state_match, np)) continue; + + if (!of_device_is_available(np)) + continue; + if (states) { ret = genpd_parse_state(&states[i], np); if (ret) { diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 282a3a135827..cc2c3a5a6d35 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -375,6 +375,9 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) if (idle_duration_ns <= 0) return false; + /* Store the next domain_wakeup to allow consumers to use it. */ + genpd->gd->next_hrtimer = domain_wakeup; + /* * Find the deepest idle state that has its residency value satisfied * and by also taking into account the power off latency for the state. diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index b52049098d4e..50e726b6c2cf 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -243,8 +243,7 @@ void pm_runtime_set_memalloc_noio(struct device *dev, bool enable) * flag was set by any one of the descendants. */ if (!dev || (!enable && - device_for_each_child(dev, NULL, - dev_memalloc_noio))) + device_for_each_child(dev, NULL, dev_memalloc_noio))) break; } mutex_unlock(&dev_hotplug_mutex); @@ -265,15 +264,13 @@ static int rpm_check_suspend_allowed(struct device *dev) retval = -EACCES; else if (atomic_read(&dev->power.usage_count)) retval = -EAGAIN; - else if (!dev->power.ignore_children && - atomic_read(&dev->power.child_count)) + else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count)) retval = -EBUSY; /* Pending resume requests take precedence over suspends. 
*/ - else if ((dev->power.deferred_resume - && dev->power.runtime_status == RPM_SUSPENDING) - || (dev->power.request_pending - && dev->power.request == RPM_REQ_RESUME)) + else if ((dev->power.deferred_resume && + dev->power.runtime_status == RPM_SUSPENDING) || + (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME)) retval = -EAGAIN; else if (__dev_pm_qos_resume_latency(dev) == 0) retval = -EPERM; @@ -404,9 +401,9 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev) * * Do that if resume fails too. */ - if (use_links - && ((dev->power.runtime_status == RPM_SUSPENDING && !retval) - || (dev->power.runtime_status == RPM_RESUMING && retval))) { + if (use_links && + ((dev->power.runtime_status == RPM_SUSPENDING && !retval) || + (dev->power.runtime_status == RPM_RESUMING && retval))) { idx = device_links_read_lock(); __rpm_put_suppliers(dev, false); @@ -422,6 +419,38 @@ fail: } /** + * rpm_callback - Run a given runtime PM callback for a given device. + * @cb: Runtime PM callback to run. + * @dev: Device to run the callback for. + */ +static int rpm_callback(int (*cb)(struct device *), struct device *dev) +{ + int retval; + + if (dev->power.memalloc_noio) { + unsigned int noio_flag; + + /* + * Deadlock might be caused if memory allocation with + * GFP_KERNEL happens inside runtime_suspend and + * runtime_resume callbacks of one block device's + * ancestor or the block device itself. Network + * device might be thought as part of iSCSI block + * device, so network device and its ancestor should + * be marked as memalloc_noio too. + */ + noio_flag = memalloc_noio_save(); + retval = __rpm_callback(cb, dev); + memalloc_noio_restore(noio_flag); + } else { + retval = __rpm_callback(cb, dev); + } + + dev->power.runtime_error = retval; + return retval != -EACCES ? retval : -EIO; +} + +/** * rpm_idle - Notify device bus type if the device can be suspended. * @dev: Device to notify the bus type about. * @rpmflags: Flag bits. @@ -459,6 +488,7 @@ static int rpm_idle(struct device *dev, int rpmflags) /* Act as though RPM_NOWAIT is always set. */ else if (dev->power.idle_notification) retval = -EINPROGRESS; + if (retval) goto out; @@ -484,7 +514,17 @@ static int rpm_idle(struct device *dev, int rpmflags) dev->power.idle_notification = true; - retval = __rpm_callback(callback, dev); + if (dev->power.irq_safe) + spin_unlock(&dev->power.lock); + else + spin_unlock_irq(&dev->power.lock); + + retval = callback(dev); + + if (dev->power.irq_safe) + spin_lock(&dev->power.lock); + else + spin_lock_irq(&dev->power.lock); dev->power.idle_notification = false; wake_up_all(&dev->power.wait_queue); @@ -495,38 +535,6 @@ static int rpm_idle(struct device *dev, int rpmflags) } /** - * rpm_callback - Run a given runtime PM callback for a given device. - * @cb: Runtime PM callback to run. - * @dev: Device to run the callback for. - */ -static int rpm_callback(int (*cb)(struct device *), struct device *dev) -{ - int retval; - - if (dev->power.memalloc_noio) { - unsigned int noio_flag; - - /* - * Deadlock might be caused if memory allocation with - * GFP_KERNEL happens inside runtime_suspend and - * runtime_resume callbacks of one block device's - * ancestor or the block device itself. Network - * device might be thought as part of iSCSI block - * device, so network device and its ancestor should - * be marked as memalloc_noio too. 
- */ - noio_flag = memalloc_noio_save(); - retval = __rpm_callback(cb, dev); - memalloc_noio_restore(noio_flag); - } else { - retval = __rpm_callback(cb, dev); - } - - dev->power.runtime_error = retval; - return retval != -EACCES ? retval : -EIO; -} - -/** * rpm_suspend - Carry out runtime suspend of given device. * @dev: Device to suspend. * @rpmflags: Flag bits. @@ -564,12 +572,12 @@ static int rpm_suspend(struct device *dev, int rpmflags) /* Synchronous suspends are not allowed in the RPM_RESUMING state. */ if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC)) retval = -EAGAIN; + if (retval) goto out; /* If the autosuspend_delay time hasn't expired yet, reschedule. */ - if ((rpmflags & RPM_AUTO) - && dev->power.runtime_status != RPM_SUSPENDING) { + if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) { u64 expires = pm_runtime_autosuspend_expiration(dev); if (expires != 0) { @@ -584,7 +592,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) * rest. */ if (!(dev->power.timer_expires && - dev->power.timer_expires <= expires)) { + dev->power.timer_expires <= expires)) { /* * We add a slack of 25% to gather wakeups * without sacrificing the granularity. @@ -594,9 +602,9 @@ static int rpm_suspend(struct device *dev, int rpmflags) dev->power.timer_expires = expires; hrtimer_start_range_ns(&dev->power.suspend_timer, - ns_to_ktime(expires), - slack, - HRTIMER_MODE_ABS); + ns_to_ktime(expires), + slack, + HRTIMER_MODE_ABS); } dev->power.timer_autosuspends = 1; goto out; @@ -787,8 +795,8 @@ static int rpm_resume(struct device *dev, int rpmflags) goto out; } - if (dev->power.runtime_status == RPM_RESUMING - || dev->power.runtime_status == RPM_SUSPENDING) { + if (dev->power.runtime_status == RPM_RESUMING || + dev->power.runtime_status == RPM_SUSPENDING) { DEFINE_WAIT(wait); if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { @@ -815,8 +823,8 @@ static int rpm_resume(struct device *dev, int rpmflags) for (;;) { prepare_to_wait(&dev->power.wait_queue, &wait, TASK_UNINTERRUPTIBLE); - if (dev->power.runtime_status != RPM_RESUMING - && dev->power.runtime_status != RPM_SUSPENDING) + if (dev->power.runtime_status != RPM_RESUMING && + dev->power.runtime_status != RPM_SUSPENDING) break; spin_unlock_irq(&dev->power.lock); @@ -836,9 +844,9 @@ static int rpm_resume(struct device *dev, int rpmflags) */ if (dev->power.no_callbacks && !parent && dev->parent) { spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); - if (dev->parent->power.disable_depth > 0 - || dev->parent->power.ignore_children - || dev->parent->power.runtime_status == RPM_ACTIVE) { + if (dev->parent->power.disable_depth > 0 || + dev->parent->power.ignore_children || + dev->parent->power.runtime_status == RPM_ACTIVE) { atomic_inc(&dev->parent->power.child_count); spin_unlock(&dev->parent->power.lock); retval = 1; @@ -867,6 +875,7 @@ static int rpm_resume(struct device *dev, int rpmflags) parent = dev->parent; if (dev->power.irq_safe) goto skip_parent; + spin_unlock(&dev->power.lock); pm_runtime_get_noresume(parent); @@ -876,8 +885,8 @@ static int rpm_resume(struct device *dev, int rpmflags) * Resume the parent if it has runtime PM enabled and not been * set to ignore its children. 
*/ - if (!parent->power.disable_depth - && !parent->power.ignore_children) { + if (!parent->power.disable_depth && + !parent->power.ignore_children) { rpm_resume(parent, 0); if (parent->power.runtime_status != RPM_ACTIVE) retval = -EBUSY; @@ -887,6 +896,7 @@ static int rpm_resume(struct device *dev, int rpmflags) spin_lock(&dev->power.lock); if (retval) goto out; + goto repeat; } skip_parent: @@ -1291,9 +1301,9 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) * not active, has runtime PM enabled and the * 'power.ignore_children' flag unset. */ - if (!parent->power.disable_depth - && !parent->power.ignore_children - && parent->power.runtime_status != RPM_ACTIVE) { + if (!parent->power.disable_depth && + !parent->power.ignore_children && + parent->power.runtime_status != RPM_ACTIVE) { dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n", dev_name(dev), dev_name(parent)); @@ -1358,9 +1368,9 @@ static void __pm_runtime_barrier(struct device *dev) dev->power.request_pending = false; } - if (dev->power.runtime_status == RPM_SUSPENDING - || dev->power.runtime_status == RPM_RESUMING - || dev->power.idle_notification) { + if (dev->power.runtime_status == RPM_SUSPENDING || + dev->power.runtime_status == RPM_RESUMING || + dev->power.idle_notification) { DEFINE_WAIT(wait); /* Suspend, wake-up or idle notification in progress. */ @@ -1445,8 +1455,8 @@ void __pm_runtime_disable(struct device *dev, bool check_resume) * means there probably is some I/O to process and disabling runtime PM * shouldn't prevent the device from processing the I/O. */ - if (check_resume && dev->power.request_pending - && dev->power.request == RPM_REQ_RESUME) { + if (check_resume && dev->power.request_pending && + dev->power.request == RPM_REQ_RESUME) { /* * Prevent suspends and idle notifications from being carried * out after we have woken up the device. @@ -1606,6 +1616,7 @@ void pm_runtime_irq_safe(struct device *dev) { if (dev->parent) pm_runtime_get_sync(dev->parent); + spin_lock_irq(&dev->power.lock); dev->power.irq_safe = 1; spin_unlock_irq(&dev->power.lock); diff --git a/drivers/base/property.c b/drivers/base/property.c index 4d6278a84868..2a5a37fcd998 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -229,7 +229,7 @@ EXPORT_SYMBOL_GPL(device_property_read_string); * Find a given string in a string array and if it is found return the * index back. * - * Return: %0 if the property was found (success), + * Return: index, starting from %0, if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of strings, @@ -450,7 +450,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string); * Find a given string in a string array and if it is found return the * index back. * - * Return: %0 if the property was found (success), + * Return: index, starting from %0, if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of strings, diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index 159bac6c5046..cd4bb642b9de 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -4,7 +4,7 @@ # subsystems should select the appropriate symbols. 
config REGMAP - default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO) + default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI) select IRQ_DOMAIN if REGMAP_IRQ select MDIO_BUS if REGMAP_MDIO bool @@ -65,3 +65,7 @@ config REGMAP_I3C config REGMAP_SPI_AVMM tristate depends on SPI + +config REGMAP_FSI + tristate + depends on FSI diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile index 11facb32a027..6990de7ca9a9 100644 --- a/drivers/base/regmap/Makefile +++ b/drivers/base/regmap/Makefile @@ -20,3 +20,4 @@ obj-$(CONFIG_REGMAP_SCCB) += regmap-sccb.o obj-$(CONFIG_REGMAP_I3C) += regmap-i3c.o obj-$(CONFIG_REGMAP_SPI_AVMM) += regmap-spi-avmm.o obj-$(CONFIG_REGMAP_MDIO) += regmap-mdio.o +obj-$(CONFIG_REGMAP_FSI) += regmap-fsi.o diff --git a/drivers/base/regmap/regmap-fsi.c b/drivers/base/regmap/regmap-fsi.c new file mode 100644 index 000000000000..3d2f3cb31d5e --- /dev/null +++ b/drivers/base/regmap/regmap-fsi.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Register map access API - FSI support +// +// Copyright 2022 IBM Corp +// +// Author: Eddie James <eajames@linux.ibm.com> + +#include <linux/fsi.h> +#include <linux/module.h> +#include <linux/regmap.h> + +#include "internal.h" + +static int regmap_fsi32_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + u32 v; + int ret; + + ret = fsi_slave_read(context, reg, &v, sizeof(v)); + if (ret) + return ret; + + *val = v; + return 0; +} + +static int regmap_fsi32_reg_write(void *context, unsigned int reg, unsigned int val) +{ + u32 v = val; + + return fsi_slave_write(context, reg, &v, sizeof(v)); +} + +static const struct regmap_bus regmap_fsi32 = { + .reg_write = regmap_fsi32_reg_write, + .reg_read = regmap_fsi32_reg_read, +}; + +static int regmap_fsi32le_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + __be32 v; + int ret; + + ret = fsi_slave_read(context, reg, &v, sizeof(v)); + if (ret) + return ret; + + *val = be32_to_cpu(v); + return 0; +} + +static int regmap_fsi32le_reg_write(void *context, unsigned int reg, unsigned int val) +{ + __be32 v = cpu_to_be32(val); + + return fsi_slave_write(context, reg, &v, sizeof(v)); +} + +static const struct regmap_bus regmap_fsi32le = { + .reg_write = regmap_fsi32le_reg_write, + .reg_read = regmap_fsi32le_reg_read, +}; + +static int regmap_fsi16_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + u16 v; + int ret; + + ret = fsi_slave_read(context, reg, &v, sizeof(v)); + if (ret) + return ret; + + *val = v; + return 0; +} + +static int regmap_fsi16_reg_write(void *context, unsigned int reg, unsigned int val) +{ + u16 v; + + if (val > 0xffff) + return -EINVAL; + + v = val; + return fsi_slave_write(context, reg, &v, sizeof(v)); +} + +static const struct regmap_bus regmap_fsi16 = { + .reg_write = regmap_fsi16_reg_write, + .reg_read = regmap_fsi16_reg_read, +}; + +static int regmap_fsi16le_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + __be16 v; + int ret; + + ret = fsi_slave_read(context, reg, &v, sizeof(v)); + if (ret) + return ret; + + *val = be16_to_cpu(v); + return 0; +} + +static int regmap_fsi16le_reg_write(void *context, unsigned int reg, unsigned int val) +{ + __be16 v; + + 
if (val > 0xffff) + return -EINVAL; + + v = cpu_to_be16(val); + return fsi_slave_write(context, reg, &v, sizeof(v)); +} + +static const struct regmap_bus regmap_fsi16le = { + .reg_write = regmap_fsi16le_reg_write, + .reg_read = regmap_fsi16le_reg_read, +}; + +static int regmap_fsi8_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + u8 v; + int ret; + + ret = fsi_slave_read(context, reg, &v, sizeof(v)); + if (ret) + return ret; + + *val = v; + return 0; +} + +static int regmap_fsi8_reg_write(void *context, unsigned int reg, unsigned int val) +{ + u8 v; + + if (val > 0xff) + return -EINVAL; + + v = val; + return fsi_slave_write(context, reg, &v, sizeof(v)); +} + +static const struct regmap_bus regmap_fsi8 = { + .reg_write = regmap_fsi8_reg_write, + .reg_read = regmap_fsi8_reg_read, +}; + +static const struct regmap_bus *regmap_get_fsi_bus(struct fsi_device *fsi_dev, + const struct regmap_config *config) +{ + const struct regmap_bus *bus = NULL; + + if (config->reg_bits == 8 || config->reg_bits == 16 || config->reg_bits == 32) { + switch (config->val_bits) { + case 8: + bus = ®map_fsi8; + break; + case 16: + switch (regmap_get_val_endian(&fsi_dev->dev, NULL, config)) { + case REGMAP_ENDIAN_LITTLE: +#ifdef __LITTLE_ENDIAN + case REGMAP_ENDIAN_NATIVE: +#endif + bus = ®map_fsi16le; + break; + case REGMAP_ENDIAN_DEFAULT: + case REGMAP_ENDIAN_BIG: +#ifdef __BIG_ENDIAN + case REGMAP_ENDIAN_NATIVE: +#endif + bus = ®map_fsi16; + break; + default: + break; + } + break; + case 32: + switch (regmap_get_val_endian(&fsi_dev->dev, NULL, config)) { + case REGMAP_ENDIAN_LITTLE: +#ifdef __LITTLE_ENDIAN + case REGMAP_ENDIAN_NATIVE: +#endif + bus = ®map_fsi32le; + break; + case REGMAP_ENDIAN_DEFAULT: + case REGMAP_ENDIAN_BIG: +#ifdef __BIG_ENDIAN + case REGMAP_ENDIAN_NATIVE: +#endif + bus = ®map_fsi32; + break; + default: + break; + } + break; + } + } + + return bus ?: ERR_PTR(-EOPNOTSUPP); +} + +struct regmap *__regmap_init_fsi(struct fsi_device *fsi_dev, const struct regmap_config *config, + struct lock_class_key *lock_key, const char *lock_name) +{ + const struct regmap_bus *bus = regmap_get_fsi_bus(fsi_dev, config); + + if (IS_ERR(bus)) + return ERR_CAST(bus); + + return __regmap_init(&fsi_dev->dev, bus, fsi_dev->slave, config, lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__regmap_init_fsi); + +struct regmap *__devm_regmap_init_fsi(struct fsi_device *fsi_dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, const char *lock_name) +{ + const struct regmap_bus *bus = regmap_get_fsi_bus(fsi_dev, config); + + if (IS_ERR(bus)) + return ERR_CAST(bus); + + return __devm_regmap_init(&fsi_dev->dev, bus, fsi_dev->slave, config, lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_fsi); + +MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 4ef9488d05cd..a8f185430a07 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -115,12 +115,20 @@ static void regmap_irq_sync_unlock(struct irq_data *data) */ for (i = 0; i < d->chip->num_regs; i++) { if (d->mask_base) { - reg = d->get_irq_reg(d, d->mask_base, i); - ret = regmap_update_bits(d->map, reg, - d->mask_buf_def[i], d->mask_buf[i]); - if (ret) - dev_err(d->map->dev, "Failed to sync masks in %x\n", - reg); + if (d->chip->handle_mask_sync) + d->chip->handle_mask_sync(d->map, i, + d->mask_buf_def[i], + d->mask_buf[i], + d->chip->irq_drv_data); + else { + reg = d->get_irq_reg(d, d->mask_base, i); + ret = regmap_update_bits(d->map, reg, + 
d->mask_buf_def[i], + d->mask_buf[i]); + if (ret) + dev_err(d->map->dev, "Failed to sync masks in %x\n", + reg); + } } if (d->unmask_base) { @@ -722,6 +730,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, int i; int ret = -ENOMEM; int num_type_reg; + int num_regs; u32 reg; if (chip->num_regs <= 0) @@ -796,14 +805,20 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, goto err_alloc; } - num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg; - if (num_type_reg) { - d->type_buf_def = kcalloc(num_type_reg, + /* + * Use num_config_regs if defined, otherwise fall back to num_type_reg + * to maintain backward compatibility. + */ + num_type_reg = chip->num_config_regs ? chip->num_config_regs + : chip->num_type_reg; + num_regs = chip->type_in_mask ? chip->num_regs : num_type_reg; + if (num_regs) { + d->type_buf_def = kcalloc(num_regs, sizeof(*d->type_buf_def), GFP_KERNEL); if (!d->type_buf_def) goto err_alloc; - d->type_buf = kcalloc(num_type_reg, sizeof(*d->type_buf), + d->type_buf = kcalloc(num_regs, sizeof(*d->type_buf), GFP_KERNEL); if (!d->type_buf) goto err_alloc; @@ -917,13 +932,23 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, d->mask_buf[i] = d->mask_buf_def[i]; if (d->mask_base) { - reg = d->get_irq_reg(d, d->mask_base, i); - ret = regmap_update_bits(d->map, reg, - d->mask_buf_def[i], d->mask_buf[i]); - if (ret) { - dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", - reg, ret); - goto err_alloc; + if (chip->handle_mask_sync) { + ret = chip->handle_mask_sync(d->map, i, + d->mask_buf_def[i], + d->mask_buf[i], + chip->irq_drv_data); + if (ret) + goto err_alloc; + } else { + reg = d->get_irq_reg(d, d->mask_base, i); + ret = regmap_update_bits(d->map, reg, + d->mask_buf_def[i], + d->mask_buf[i]); + if (ret) { + dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", + reg, ret); + goto err_alloc; + } } } diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index c6d6d53e8cd3..d12d669157f2 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -3486,6 +3486,19 @@ int regmap_get_reg_stride(struct regmap *map) } EXPORT_SYMBOL_GPL(regmap_get_reg_stride); +/** + * regmap_might_sleep() - Returns whether a regmap access might sleep. + * + * @map: Register map to operate on. + * + * Returns true if an access to the register might sleep, else false. + */ +bool regmap_might_sleep(struct regmap *map) +{ + return map->can_sleep; +} +EXPORT_SYMBOL_GPL(regmap_might_sleep); + int regmap_parse_val(struct regmap *map, const void *buf, unsigned int *val) { |
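
A minimal sketch of how a client driver might consume two of the interfaces added above: the FSI regmap bus from regmap-fsi.c and the regmap_might_sleep() helper from regmap.c. The devm_regmap_init_fsi() wrapper is assumed to come from the matching include/linux/regmap.h change, which is outside this diffstat, and the probe function below is a hypothetical FSI client, not code from this series.

#include <linux/fsi.h>
#include <linux/module.h>
#include <linux/regmap.h>

static const struct regmap_config example_fsi_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	/* FSI registers are big-endian; this selects the regmap_fsi32 bus above */
	.val_format_endian = REGMAP_ENDIAN_BIG,
};

/* Hypothetical probe of an FSI client device (driver registration omitted). */
static int example_fsi_probe(struct device *dev)
{
	struct fsi_device *fsi_dev = to_fsi_dev(dev);
	struct regmap *map;
	unsigned int val;
	int ret;

	/* Wrapper around __devm_regmap_init_fsi() exported by regmap-fsi.c */
	map = devm_regmap_init_fsi(fsi_dev, &example_fsi_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/*
	 * Accesses go through fsi_slave_read()/fsi_slave_write() and may
	 * sleep, so regmap_might_sleep() reports true; a caller that can
	 * run in atomic context should check this before using the map.
	 */
	if (regmap_might_sleep(map))
		dev_dbg(dev, "register accesses may sleep\n");

	ret = regmap_read(map, 0x0, &val);	/* 0x0: hypothetical ID register */
	if (ret)
		return ret;

	dev_info(dev, "chip id 0x%08x\n", val);
	return 0;
}

In the same spirit, the kerneldoc for dev_pm_genpd_get_next_hrtimer() above says it should typically be called once a GENPD_NOTIFY_PRE_OFF notification has been sent for the device. A hedged sketch of such a genpd power notifier follows; the context structure and the wakeup-programming helper are illustrative placeholders, not part of this diff.

#include <linux/ktime.h>
#include <linux/notifier.h>
#include <linux/pm_domain.h>

struct example_pd_ctx {
	struct device *dev;
	struct notifier_block nb;
};

/* Placeholder for SoC-specific hardware that wants the next wakeup time. */
static void example_program_wakeup(struct example_pd_ctx *ctx, ktime_t next)
{
}

static int example_genpd_notifier(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct example_pd_ctx *ctx = container_of(nb, struct example_pd_ctx, nb);

	if (action == GENPD_NOTIFY_PRE_OFF) {
		/* Aggregated next hrtimer of the domain, or KTIME_MAX if unset */
		ktime_t next = dev_pm_genpd_get_next_hrtimer(ctx->dev);

		if (next != KTIME_MAX)
			example_program_wakeup(ctx, next);
	}

	return NOTIFY_OK;
}

/* Registered during probe with: dev_pm_genpd_add_notifier(ctx->dev, &ctx->nb); */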