Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/arch_topology.c             |  89
-rw-r--r--  drivers/base/attribute_container.c       |   4
-rw-r--r--  drivers/base/auxiliary.c                 |   5
-rw-r--r--  drivers/base/base.h                      |   2
-rw-r--r--  drivers/base/component.c                 |   9
-rw-r--r--  drivers/base/core.c                      | 112
-rw-r--r--  drivers/base/cpu.c                       |   6
-rw-r--r--  drivers/base/dd.c                        |  55
-rw-r--r--  drivers/base/devcoredump.c               |  19
-rw-r--r--  drivers/base/devres.c                    |   6
-rw-r--r--  drivers/base/devtmpfs.c                  |   6
-rw-r--r--  drivers/base/node.c                      |  26
-rw-r--r--  drivers/base/platform-msi.c              |   3
-rw-r--r--  drivers/base/platform.c                  |  11
-rw-r--r--  drivers/base/power/clock_ops.c           |   2
-rw-r--r--  drivers/base/power/domain.c              |  36
-rw-r--r--  drivers/base/power/runtime.c             |  57
-rw-r--r--  drivers/base/power/wakeup.c              |  17
-rw-r--r--  drivers/base/power/wakeup_stats.c        |   4
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c     |   1
-rw-r--r--  drivers/base/regmap/regmap-irq.c         | 126
-rw-r--r--  drivers/base/swnode.c                    | 106
-rw-r--r--  drivers/base/test/Kconfig                |   2
-rw-r--r--  drivers/base/test/Makefile               |   2
-rw-r--r--  drivers/base/test/property-entry-test.c  |  61
25 files changed, 537 insertions(+), 230 deletions(-)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index de8587cc119e..c1179edc0f3b 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -21,17 +21,94 @@
#include <linux/sched.h>
#include <linux/smp.h>
+static DEFINE_PER_CPU(struct scale_freq_data *, sft_data);
+static struct cpumask scale_freq_counters_mask;
+static bool scale_freq_invariant;
+
+static bool supports_scale_freq_counters(const struct cpumask *cpus)
+{
+ return cpumask_subset(cpus, &scale_freq_counters_mask);
+}
+
bool topology_scale_freq_invariant(void)
{
return cpufreq_supports_freq_invariance() ||
- arch_freq_counters_available(cpu_online_mask);
+ supports_scale_freq_counters(cpu_online_mask);
}
-__weak bool arch_freq_counters_available(const struct cpumask *cpus)
+static void update_scale_freq_invariant(bool status)
{
- return false;
+ if (scale_freq_invariant == status)
+ return;
+
+ /*
+ * Task scheduler behavior depends on frequency invariance support,
+ * either cpufreq or counter driven. If the support status changes as
+ * a result of counter initialisation and use, retrigger the build of
+ * scheduling domains to ensure the information is propagated properly.
+ */
+ if (topology_scale_freq_invariant() == status) {
+ scale_freq_invariant = status;
+ rebuild_sched_domains_energy();
+ }
}
-DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
+
+void topology_set_scale_freq_source(struct scale_freq_data *data,
+ const struct cpumask *cpus)
+{
+ struct scale_freq_data *sfd;
+ int cpu;
+
+ /*
+ * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
+ * supported by cpufreq.
+ */
+ if (cpumask_empty(&scale_freq_counters_mask))
+ scale_freq_invariant = topology_scale_freq_invariant();
+
+ for_each_cpu(cpu, cpus) {
+ sfd = per_cpu(sft_data, cpu);
+
+ /* Use ARCH provided counters whenever possible */
+ if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
+ per_cpu(sft_data, cpu) = data;
+ cpumask_set_cpu(cpu, &scale_freq_counters_mask);
+ }
+ }
+
+ update_scale_freq_invariant(true);
+}
+EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
+
+void topology_clear_scale_freq_source(enum scale_freq_source source,
+ const struct cpumask *cpus)
+{
+ struct scale_freq_data *sfd;
+ int cpu;
+
+ for_each_cpu(cpu, cpus) {
+ sfd = per_cpu(sft_data, cpu);
+
+ if (sfd && sfd->source == source) {
+ per_cpu(sft_data, cpu) = NULL;
+ cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
+ }
+ }
+
+ update_scale_freq_invariant(false);
+}
+EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
+
+void topology_scale_freq_tick(void)
+{
+ struct scale_freq_data *sfd = *this_cpu_ptr(&sft_data);
+
+ if (sfd)
+ sfd->set_freq_scale();
+}
+
+DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
unsigned long max_freq)
@@ -47,13 +124,13 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
* want to update the scale factor with information from CPUFREQ.
* Instead the scale factor will be updated from arch_scale_freq_tick.
*/
- if (arch_freq_counters_available(cpus))
+ if (supports_scale_freq_counters(cpus))
return;
scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
for_each_cpu(i, cpus)
- per_cpu(freq_scale, i) = scale;
+ per_cpu(arch_freq_scale, i) = scale;
}
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
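[Editor's note] The two entry points added above replace the weak arch_freq_counters_available() hook. As a rough, illustrative sketch (assuming the declarations land in <linux/arch_topology.h> and that struct scale_freq_data carries only the .source and .set_freq_scale members used in this hunk), a counter-driven frequency-invariance provider would register and unregister itself like this:

    #include <linux/arch_topology.h>
    #include <linux/cpumask.h>

    /* Hypothetical provider: called from the scheduler tick via
     * topology_scale_freq_tick() to refresh the per-CPU arch_freq_scale. */
    static void example_set_freq_scale(void)
    {
            /* read activity counters and update arch_freq_scale here */
    }

    static struct scale_freq_data example_sfd = {
            .source         = SCALE_FREQ_SOURCE_ARCH,
            .set_freq_scale = example_set_freq_scale,
    };

    static void example_counters_init(const struct cpumask *cpus)
    {
            topology_set_scale_freq_source(&example_sfd, cpus);
    }

    static void example_counters_exit(const struct cpumask *cpus)
    {
            topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_ARCH, cpus);
    }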
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index f7bd0f4db13d..9c00d203d61e 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -461,6 +461,10 @@ attribute_container_add_class_device(struct device *classdev)
/**
* attribute_container_add_class_device_adapter - simple adapter for triggers
*
+ * @cont: the container to register.
+ * @dev: the generic device to activate the trigger for
+ * @classdev: the class device to add
+ *
* This function is identical to attribute_container_add_class_device except
* that it is designed to be called from the triggers
*/
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index d8b314e7d0fd..adc199dfba3c 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -265,8 +265,3 @@ void __init auxiliary_bus_init(void)
{
WARN_ON(bus_register(&auxiliary_bus_type));
}
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Auxiliary Bus");
-MODULE_AUTHOR("David Ertman <david.m.ertman@intel.com>");
-MODULE_AUTHOR("Kiran Patil <kiran.patil@intel.com>");
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 52b3d7b75c27..e5f9b7e656c3 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -185,11 +185,13 @@ extern int device_links_read_lock(void);
extern void device_links_read_unlock(int idx);
extern int device_links_read_lock_held(void);
extern int device_links_check_suppliers(struct device *dev);
+extern void device_links_force_bind(struct device *dev);
extern void device_links_driver_bound(struct device *dev);
extern void device_links_driver_cleanup(struct device *dev);
extern void device_links_no_driver(struct device *dev);
extern bool device_links_busy(struct device *dev);
extern void device_links_unbind_consumers(struct device *dev);
+extern void fw_devlink_drivers_done(void);
/* device pm support */
void device_pm_move_to_tail(struct device *dev);
diff --git a/drivers/base/component.c b/drivers/base/component.c
index dcfbe7251dc4..272ba42392f0 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -65,7 +65,6 @@ struct master {
const struct component_master_ops *ops;
struct device *dev;
struct component_match *match;
- struct dentry *dentry;
};
struct component {
@@ -125,15 +124,13 @@ core_initcall(component_debug_init);
static void component_master_debugfs_add(struct master *m)
{
- m->dentry = debugfs_create_file(dev_name(m->dev), 0444,
- component_debugfs_dir,
- m, &component_devices_fops);
+ debugfs_create_file(dev_name(m->dev), 0444, component_debugfs_dir, m,
+ &component_devices_fops);
}
static void component_master_debugfs_del(struct master *m)
{
- debugfs_remove(m->dentry);
- m->dentry = NULL;
+ debugfs_remove(debugfs_lookup(dev_name(m->dev), component_debugfs_dir));
}
#else
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f29839382f81..4a8bf8cda52b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -51,6 +51,7 @@ static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
+static bool fw_devlink_drv_reg_done;
/**
* fwnode_link_add - Create a link between two fwnode_handles.
@@ -1154,6 +1155,41 @@ static ssize_t waiting_for_supplier_show(struct device *dev,
static DEVICE_ATTR_RO(waiting_for_supplier);
/**
+ * device_links_force_bind - Prepares device to be force bound
+ * @dev: Consumer device.
+ *
+ * device_bind_driver() force binds a device to a driver without calling any
+ * driver probe functions. So the consumer really isn't going to wait for any
+ * supplier before it's bound to the driver. We still want the device link
+ * states to be sensible when this happens.
+ *
+ * In preparation for device_bind_driver(), this function goes through each
+ * supplier device links and checks if the supplier is bound. If it is, then
+ * the device link status is set to CONSUMER_PROBE. Otherwise, the device link
+ * is dropped. Links without the DL_FLAG_MANAGED flag set are ignored.
+ */
+void device_links_force_bind(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ device_links_write_lock();
+
+ list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+
+ if (link->status != DL_STATE_AVAILABLE) {
+ device_link_drop_managed(link);
+ continue;
+ }
+ WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
+ }
+ dev->links.status = DL_DEV_PROBING;
+
+ device_links_write_unlock();
+}
+
+/**
* device_links_driver_bound - Update device links after probing its driver.
* @dev: Device to update the links for.
*
@@ -1503,7 +1539,7 @@ static void device_links_purge(struct device *dev)
#define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \
DL_FLAG_PM_RUNTIME)
-static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
+static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
static int __init fw_devlink_setup(char *arg)
{
if (!arg)
@@ -1563,6 +1599,52 @@ static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
fw_devlink_parse_fwtree(child);
}
+static void fw_devlink_relax_link(struct device_link *link)
+{
+ if (!(link->flags & DL_FLAG_INFERRED))
+ return;
+
+ if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE))
+ return;
+
+ pm_runtime_drop_link(link);
+ link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
+ dev_dbg(link->consumer, "Relaxing link with %s\n",
+ dev_name(link->supplier));
+}
+
+static int fw_devlink_no_driver(struct device *dev, void *data)
+{
+ struct device_link *link = to_devlink(dev);
+
+ if (!link->supplier->can_match)
+ fw_devlink_relax_link(link);
+
+ return 0;
+}
+
+void fw_devlink_drivers_done(void)
+{
+ fw_devlink_drv_reg_done = true;
+ device_links_write_lock();
+ class_for_each_device(&devlink_class, NULL, NULL,
+ fw_devlink_no_driver);
+ device_links_write_unlock();
+}
+
+static void fw_devlink_unblock_consumers(struct device *dev)
+{
+ struct device_link *link;
+
+ if (!fw_devlink_flags || fw_devlink_is_permissive())
+ return;
+
+ device_links_write_lock();
+ list_for_each_entry(link, &dev->links.consumers, s_node)
+ fw_devlink_relax_link(link);
+ device_links_write_unlock();
+}
+
/**
* fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
* @con: Device to check dependencies for.
@@ -1599,21 +1681,16 @@ static int fw_devlink_relax_cycle(struct device *con, void *sup)
ret = 1;
- if (!(link->flags & DL_FLAG_INFERRED))
- continue;
-
- pm_runtime_drop_link(link);
- link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
- dev_dbg(link->consumer, "Relaxing link with %s\n",
- dev_name(link->supplier));
+ fw_devlink_relax_link(link);
}
return ret;
}
/**
* fw_devlink_create_devlink - Create a device link from a consumer to fwnode
- * @con - Consumer device for the device link
- * @sup_handle - fwnode handle of supplier
+ * @con: consumer device for the device link
+ * @sup_handle: fwnode handle of supplier
+ * @flags: devlink flags
*
* This function will try to create a device link between the consumer device
* @con and the supplier device represented by @sup_handle.
@@ -1709,7 +1786,7 @@ out:
/**
* __fw_devlink_link_to_consumers - Create device links to consumers of a device
- * @dev - Device that needs to be linked to its consumers
+ * @dev: Device that needs to be linked to its consumers
*
* This function looks at all the consumer fwnodes of @dev and creates device
* links between the consumer device and @dev (supplier).
@@ -1779,8 +1856,8 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
/**
* __fw_devlink_link_to_suppliers - Create device links to suppliers of a device
- * @dev - The consumer device that needs to be linked to its suppliers
- * @fwnode - Root of the fwnode tree that is used to create device links
+ * @dev: The consumer device that needs to be linked to its suppliers
+ * @fwnode: Root of the fwnode tree that is used to create device links
*
* This function looks at all the supplier fwnodes of fwnode tree rooted at
* @fwnode and creates device links between @dev (consumer) and all the
@@ -3240,6 +3317,15 @@ int device_add(struct device *dev)
}
bus_probe_device(dev);
+
+ /*
+ * If all driver registration is done and a newly added device doesn't
+ * match with any driver, don't block its consumers from probing in
+ * case the consumer device is able to operate without this supplier.
+ */
+ if (dev->fwnode && fw_devlink_drv_reg_done && !dev->can_match)
+ fw_devlink_unblock_consumers(dev);
+
if (parent)
klist_add_tail(&dev->p->knode_parent,
&parent->p->klist_children);
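[Editor's note] The hunk above also flips the default fw_devlink policy from permissive to on, so device links inferred from firmware now hold consumers back until the supplier binds, fw_devlink_drivers_done() runs, or the new unblocking path relaxes the links. If a platform regresses under the stricter default, the previous behaviour can still be selected at boot via the existing fw_devlink= parameter parsed by fw_devlink_setup() (not something added by this patch):

    fw_devlink=permissive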
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 8f1d6569564c..2b9e41377a07 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -409,13 +409,11 @@ __cpu_device_create(struct device *parent, void *drvdata,
const char *fmt, va_list args)
{
struct device *dev = NULL;
- int retval = -ENODEV;
+ int retval = -ENOMEM;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- retval = -ENOMEM;
+ if (!dev)
goto error;
- }
device_initialize(dev);
dev->parent = parent;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 9179825ff646..ecd7cf848daf 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -55,7 +55,6 @@ static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
-static struct dentry *deferred_devices;
static bool initcalls_done;
/* Save the async probe drivers' name from kernel cmdline */
@@ -69,6 +68,12 @@ static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN];
*/
static bool defer_all_probes;
+static void __device_set_deferred_probe_reason(const struct device *dev, char *reason)
+{
+ kfree(dev->p->deferred_probe_reason);
+ dev->p->deferred_probe_reason = reason;
+}
+
/*
* deferred_probe_work_func() - Retry probing devices in the active list.
*/
@@ -97,6 +102,8 @@ static void deferred_probe_work_func(struct work_struct *work)
get_device(dev);
+ __device_set_deferred_probe_reason(dev, NULL);
+
/*
* Drop the mutex while probing each device; the probe path may
* manipulate the deferred list
@@ -123,6 +130,9 @@ static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
void driver_deferred_probe_add(struct device *dev)
{
+ if (!dev->can_match)
+ return;
+
mutex_lock(&deferred_probe_mutex);
if (list_empty(&dev->p->deferred_probe)) {
dev_dbg(dev, "Added to deferred list\n");
@@ -137,8 +147,7 @@ void driver_deferred_probe_del(struct device *dev)
if (!list_empty(&dev->p->deferred_probe)) {
dev_dbg(dev, "Removed from deferred list\n");
list_del_init(&dev->p->deferred_probe);
- kfree(dev->p->deferred_probe_reason);
- dev->p->deferred_probe_reason = NULL;
+ __device_set_deferred_probe_reason(dev, NULL);
}
mutex_unlock(&deferred_probe_mutex);
}
@@ -182,7 +191,7 @@ static void driver_deferred_probe_trigger(void)
* Kick the re-probe thread. It may already be scheduled, but it is
* safe to kick it again.
*/
- schedule_work(&deferred_probe_work);
+ queue_work(system_unbound_wq, &deferred_probe_work);
}
/**
@@ -217,11 +226,12 @@ void device_unblock_probing(void)
void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf)
{
const char *drv = dev_driver_string(dev);
+ char *reason;
mutex_lock(&deferred_probe_mutex);
- kfree(dev->p->deferred_probe_reason);
- dev->p->deferred_probe_reason = kasprintf(GFP_KERNEL, "%s: %pV", drv, vaf);
+ reason = kasprintf(GFP_KERNEL, "%s: %pV", drv, vaf);
+ __device_set_deferred_probe_reason(dev, reason);
mutex_unlock(&deferred_probe_mutex);
}
@@ -289,14 +299,18 @@ int driver_deferred_probe_check_state(struct device *dev)
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
- struct device_private *private, *p;
+ struct device_private *p;
+
+ fw_devlink_drivers_done();
driver_deferred_probe_timeout = 0;
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
- list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
- dev_info(private->device, "deferred probe pending\n");
+ mutex_lock(&deferred_probe_mutex);
+ list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
+ dev_info(p->device, "deferred probe pending\n");
+ mutex_unlock(&deferred_probe_mutex);
wake_up_all(&probe_timeout_waitqueue);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
@@ -310,8 +324,8 @@ static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_
*/
static int deferred_probe_initcall(void)
{
- deferred_devices = debugfs_create_file("devices_deferred", 0444, NULL,
- NULL, &deferred_devs_fops);
+ debugfs_create_file("devices_deferred", 0444, NULL, NULL,
+ &deferred_devs_fops);
driver_deferred_probe_enable = true;
driver_deferred_probe_trigger();
@@ -319,6 +333,9 @@ static int deferred_probe_initcall(void)
flush_work(&deferred_probe_work);
initcalls_done = true;
+ if (!IS_ENABLED(CONFIG_MODULES))
+ fw_devlink_drivers_done();
+
/*
* Trigger deferred probe again, this time we won't defer anything
* that is optional
@@ -336,7 +353,7 @@ late_initcall(deferred_probe_initcall);
static void __exit deferred_probe_exit(void)
{
- debugfs_remove_recursive(deferred_devices);
+ debugfs_remove_recursive(debugfs_lookup("devices_deferred", NULL));
}
__exitcall(deferred_probe_exit);
@@ -413,8 +430,11 @@ static int driver_sysfs_add(struct device *dev)
if (ret)
goto rm_dev;
- if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump ||
- !device_create_file(dev, &dev_attr_coredump))
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump)
+ return 0;
+
+ ret = device_create_file(dev, &dev_attr_coredump);
+ if (!ret)
return 0;
sysfs_remove_link(&dev->kobj, "driver");
@@ -457,8 +477,10 @@ int device_bind_driver(struct device *dev)
int ret;
ret = driver_sysfs_add(dev);
- if (!ret)
+ if (!ret) {
+ device_links_force_bind(dev);
driver_bound(dev);
+ }
else if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
@@ -726,6 +748,7 @@ static int driver_probe_device(struct device_driver *drv, struct device *dev)
if (!device_is_registered(dev))
return -ENODEV;
+ dev->can_match = true;
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
@@ -829,6 +852,7 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
return 0;
} else if (ret == -EPROBE_DEFER) {
dev_dbg(dev, "Device match requests probe deferral\n");
+ dev->can_match = true;
driver_deferred_probe_add(dev);
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
@@ -1064,6 +1088,7 @@ static int __driver_attach(struct device *dev, void *data)
return 0;
} else if (ret == -EPROBE_DEFER) {
dev_dbg(dev, "Device match requests probe deferral\n");
+ dev->can_match = true;
driver_deferred_probe_add(dev);
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 9243468e2c99..8eec0e0ddff7 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -202,7 +202,7 @@ static int devcd_match_failing(struct device *dev, const void *failing)
* NOTE: if two tables allocated with devcd_alloc_sgtable and then chained
* using the sg_chain function then that function should be called only once
* on the chained table
- * @table: pointer to sg_table to free
+ * @data: pointer to sg_table to free
*/
static void devcd_free_sgtable(void *data)
{
@@ -210,7 +210,7 @@ static void devcd_free_sgtable(void *data)
}
/**
- * devcd_read_from_table - copy data from sg_table to a given buffer
+ * devcd_read_from_sgtable - copy data from sg_table to a given buffer
* and return the number of bytes read
* @buffer: the buffer to copy the data to it
* @buf_len: the length of the buffer
@@ -292,13 +292,16 @@ void dev_coredumpm(struct device *dev, struct module *owner,
if (device_add(&devcd->devcd_dev))
goto put_device;
+ /*
+ * These should normally not fail, but there is no problem
+ * continuing without the links, so just warn instead of
+ * failing.
+ */
if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj,
- "failing_device"))
- /* nothing - symlink will be missing */;
-
- if (sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
- "devcoredump"))
- /* nothing - symlink will be missing */;
+ "failing_device") ||
+ sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
+ "devcoredump"))
+ dev_warn(dev, "devcoredump create_link failed\n");
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index fb9d5289a620..8746f2212781 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -58,8 +58,8 @@ static void devres_log(struct device *dev, struct devres_node *node,
const char *op)
{
if (unlikely(log_devres))
- dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
- op, node, node->name, (unsigned long)node->size);
+ dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
+ op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s) do {} while (0)
@@ -1228,6 +1228,6 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
- (void *)pdata));
+ (__force void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 653c8c6ac7a7..8be352ab4ddb 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -371,7 +371,7 @@ int __init devtmpfs_mount(void)
return err;
}
-static DECLARE_COMPLETION(setup_done);
+static __initdata DECLARE_COMPLETION(setup_done);
static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
struct device *dev)
@@ -405,7 +405,7 @@ static void __noreturn devtmpfs_work_loop(void)
}
}
-static int __init devtmpfs_setup(void *p)
+static noinline int __init devtmpfs_setup(void *p)
{
int err;
@@ -419,7 +419,6 @@ static int __init devtmpfs_setup(void *p)
init_chroot(".");
out:
*(int *)p = err;
- complete(&setup_done);
return err;
}
@@ -432,6 +431,7 @@ static int __ref devtmpfsd(void *p)
{
int err = devtmpfs_setup(p);
+ complete(&setup_done);
if (err)
return err;
devtmpfs_work_loop();
diff --git a/drivers/base/node.c b/drivers/base/node.c
index f449dbb2c746..2c36f61d30bc 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -268,21 +268,20 @@ static void node_init_cache_dev(struct node *node)
if (!dev)
return;
+ device_initialize(dev);
dev->parent = &node->dev;
dev->release = node_cache_release;
if (dev_set_name(dev, "memory_side_cache"))
- goto free_dev;
+ goto put_device;
- if (device_register(dev))
- goto free_name;
+ if (device_add(dev))
+ goto put_device;
pm_runtime_no_callbacks(dev);
node->cache_dev = dev;
return;
-free_name:
- kfree_const(dev->kobj.name);
-free_dev:
- kfree(dev);
+put_device:
+ put_device(dev);
}
/**
@@ -319,25 +318,24 @@ void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
return;
dev = &info->dev;
+ device_initialize(dev);
dev->parent = node->cache_dev;
dev->release = node_cacheinfo_release;
dev->groups = cache_groups;
if (dev_set_name(dev, "index%d", cache_attrs->level))
- goto free_cache;
+ goto put_device;
info->cache_attrs = *cache_attrs;
- if (device_register(dev)) {
+ if (device_add(dev)) {
dev_warn(&node->dev, "failed to add cache level:%d\n",
cache_attrs->level);
- goto free_name;
+ goto put_device;
}
pm_runtime_no_callbacks(dev);
list_add_tail(&info->node, &node->cache_attrs);
return;
-free_name:
- kfree_const(dev->kobj.name);
-free_cache:
- kfree(info);
+put_device:
+ put_device(dev);
}
static void node_remove_caches(struct node *node)
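[Editor's note] For reference, the general shape of the pattern both node.c hunks switch to is sketched below (names are placeholders, not from the patch): once device_initialize() has run, every failure path must go through put_device() so the release() callback frees the object, instead of freeing the name or structure by hand.

    #include <linux/device.h>
    #include <linux/slab.h>

    static void example_release(struct device *dev)
    {
            kfree(dev);
    }

    static int example_add_child(struct device *parent)
    {
            struct device *dev;

            dev = kzalloc(sizeof(*dev), GFP_KERNEL);
            if (!dev)
                    return -ENOMEM;

            device_initialize(dev);
            dev->parent  = parent;
            dev->release = example_release;

            if (dev_set_name(dev, "example") || device_add(dev)) {
                    put_device(dev);        /* release() does the freeing */
                    return -ENODEV;
            }

            return 0;
    }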
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 2c1e2e0c1a59..0b72b134a304 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -316,10 +316,11 @@ void *platform_msi_get_host_data(struct irq_domain *domain)
}
/**
- * platform_msi_create_device_domain - Create a platform-msi domain
+ * __platform_msi_create_device_domain - Create a platform-msi domain
*
* @dev: The device generating the MSIs
* @nvec: The number of MSIs that need to be allocated
+ * @is_tree: flag to indicate tree hierarchy
* @write_msi_msg: Callback to write an interrupt message for @dev
* @ops: The hierarchy domain operations to use
* @host_data: Private data associated to this domain
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 6e1f8e0b661c..9cd34def2237 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -192,7 +192,7 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
#ifdef CONFIG_SPARC
/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
if (!dev || num >= dev->archdata.num_irqs)
- return -ENXIO;
+ goto out_not_found;
ret = dev->archdata.irqs[num];
goto out;
#else
@@ -223,10 +223,8 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
struct irq_data *irqd;
irqd = irq_get_irq_data(r->start);
- if (!irqd) {
- ret = -ENXIO;
- goto out;
- }
+ if (!irqd)
+ goto out_not_found;
irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
}
@@ -249,8 +247,9 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
goto out;
}
- ret = -ENXIO;
#endif
+out_not_found:
+ ret = -ENXIO;
out:
WARN(ret == 0, "0 is an invalid IRQ number\n");
return ret;
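[Editor's note] With the unified out_not_found exit above, -ENXIO is the single "no such IRQ" return value on every path, which is what makes the usual caller pattern safe. Illustrative driver code, not part of the patch:

    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            int irq;

            irq = platform_get_irq_optional(pdev, 1);
            if (irq < 0 && irq != -ENXIO)
                    return irq;     /* real error, e.g. -EPROBE_DEFER */
            if (irq > 0) {
                    /* wire up the optional interrupt */
            }

            return 0;
    }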
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 84d5acb6301b..0251f3e6e61d 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -140,7 +140,7 @@ static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
}
/**
- * pm_clk_enable - Enable a clock, reporting any errors
+ * __pm_clk_enable - Enable a clock, reporting any errors
* @dev: The device for the given clock
* @ce: PM clock entry corresponding to the clock.
*/
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 78c310d3179d..b6a782c31613 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1088,34 +1088,6 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
}
/**
- * resume_needed - Check whether to resume a device before system suspend.
- * @dev: Device to check.
- * @genpd: PM domain the device belongs to.
- *
- * There are two cases in which a device that can wake up the system from sleep
- * states should be resumed by genpd_prepare(): (1) if the device is enabled
- * to wake up the system and it has to remain active for this purpose while the
- * system is in the sleep state and (2) if the device is not enabled to wake up
- * the system from sleep states and it generally doesn't generate wakeup signals
- * by itself (those signals are generated on its behalf by other parts of the
- * system). In the latter case it may be necessary to reconfigure the device's
- * wakeup settings during system suspend, because it may have been set up to
- * signal remote wakeup from the system's working state as needed by runtime PM.
- * Return 'true' in either of the above cases.
- */
-static bool resume_needed(struct device *dev,
- const struct generic_pm_domain *genpd)
-{
- bool active_wakeup;
-
- if (!device_can_wakeup(dev))
- return false;
-
- active_wakeup = genpd_is_active_wakeup(genpd);
- return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
-}
-
-/**
* genpd_prepare - Start power transition of a device in a PM domain.
* @dev: Device to start the transition of.
*
@@ -1135,14 +1107,6 @@ static int genpd_prepare(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- /*
- * If a wakeup request is pending for the device, it should be woken up
- * at this point and a system wakeup event should be reported if it's
- * set up to wake up the system from sleep states.
- */
- if (resume_needed(dev, genpd))
- pm_runtime_resume(dev);
-
genpd_lock(genpd);
if (genpd->prepared_count++ == 0)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index a46a7e30881b..1fc1a992f90c 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -305,7 +305,7 @@ static int rpm_get_suppliers(struct device *dev)
return 0;
}
-static void rpm_put_suppliers(struct device *dev)
+static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
struct device_link *link;
@@ -313,10 +313,30 @@ static void rpm_put_suppliers(struct device *dev)
device_links_read_lock_held()) {
while (refcount_dec_not_one(&link->rpm_active))
- pm_runtime_put(link->supplier);
+ pm_runtime_put_noidle(link->supplier);
+
+ if (try_to_suspend)
+ pm_request_idle(link->supplier);
}
}
+static void rpm_put_suppliers(struct device *dev)
+{
+ __rpm_put_suppliers(dev, true);
+}
+
+static void rpm_suspend_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
+ pm_request_idle(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
/**
* __rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
@@ -344,8 +364,10 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
idx = device_links_read_lock();
retval = rpm_get_suppliers(dev);
- if (retval)
+ if (retval) {
+ rpm_put_suppliers(dev);
goto fail;
+ }
device_links_read_unlock(idx);
}
@@ -368,9 +390,9 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
|| (dev->power.runtime_status == RPM_RESUMING && retval))) {
idx = device_links_read_lock();
- fail:
- rpm_put_suppliers(dev);
+ __rpm_put_suppliers(dev, false);
+fail:
device_links_read_unlock(idx);
}
@@ -642,8 +664,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
+ if (dev->power.irq_safe)
+ goto out;
+
/* Maybe the parent is now able to suspend. */
- if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
+ if (parent && !parent->power.ignore_children) {
spin_unlock(&dev->power.lock);
spin_lock(&parent->power.lock);
@@ -652,6 +677,14 @@ static int rpm_suspend(struct device *dev, int rpmflags)
spin_lock(&dev->power.lock);
}
+ /* Maybe the suppliers are now able to suspend. */
+ if (dev->power.links_count > 0) {
+ spin_unlock_irq(&dev->power.lock);
+
+ rpm_suspend_suppliers(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ }
out:
trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
@@ -918,7 +951,7 @@ static void pm_runtime_work(struct work_struct *work)
/**
* pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
- * @data: Device pointer passed by pm_schedule_suspend().
+ * @timer: hrtimer used by pm_schedule_suspend().
*
* Check if the time is right and queue a suspend request.
*/
@@ -1657,8 +1690,8 @@ void pm_runtime_get_suppliers(struct device *dev)
device_links_read_lock_held())
if (link->flags & DL_FLAG_PM_RUNTIME) {
link->supplier_preactivated = true;
- refcount_inc(&link->rpm_active);
pm_runtime_get_sync(link->supplier);
+ refcount_inc(&link->rpm_active);
}
device_links_read_unlock(idx);
@@ -1671,6 +1704,8 @@ void pm_runtime_get_suppliers(struct device *dev)
void pm_runtime_put_suppliers(struct device *dev)
{
struct device_link *link;
+ unsigned long flags;
+ bool put;
int idx;
idx = device_links_read_lock();
@@ -1679,7 +1714,11 @@ void pm_runtime_put_suppliers(struct device *dev)
device_links_read_lock_held())
if (link->supplier_preactivated) {
link->supplier_preactivated = false;
- if (refcount_dec_not_one(&link->rpm_active))
+ spin_lock_irqsave(&dev->power.lock, flags);
+ put = pm_runtime_status_suspended(dev) &&
+ refcount_dec_not_one(&link->rpm_active);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ if (put)
pm_runtime_put(link->supplier);
}
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 92073ac68473..f0b37c188514 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -400,9 +400,9 @@ void device_wakeup_detach_irq(struct device *dev)
}
/**
- * device_wakeup_arm_wake_irqs(void)
+ * device_wakeup_arm_wake_irqs -
*
- * Itereates over the list of device wakeirqs to arm them.
+ * Iterates over the list of device wakeirqs to arm them.
*/
void device_wakeup_arm_wake_irqs(void)
{
@@ -416,9 +416,9 @@ void device_wakeup_arm_wake_irqs(void)
}
/**
- * device_wakeup_disarm_wake_irqs(void)
+ * device_wakeup_disarm_wake_irqs -
*
- * Itereates over the list of device wakeirqs to disarm them.
+ * Iterates over the list of device wakeirqs to disarm them.
*/
void device_wakeup_disarm_wake_irqs(void)
{
@@ -532,6 +532,7 @@ EXPORT_SYMBOL_GPL(device_init_wakeup);
/**
* device_set_wakeup_enable - Enable or disable a device to wake up the system.
* @dev: Device to handle.
+ * @enable: enable/disable flag
*/
int device_set_wakeup_enable(struct device *dev, bool enable)
{
@@ -581,7 +582,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
*/
/**
- * wakup_source_activate - Mark given wakeup source as active.
+ * wakeup_source_activate - Mark given wakeup source as active.
* @ws: Wakeup source to handle.
*
* Update the @ws' statistics and, if @ws has just been activated, notify the PM
@@ -686,7 +687,7 @@ static inline void update_prevent_sleep_time(struct wakeup_source *ws,
#endif
/**
- * wakup_source_deactivate - Mark given wakeup source as inactive.
+ * wakeup_source_deactivate - Mark given wakeup source as inactive.
* @ws: Wakeup source to handle.
*
* Update the @ws' statistics and notify the PM core that the wakeup source has
@@ -785,7 +786,7 @@ EXPORT_SYMBOL_GPL(pm_relax);
/**
* pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
- * @data: Address of the wakeup source object associated with the event source.
+ * @t: timer list
*
* Call wakeup_source_deactivate() for the wakeup source whose address is stored
* in @data if it is currently active and its timer has not been canceled and
@@ -1021,7 +1022,7 @@ bool pm_save_wakeup_count(unsigned int count)
#ifdef CONFIG_PM_AUTOSLEEP
/**
* pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
- * @enabled: Whether to set or to clear the autosleep_enabled flags.
+ * @set: Whether to set or to clear the autosleep_enabled flags.
*/
void pm_wakep_autosleep_enabled(bool set)
{
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
index d638259b829a..924fac493c4f 100644
--- a/drivers/base/power/wakeup_stats.c
+++ b/drivers/base/power/wakeup_stats.c
@@ -137,7 +137,7 @@ static struct device *wakeup_source_device_create(struct device *parent,
struct wakeup_source *ws)
{
struct device *dev = NULL;
- int retval = -ENODEV;
+ int retval;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
@@ -154,7 +154,7 @@ static struct device *wakeup_source_device_create(struct device *parent,
dev_set_drvdata(dev, ws);
device_set_pm_not_required(dev);
- retval = kobject_set_name(&dev->kobj, "wakeup%d", ws->id);
+ retval = dev_set_name(dev, "wakeup%d", ws->id);
if (retval)
goto error;
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index ff2ee87987c7..211a335a608d 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -660,6 +660,7 @@ void regmap_debugfs_exit(struct regmap *map)
regmap_debugfs_free_dump_cache(map);
mutex_unlock(&map->cache_lock);
kfree(map->debugfs_name);
+ map->debugfs_name = NULL;
} else {
struct regmap_debugfs_node *node, *tmp;
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 19db764ffa4a..760296a4b606 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -38,6 +38,7 @@ struct regmap_irq_chip_data {
unsigned int *wake_buf;
unsigned int *type_buf;
unsigned int *type_buf_def;
+ unsigned int **virt_buf;
unsigned int irq_reg_stride;
unsigned int type_reg_stride;
@@ -45,6 +46,27 @@ struct regmap_irq_chip_data {
bool clear_status:1;
};
+static int sub_irq_reg(struct regmap_irq_chip_data *data,
+ unsigned int base_reg, int i)
+{
+ const struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+ struct regmap_irq_sub_irq_map *subreg;
+ unsigned int offset;
+ int reg = 0;
+
+ if (!chip->sub_reg_offsets || !chip->not_fixed_stride) {
+ /* Assume linear mapping */
+ reg = base_reg + (i * map->reg_stride * data->irq_reg_stride);
+ } else {
+ subreg = &chip->sub_reg_offsets[i];
+ offset = subreg->offset[0];
+ reg = base_reg + offset;
+ }
+
+ return reg;
+}
+
static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
int irq)
@@ -73,7 +95,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
{
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
struct regmap *map = d->map;
- int i, ret;
+ int i, j, ret;
u32 reg;
u32 unmask_offset;
u32 val;
@@ -87,8 +109,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
if (d->clear_status) {
for (i = 0; i < d->chip->num_regs; i++) {
- reg = d->chip->status_base +
- (i * map->reg_stride * d->irq_reg_stride);
+ reg = sub_irq_reg(d, d->chip->status_base, i);
ret = regmap_read(map, reg, &val);
if (ret)
@@ -108,8 +129,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
if (!d->chip->mask_base)
continue;
- reg = d->chip->mask_base +
- (i * map->reg_stride * d->irq_reg_stride);
+ reg = sub_irq_reg(d, d->chip->mask_base, i);
if (d->chip->mask_invert) {
ret = regmap_irq_update_bits(d, reg,
d->mask_buf_def[i], ~d->mask_buf[i]);
@@ -136,8 +156,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
dev_err(d->map->dev, "Failed to sync masks in %x\n",
reg);
- reg = d->chip->wake_base +
- (i * map->reg_stride * d->irq_reg_stride);
+ reg = sub_irq_reg(d, d->chip->wake_base, i);
if (d->wake_buf) {
if (d->chip->wake_invert)
ret = regmap_irq_update_bits(d, reg,
@@ -161,8 +180,8 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
* it'll be ignored in irq handler, then may introduce irq storm
*/
if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
- reg = d->chip->ack_base +
- (i * map->reg_stride * d->irq_reg_stride);
+ reg = sub_irq_reg(d, d->chip->ack_base, i);
+
/* some chips ack by write 0 */
if (d->chip->ack_invert)
ret = regmap_write(map, reg, ~d->mask_buf[i]);
@@ -187,8 +206,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
for (i = 0; i < d->chip->num_type_reg; i++) {
if (!d->type_buf_def[i])
continue;
- reg = d->chip->type_base +
- (i * map->reg_stride * d->type_reg_stride);
+ reg = sub_irq_reg(d, d->chip->type_base, i);
if (d->chip->type_invert)
ret = regmap_irq_update_bits(d, reg,
d->type_buf_def[i], ~d->type_buf[i]);
@@ -201,6 +219,20 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
}
}
+ if (d->chip->num_virt_regs) {
+ for (i = 0; i < d->chip->num_virt_regs; i++) {
+ for (j = 0; j < d->chip->num_regs; j++) {
+ reg = sub_irq_reg(d, d->chip->virt_reg_base[i],
+ j);
+ ret = regmap_write(map, reg, d->virt_buf[i][j]);
+ if (ret != 0)
+ dev_err(d->map->dev,
+ "Failed to write virt 0x%x: %d\n",
+ reg, ret);
+ }
+ }
+ }
+
if (d->chip->runtime_pm)
pm_runtime_put(map->dev);
@@ -301,6 +333,11 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
default:
return -EINVAL;
}
+
+ if (d->chip->set_type_virt)
+ return d->chip->set_type_virt(d->virt_buf, type, data->hwirq,
+ reg);
+
return 0;
}
@@ -352,8 +389,15 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
for (i = 0; i < subreg->num_regs; i++) {
unsigned int offset = subreg->offset[i];
- ret = regmap_read(map, chip->status_base + offset,
- &data->status_buf[offset]);
+ if (chip->not_fixed_stride)
+ ret = regmap_read(map,
+ chip->status_base + offset,
+ &data->status_buf[b]);
+ else
+ ret = regmap_read(map,
+ chip->status_base + offset,
+ &data->status_buf[offset]);
+
if (ret)
break;
}
@@ -474,10 +518,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
} else {
for (i = 0; i < data->chip->num_regs; i++) {
- ret = regmap_read(map, chip->status_base +
- (i * map->reg_stride
- * data->irq_reg_stride),
- &data->status_buf[i]);
+ unsigned int reg = sub_irq_reg(data,
+ data->chip->status_base, i);
+ ret = regmap_read(map, reg, &data->status_buf[i]);
if (ret != 0) {
dev_err(map->dev,
@@ -499,8 +542,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
data->status_buf[i] &= ~data->mask_buf[i];
if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
- reg = chip->ack_base +
- (i * map->reg_stride * data->irq_reg_stride);
+ reg = sub_irq_reg(data, data->chip->ack_base, i);
+
if (chip->ack_invert)
ret = regmap_write(map, reg,
~data->status_buf[i]);
@@ -605,6 +648,12 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
return -EINVAL;
}
+ if (chip->not_fixed_stride) {
+ for (i = 0; i < chip->num_regs; i++)
+ if (chip->sub_reg_offsets[i].num_regs != 1)
+ return -EINVAL;
+ }
+
if (irq_base) {
irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
if (irq_base < 0) {
@@ -662,6 +711,24 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
goto err_alloc;
}
+ if (chip->num_virt_regs) {
+ /*
+ * Create virt_buf[chip->num_extra_config_regs][chip->num_regs]
+ */
+ d->virt_buf = kcalloc(chip->num_virt_regs, sizeof(*d->virt_buf),
+ GFP_KERNEL);
+ if (!d->virt_buf)
+ goto err_alloc;
+
+ for (i = 0; i < chip->num_virt_regs; i++) {
+ d->virt_buf[i] = kcalloc(chip->num_regs,
+ sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!d->virt_buf[i])
+ goto err_alloc;
+ }
+ }
+
d->irq_chip = regmap_irq_chip;
d->irq_chip.name = chip->name;
d->irq = irq;
@@ -700,8 +767,8 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (!chip->mask_base)
continue;
- reg = chip->mask_base +
- (i * map->reg_stride * d->irq_reg_stride);
+ reg = sub_irq_reg(d, d->chip->mask_base, i);
+
if (chip->mask_invert)
ret = regmap_irq_update_bits(d, reg,
d->mask_buf[i], ~d->mask_buf[i]);
@@ -725,8 +792,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
continue;
/* Ack masked but set interrupts */
- reg = chip->status_base +
- (i * map->reg_stride * d->irq_reg_stride);
+ reg = sub_irq_reg(d, d->chip->status_base, i);
ret = regmap_read(map, reg, &d->status_buf[i]);
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
@@ -735,8 +801,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
}
if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
- reg = chip->ack_base +
- (i * map->reg_stride * d->irq_reg_stride);
+ reg = sub_irq_reg(d, d->chip->ack_base, i);
if (chip->ack_invert)
ret = regmap_write(map, reg,
~(d->status_buf[i] & d->mask_buf[i]));
@@ -765,8 +830,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (d->wake_buf) {
for (i = 0; i < chip->num_regs; i++) {
d->wake_buf[i] = d->mask_buf_def[i];
- reg = chip->wake_base +
- (i * map->reg_stride * d->irq_reg_stride);
+ reg = sub_irq_reg(d, d->chip->wake_base, i);
if (chip->wake_invert)
ret = regmap_irq_update_bits(d, reg,
@@ -786,8 +850,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (chip->num_type_reg && !chip->type_in_mask) {
for (i = 0; i < chip->num_type_reg; ++i) {
- reg = chip->type_base +
- (i * map->reg_stride * d->type_reg_stride);
+ reg = sub_irq_reg(d, d->chip->type_base, i);
ret = regmap_read(map, reg, &d->type_buf_def[i]);
@@ -838,6 +901,11 @@ err_alloc:
kfree(d->mask_buf);
kfree(d->status_buf);
kfree(d->status_reg_buf);
+ if (d->virt_buf) {
+ for (i = 0; i < chip->num_virt_regs; i++)
+ kfree(d->virt_buf[i]);
+ kfree(d->virt_buf);
+ }
kfree(d);
return ret;
}
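[Editor's note] A minimal sketch of a chip description exercising the new not_fixed_stride path follows; the field names are taken from this patch, but the exact struct regmap_irq_chip / regmap_irq_sub_irq_map layouts in include/linux/regmap.h are assumed, and this is not a complete chip description. Each bank supplies exactly one offset, as enforced by the new check in regmap_add_irq_chip_fwnode():

    static unsigned int example_bank0_offs[] = { 0x00 };
    static unsigned int example_bank1_offs[] = { 0x40 };

    static struct regmap_irq_sub_irq_map example_sub_reg_offsets[] = {
            { .num_regs = 1, .offset = example_bank0_offs },
            { .num_regs = 1, .offset = example_bank1_offs },
    };

    static struct regmap_irq_chip example_irq_chip = {
            .name             = "example",
            .status_base      = 0x10,
            .mask_base        = 0x18,
            .ack_base         = 0x20,
            .num_regs         = 2,
            .not_fixed_stride = true,
            .sub_reg_offsets  = example_sub_reg_offsets,
            /* .irqs and .num_irqs omitted for brevity */
    };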
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index fa3719ef80e4..3cc11b813f28 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -12,10 +12,10 @@
#include <linux/slab.h>
struct swnode {
- int id;
struct kobject kobj;
struct fwnode_handle fwnode;
const struct software_node *node;
+ int id;
/* hierarchy */
struct ida child_ids;
@@ -720,19 +720,30 @@ software_node_find_by_name(const struct software_node *parent, const char *name)
}
EXPORT_SYMBOL_GPL(software_node_find_by_name);
-static int
-software_node_register_properties(struct software_node *node,
- const struct property_entry *properties)
+static struct software_node *software_node_alloc(const struct property_entry *properties)
{
struct property_entry *props;
+ struct software_node *node;
props = property_entries_dup(properties);
if (IS_ERR(props))
- return PTR_ERR(props);
+ return ERR_CAST(props);
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ property_entries_free(props);
+ return ERR_PTR(-ENOMEM);
+ }
node->properties = props;
- return 0;
+ return node;
+}
+
+static void software_node_free(const struct software_node *node)
+{
+ property_entries_free(node->properties);
+ kfree(node);
}
static void software_node_release(struct kobject *kobj)
@@ -746,10 +757,9 @@ static void software_node_release(struct kobject *kobj)
ida_simple_remove(&swnode_root_ids, swnode->id);
}
- if (swnode->allocated) {
- property_entries_free(swnode->node->properties);
- kfree(swnode->node);
- }
+ if (swnode->allocated)
+ software_node_free(swnode->node);
+
ida_destroy(&swnode->child_ids);
kfree(swnode);
}
@@ -767,22 +777,19 @@ swnode_register(const struct software_node *node, struct swnode *parent,
int ret;
swnode = kzalloc(sizeof(*swnode), GFP_KERNEL);
- if (!swnode) {
- ret = -ENOMEM;
- goto out_err;
- }
+ if (!swnode)
+ return ERR_PTR(-ENOMEM);
ret = ida_simple_get(parent ? &parent->child_ids : &swnode_root_ids,
0, 0, GFP_KERNEL);
if (ret < 0) {
kfree(swnode);
- goto out_err;
+ return ERR_PTR(ret);
}
swnode->id = ret;
swnode->node = node;
swnode->parent = parent;
- swnode->allocated = allocated;
swnode->kobj.kset = swnode_kset;
fwnode_init(&swnode->fwnode, &software_node_ops);
@@ -803,16 +810,17 @@ swnode_register(const struct software_node *node, struct swnode *parent,
return ERR_PTR(ret);
}
+ /*
+ * Assign the flag only in the successful case, so
+ * the above kobject_put() won't mess up with properties.
+ */
+ swnode->allocated = allocated;
+
if (parent)
list_add_tail(&swnode->entry, &parent->children);
kobject_uevent(&swnode->kobj, KOBJ_ADD);
return &swnode->fwnode;
-
-out_err:
- if (allocated)
- property_entries_free(node->properties);
- return ERR_PTR(ret);
}
/**
@@ -880,7 +888,11 @@ EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
* software_node_register_node_group - Register a group of software nodes
* @node_group: NULL terminated array of software node pointers to be registered
*
- * Register multiple software nodes at once.
+ * Register multiple software nodes at once. If any node in the array
+ * has its .parent pointer set (which can only be to another software_node),
+ * then its parent **must** have been registered before it is; either outside
+ * of this function or by ordering the array such that parent comes before
+ * child.
*/
int software_node_register_node_group(const struct software_node **node_group)
{
@@ -906,10 +918,14 @@ EXPORT_SYMBOL_GPL(software_node_register_node_group);
* software_node_unregister_node_group - Unregister a group of software nodes
* @node_group: NULL terminated array of software node pointers to be unregistered
*
- * Unregister multiple software nodes at once. The array will be unwound in
- * reverse order (i.e. last entry first) and thus if any members of the array are
- * children of another member then the children must appear later in the list such
- * that they are unregistered first.
+ * Unregister multiple software nodes at once. If parent pointers are set up
+ * in any of the software nodes then the array **must** be ordered such that
+ * parents come before their children.
+ *
+ * NOTE: If you are uncertain whether the array is ordered such that
+ * parents will be unregistered before their children, it is wiser to
+ * remove the nodes individually, in the correct order (child before
+ * parent).
*/
void software_node_unregister_node_group(
const struct software_node **node_group)
@@ -963,31 +979,28 @@ struct fwnode_handle *
fwnode_create_software_node(const struct property_entry *properties,
const struct fwnode_handle *parent)
{
+ struct fwnode_handle *fwnode;
struct software_node *node;
- struct swnode *p = NULL;
- int ret;
+ struct swnode *p;
- if (parent) {
- if (IS_ERR(parent))
- return ERR_CAST(parent);
- if (!is_software_node(parent))
- return ERR_PTR(-EINVAL);
- p = to_swnode(parent);
- }
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return ERR_PTR(-ENOMEM);
+ p = to_swnode(parent);
+ if (parent && !p)
+ return ERR_PTR(-EINVAL);
- ret = software_node_register_properties(node, properties);
- if (ret) {
- kfree(node);
- return ERR_PTR(ret);
- }
+ node = software_node_alloc(properties);
+ if (IS_ERR(node))
+ return ERR_CAST(node);
node->parent = p ? p->node : NULL;
- return swnode_register(node, p, 1);
+ fwnode = swnode_register(node, p, 1);
+ if (IS_ERR(fwnode))
+ software_node_free(node);
+
+ return fwnode;
}
EXPORT_SYMBOL_GPL(fwnode_create_software_node);
@@ -1032,6 +1045,7 @@ int device_add_software_node(struct device *dev, const struct software_node *nod
}
set_secondary_fwnode(dev, &swnode->fwnode);
+ software_node_notify(dev, KOBJ_ADD);
return 0;
}
@@ -1105,8 +1119,8 @@ int software_node_notify(struct device *dev, unsigned long action)
switch (action) {
case KOBJ_ADD:
- ret = sysfs_create_link(&dev->kobj, &swnode->kobj,
- "software_node");
+ ret = sysfs_create_link_nowarn(&dev->kobj, &swnode->kobj,
+ "software_node");
if (ret)
break;
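[Editor's note] The ordering requirement spelled out in the updated kerneldoc above translates to something like the following (node names invented for illustration): parents come first in the group array, for both registration and unregistration.

    #include <linux/property.h>

    static const struct software_node example_parent = {
            .name = "example-parent",
    };

    static const struct software_node example_child = {
            .name   = "example-child",
            .parent = &example_parent,
    };

    static const struct software_node *example_group[] = {
            &example_parent,        /* must come before its child */
            &example_child,
            NULL
    };

    static int example_register(void)
    {
            return software_node_register_node_group(example_group);
    }

    static void example_unregister(void)
    {
            software_node_unregister_node_group(example_group);
    }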
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index ba225eb1b761..2f3fa31a948e 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -8,7 +8,7 @@ config TEST_ASYNC_DRIVER_PROBE
The module name will be test_async_driver_probe.ko
If unsure say N.
-config KUNIT_DRIVER_PE_TEST
+config DRIVER_PE_KUNIT_TEST
bool "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
depends on KUNIT=y
default KUNIT_ALL_TESTS
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
index 2f15fae8625f..64b2f3d744d5 100644
--- a/drivers/base/test/Makefile
+++ b/drivers/base/test/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
-obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o
+obj-$(CONFIG_DRIVER_PE_KUNIT_TEST) += property-entry-test.o
CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all
diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c
index abe03315180f..1106fedcceed 100644
--- a/drivers/base/test/property-entry-test.c
+++ b/drivers/base/test/property-entry-test.c
@@ -27,6 +27,9 @@ static void pe_test_uints(struct kunit *test)
node = fwnode_create_software_node(entries, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+ error = fwnode_property_count_u8(node, "prop-u8");
+ KUNIT_EXPECT_EQ(test, error, 1);
+
error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
@@ -48,6 +51,9 @@ static void pe_test_uints(struct kunit *test)
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
+ error = fwnode_property_count_u16(node, "prop-u16");
+ KUNIT_EXPECT_EQ(test, error, 1);
+
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
@@ -65,6 +71,9 @@ static void pe_test_uints(struct kunit *test)
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
+ error = fwnode_property_count_u32(node, "prop-u32");
+ KUNIT_EXPECT_EQ(test, error, 1);
+
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
@@ -82,6 +91,9 @@ static void pe_test_uints(struct kunit *test)
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
+ error = fwnode_property_count_u64(node, "prop-u64");
+ KUNIT_EXPECT_EQ(test, error, 1);
+
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
@@ -95,15 +107,19 @@ static void pe_test_uints(struct kunit *test)
error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
KUNIT_EXPECT_NE(test, error, 0);
+ /* Count 64-bit values as 16-bit */
+ error = fwnode_property_count_u16(node, "prop-u64");
+ KUNIT_EXPECT_EQ(test, error, 4);
+
fwnode_remove_software_node(node);
}
static void pe_test_uint_arrays(struct kunit *test)
{
- static const u8 a_u8[16] = { 8, 9 };
- static const u16 a_u16[16] = { 16, 17 };
- static const u32 a_u32[16] = { 32, 33 };
- static const u64 a_u64[16] = { 64, 65 };
+ static const u8 a_u8[10] = { 8, 9 };
+ static const u16 a_u16[10] = { 16, 17 };
+ static const u32 a_u32[10] = { 32, 33 };
+ static const u64 a_u64[10] = { 64, 65 };
static const struct property_entry entries[] = {
PROPERTY_ENTRY_U8_ARRAY("prop-u8", a_u8),
PROPERTY_ENTRY_U16_ARRAY("prop-u16", a_u16),
@@ -126,6 +142,9 @@ static void pe_test_uint_arrays(struct kunit *test)
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
+ error = fwnode_property_count_u8(node, "prop-u8");
+ KUNIT_EXPECT_EQ(test, error, 10);
+
error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
@@ -148,6 +167,9 @@ static void pe_test_uint_arrays(struct kunit *test)
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
+ error = fwnode_property_count_u16(node, "prop-u16");
+ KUNIT_EXPECT_EQ(test, error, 10);
+
error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
@@ -170,6 +192,9 @@ static void pe_test_uint_arrays(struct kunit *test)
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
+ error = fwnode_property_count_u32(node, "prop-u32");
+ KUNIT_EXPECT_EQ(test, error, 10);
+
error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
@@ -192,6 +217,9 @@ static void pe_test_uint_arrays(struct kunit *test)
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
+ error = fwnode_property_count_u64(node, "prop-u64");
+ KUNIT_EXPECT_EQ(test, error, 10);
+
error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
@@ -210,6 +238,14 @@ static void pe_test_uint_arrays(struct kunit *test)
error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
KUNIT_EXPECT_NE(test, error, 0);
+ /* Count 64-bit values as 16-bit */
+ error = fwnode_property_count_u16(node, "prop-u64");
+ KUNIT_EXPECT_EQ(test, error, 40);
+
+ /* Other way around */
+ error = fwnode_property_count_u64(node, "prop-u16");
+ KUNIT_EXPECT_EQ(test, error, 2);
+
fwnode_remove_software_node(node);
}
@@ -239,6 +275,9 @@ static void pe_test_strings(struct kunit *test)
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_STREQ(test, str, "single");
+ error = fwnode_property_string_array_count(node, "str");
+ KUNIT_EXPECT_EQ(test, error, 1);
+
error = fwnode_property_read_string_array(node, "str", strs, 1);
KUNIT_EXPECT_EQ(test, error, 1);
KUNIT_EXPECT_STREQ(test, strs[0], "single");
@@ -258,6 +297,9 @@ static void pe_test_strings(struct kunit *test)
KUNIT_EXPECT_EQ(test, error, 0);
KUNIT_EXPECT_STREQ(test, str, "");
+ error = fwnode_property_string_array_count(node, "strs");
+ KUNIT_EXPECT_EQ(test, error, 2);
+
error = fwnode_property_read_string_array(node, "strs", strs, 3);
KUNIT_EXPECT_EQ(test, error, 2);
KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
@@ -370,15 +412,8 @@ static void pe_test_reference(struct kunit *test)
};
static const struct software_node_ref_args refs[] = {
- {
- .node = &nodes[0],
- .nargs = 0,
- },
- {
- .node = &nodes[1],
- .nargs = 2,
- .args = { 3, 4 },
- },
+ SOFTWARE_NODE_REFERENCE(&nodes[0]),
+ SOFTWARE_NODE_REFERENCE(&nodes[1], 3, 4),
};
const struct property_entry entries[] = {