Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig                          11
-rw-r--r--  drivers/base/arch_topology.c                  42
-rw-r--r--  drivers/base/auxiliary.c                     152
-rw-r--r--  drivers/base/bus.c                             4
-rw-r--r--  drivers/base/core.c                           34
-rw-r--r--  drivers/base/dd.c                              7
-rw-r--r--  drivers/base/devtmpfs.c                       10
-rw-r--r--  drivers/base/node.c                            3
-rw-r--r--  drivers/base/platform.c                        9
-rw-r--r--  drivers/base/power/main.c                      2
-rw-r--r--  drivers/base/power/runtime.c                  98
-rw-r--r--  drivers/base/property.c                      150
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c           2
-rw-r--r--  drivers/base/regmap/regmap.c                   2
-rw-r--r--  drivers/base/swnode.c                          2
-rw-r--r--  drivers/base/test/test_async_driver_probe.c   14
-rw-r--r--  drivers/base/topology.c                       28
17 files changed, 394 insertions, 176 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index ffcbe2bc460e..6f04b831a5c0 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -62,6 +62,17 @@ config DEVTMPFS_MOUNT
rescue mode with init=/bin/sh, even when the /dev directory
on the rootfs is completely empty.
+config DEVTMPFS_SAFE
+ bool "Use nosuid,noexec mount options on devtmpfs"
+ depends on DEVTMPFS
+ help
+ This instructs the kernel to include the MS_NOEXEC and MS_NOSUID mount
+ flags when mounting devtmpfs.
+
+ Notice: If enabled, things like /dev/mem cannot be mmapped
+ with the PROT_EXEC flag. This can break, for example, non-KMS
+ video drivers.
+
config STANDALONE
bool "Select only drivers that don't need compile-time external firmware"
default y
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index ff16a36a908b..976154140f0b 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -22,6 +22,7 @@
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
+static DEFINE_PER_CPU(u32, freq_factor) = 1;
static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
@@ -155,15 +156,49 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
DEFINE_PER_CPU(unsigned long, thermal_pressure);
-void topology_set_thermal_pressure(const struct cpumask *cpus,
- unsigned long th_pressure)
+/**
+ * topology_update_thermal_pressure() - Update thermal pressure for CPUs
+ * @cpus : The related CPUs for which capacity has been reduced
+ * @capped_freq : The maximum allowed frequency that CPUs can run at
+ *
+ * Update the value of thermal pressure for all @cpus in the mask. The
+ * cpumask should include all (online+offline) affected CPUs, to avoid
+ * operating on stale data when hot-plug is used for some CPUs. The
+ * @capped_freq reflects the currently allowed max CPUs frequency due to
+ * thermal capping. It might be also a boost frequency value, which is bigger
+ * than the internal 'freq_factor' max frequency. In such case the pressure
+ * value should simply be removed, since this is an indication that there is
+ * no thermal throttling. The @capped_freq must be provided in kHz.
+ */
+void topology_update_thermal_pressure(const struct cpumask *cpus,
+ unsigned long capped_freq)
{
+ unsigned long max_capacity, capacity, th_pressure;
+ u32 max_freq;
int cpu;
+ cpu = cpumask_first(cpus);
+ max_capacity = arch_scale_cpu_capacity(cpu);
+ max_freq = per_cpu(freq_factor, cpu);
+
+ /* Convert to MHz scale which is used in 'freq_factor' */
+ capped_freq /= 1000;
+
+ /*
+ * Handle properly the boost frequencies, which should simply clean
+ * the thermal pressure value.
+ */
+ if (max_freq <= capped_freq)
+ capacity = max_capacity;
+ else
+ capacity = mult_frac(max_capacity, capped_freq, max_freq);
+
+ th_pressure = max_capacity - capacity;
+
for_each_cpu(cpu, cpus)
WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
-EXPORT_SYMBOL_GPL(topology_set_thermal_pressure);
+EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
@@ -217,7 +252,6 @@ static void update_topology_flags_workfn(struct work_struct *work)
update_topology = 0;
}
-static DEFINE_PER_CPU(u32, freq_factor) = 1;
static u32 *raw_capacity;
static int free_raw_capacity(void)
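For reference, a minimal sketch (not part of this patch) of how a cpufreq cooling or thermal driver might report a new frequency cap through the renamed helper; the policy pointer, function name and capped_khz argument are illustrative:

#include <linux/arch_topology.h>
#include <linux/cpufreq.h>

/* Illustrative caller: propagate a thermal cap (in kHz) for all CPUs of a policy. */
static void example_report_thermal_cap(struct cpufreq_policy *policy,
				       unsigned long capped_khz)
{
	/* Cover online and offline CPUs sharing the policy, as the kerneldoc asks. */
	topology_update_thermal_pressure(policy->related_cpus, capped_khz);
}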
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 9230c9472bb0..8c5e65930617 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -17,6 +17,147 @@
#include <linux/auxiliary_bus.h>
#include "base.h"
+/**
+ * DOC: PURPOSE
+ *
+ * In some subsystems, the functionality of the core device (PCI/ACPI/other) is
+ * too complex for a single device to be managed by a monolithic driver (e.g.
+ * Sound Open Firmware), multiple devices might implement a common intersection
+ * of functionality (e.g. NICs + RDMA), or a driver may want to export an
+ * interface for another subsystem to drive (e.g. SIOV Physical Function export
+ * Virtual Function management). A split of the functionality into child-
+ * devices representing sub-domains of functionality makes it possible to
+ * compartmentalize, layer, and distribute domain-specific concerns via a Linux
+ * device-driver model.
+ *
+ * An example for this kind of requirement is the audio subsystem where a
+ * single IP is handling multiple entities such as HDMI, Soundwire, local
+ * devices such as mics/speakers etc. The split for the core's functionality
+ * can be arbitrary or be defined by the DSP firmware topology and include
+ * hooks for test/debug. This allows for the audio core device to be minimal
+ * and focused on hardware-specific control and communication.
+ *
+ * Each auxiliary_device represents a part of its parent functionality. The
+ * generic behavior can be extended and specialized as needed by encapsulating
+ * an auxiliary_device within other domain-specific structures and the use of
+ * .ops callbacks. Devices on the auxiliary bus do not share any structures and
+ * the use of a communication channel with the parent is domain-specific.
+ *
+ * Note that ops are intended as a way to augment instance behavior within a
+ * class of auxiliary devices, it is not the mechanism for exporting common
+ * infrastructure from the parent. Consider EXPORT_SYMBOL_NS() to convey
+ * infrastructure from the parent module to the auxiliary module(s).
+ */
+
+/**
+ * DOC: USAGE
+ *
+ * The auxiliary bus is to be used when a driver and one or more kernel
+ * modules, who share a common header file with the driver, need a mechanism to
+ * connect and provide access to a shared object allocated by the
+ * auxiliary_device's registering driver. The registering driver for the
+ * auxiliary_device(s) and the kernel module(s) registering auxiliary_drivers
+ * can be from the same subsystem, or from multiple subsystems.
+ *
+ * The emphasis here is on a common generic interface that keeps subsystem
+ * customization out of the bus infrastructure.
+ *
+ * One example is a PCI network device that is RDMA-capable and exports a child
+ * device to be driven by an auxiliary_driver in the RDMA subsystem. The PCI
+ * driver allocates and registers an auxiliary_device for each physical
+ * function on the NIC. The RDMA driver registers an auxiliary_driver that
+ * claims each of these auxiliary_devices. This conveys data/ops published by
+ * the parent PCI device/driver to the RDMA auxiliary_driver.
+ *
+ * Another use case is for the PCI device to be split out into multiple sub
+ * functions. For each sub function an auxiliary_device is created. A PCI sub
+ * function driver binds to such devices that creates its own one or more class
+ * devices. A PCI sub function auxiliary device is likely to be contained in a
+ * struct with additional attributes such as user defined sub function number
+ * and optional attributes such as resources and a link to the parent device.
+ * These attributes could be used by systemd/udev; and hence should be
+ * initialized before a driver binds to an auxiliary_device.
+ *
+ * A key requirement for utilizing the auxiliary bus is that there is no
+ * dependency on a physical bus, device, register accesses or regmap support.
+ * These individual devices split from the core cannot live on the platform bus
+ * as they are not physical devices that are controlled by DT/ACPI. The same
+ * argument applies for not using MFD in this scenario as MFD relies on
+ * individual function devices being physical devices.
+ */
+
+/**
+ * DOC: EXAMPLE
+ *
+ * Auxiliary devices are created and registered by a subsystem-level core
+ * device that needs to break up its functionality into smaller fragments. One
+ * way to extend the scope of an auxiliary_device is to encapsulate it within a
+ * domain-specific structure defined by the parent device. This structure
+ * contains the auxiliary_device and any associated shared data/callbacks
+ * needed to establish the connection with the parent.
+ *
+ * An example is:
+ *
+ * .. code-block:: c
+ *
+ * struct foo {
+ * struct auxiliary_device auxdev;
+ * void (*connect)(struct auxiliary_device *auxdev);
+ * void (*disconnect)(struct auxiliary_device *auxdev);
+ * void *data;
+ * };
+ *
+ * The parent device then registers the auxiliary_device by calling
+ * auxiliary_device_init(), and then auxiliary_device_add(), with the pointer
+ * to the auxdev member of the above structure. The parent provides a name for
+ * the auxiliary_device that, combined with the parent's KBUILD_MODNAME,
+ * creates a match_name that is used for matching and binding with a driver.
+ *
+ * Whenever an auxiliary_driver is registered, based on the match_name, the
+ * auxiliary_driver's probe() is invoked for the matching devices. The
+ * auxiliary_driver can also be encapsulated inside custom drivers that make
+ * the core device's functionality extensible by adding additional
+ * domain-specific ops as follows:
+ *
+ * .. code-block:: c
+ *
+ * struct my_ops {
+ * void (*send)(struct auxiliary_device *auxdev);
+ * void (*receive)(struct auxiliary_device *auxdev);
+ * };
+ *
+ *
+ * struct my_driver {
+ * struct auxiliary_driver auxiliary_drv;
+ * const struct my_ops ops;
+ * };
+ *
+ * An example of this type of usage is:
+ *
+ * .. code-block:: c
+ *
+ * const struct auxiliary_device_id my_auxiliary_id_table[] = {
+ * { .name = "foo_mod.foo_dev" },
+ * { },
+ * };
+ *
+ * const struct my_ops my_custom_ops = {
+ * .send = my_tx,
+ * .receive = my_rx,
+ * };
+ *
+ * const struct my_driver my_drv = {
+ * .auxiliary_drv = {
+ * .name = "myauxiliarydrv",
+ * .id_table = my_auxiliary_id_table,
+ * .probe = my_probe,
+ * .remove = my_remove,
+ * .shutdown = my_shutdown,
+ * },
+ * .ops = my_custom_ops,
+ * };
+ */
+
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
{
@@ -117,7 +258,7 @@ static struct bus_type auxiliary_bus_type = {
* auxiliary_device_init - check auxiliary_device and initialize
* @auxdev: auxiliary device struct
*
- * This is the first step in the two-step process to register an
+ * This is the second step in the three-step process to register an
* auxiliary_device.
*
* When this function returns an error code, then the device_initialize will
@@ -155,7 +296,7 @@ EXPORT_SYMBOL_GPL(auxiliary_device_init);
* @auxdev: auxiliary bus device to add to the bus
* @modname: name of the parent device's driver module
*
- * This is the second step in the two-step process to register an
+ * This is the third step in the three-step process to register an
* auxiliary_device.
*
* This function must be called after a successful call to
@@ -202,6 +343,8 @@ EXPORT_SYMBOL_GPL(__auxiliary_device_add);
* This function returns a reference to a device that is 'found'
* for later use, as determined by the @match callback.
*
+ * The reference returned should be released with put_device().
+ *
* The callback should return 0 if the device doesn't match and non-zero
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
@@ -225,6 +368,11 @@ EXPORT_SYMBOL_GPL(auxiliary_find_device);
* @auxdrv: auxiliary_driver structure
* @owner: owning module/driver
* @modname: KBUILD_MODNAME for parent driver
+ *
+ * The expectation is that users will call the "auxiliary_driver_register"
+ * macro so that the caller's KBUILD_MODNAME is automatically inserted for the
+ * modname parameter. Only if a user requires a custom name would this version
+ * be called directly.
*/
int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
struct module *owner, const char *modname)
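To round out the EXAMPLE documentation added above, a hedged sketch of what the referenced my_probe() callback could look like; struct foo and its connect callback are the illustrative names used in that documentation:

#include <linux/auxiliary_bus.h>
#include <linux/kernel.h>

static int my_probe(struct auxiliary_device *auxdev,
		    const struct auxiliary_device_id *id)
{
	/* Recover the parent-defined wrapper around the auxiliary_device. */
	struct foo *f = container_of(auxdev, struct foo, auxdev);

	if (f->connect)
		f->connect(auxdev);

	return 0;
}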
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index bdc98c5713d5..97936ec49bde 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -163,9 +163,9 @@ static struct kobj_type bus_ktype = {
.release = bus_release,
};
-static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
+static int bus_uevent_filter(struct kobject *kobj)
{
- struct kobj_type *ktype = get_ktype(kobj);
+ const struct kobj_type *ktype = get_ktype(kobj);
if (ktype == &bus_ktype)
return 1;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f8987867789f..7bb957b11861 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -485,8 +485,7 @@ static void device_link_release_fn(struct work_struct *work)
/* Ensure that all references to the link object have been dropped. */
device_link_synchronize_removal();
- while (refcount_dec_not_one(&link->rpm_active))
- pm_runtime_put(link->supplier);
+ pm_runtime_release_supplier(link, true);
put_device(link->consumer);
put_device(link->supplier);
@@ -2261,9 +2260,9 @@ static struct kobj_type device_ktype = {
};
-static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
+static int dev_uevent_filter(struct kobject *kobj)
{
- struct kobj_type *ktype = get_ktype(kobj);
+ const struct kobj_type *ktype = get_ktype(kobj);
if (ktype == &device_ktype) {
struct device *dev = kobj_to_dev(kobj);
@@ -2275,7 +2274,7 @@ static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
return 0;
}
-static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
+static const char *dev_uevent_name(struct kobject *kobj)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2286,8 +2285,7 @@ static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
return NULL;
}
-static int dev_uevent(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env)
+static int dev_uevent(struct kobject *kobj, struct kobj_uevent_env *env)
{
struct device *dev = kobj_to_dev(kobj);
int retval = 0;
@@ -2382,7 +2380,7 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
/* respect filter */
if (kset->uevent_ops && kset->uevent_ops->filter)
- if (!kset->uevent_ops->filter(kset, &dev->kobj))
+ if (!kset->uevent_ops->filter(&dev->kobj))
goto out;
env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
@@ -2390,7 +2388,7 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
return -ENOMEM;
/* let the kset specific function add its keys */
- retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
+ retval = kset->uevent_ops->uevent(&dev->kobj, env);
if (retval)
goto out;
@@ -3025,6 +3023,23 @@ static inline struct kobject *get_glue_dir(struct device *dev)
return dev->kobj.parent;
}
+/**
+ * kobject_has_children - Returns whether a kobject has children.
+ * @kobj: the object to test
+ *
+ * This will return whether a kobject has other kobjects as children.
+ *
+ * It does NOT account for the presence of attribute files, only sub
+ * directories. It also assumes there is no concurrent addition or
+ * removal of such children, and thus relies on external locking.
+ */
+static inline bool kobject_has_children(struct kobject *kobj)
+{
+ WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
+
+ return kobj->sd && kobj->sd->dir.subdirs;
+}
+
/*
* make sure cleaning up dir as the last step, we need to make
* sure .release handler of kobject is run with holding the
@@ -3578,7 +3593,6 @@ void device_del(struct device *dev)
device_pm_remove(dev);
driver_deferred_probe_del(dev);
device_platform_notify_remove(dev);
- device_remove_properties(dev);
device_links_purge(dev);
if (dev->bus)
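The kset_uevent_ops callbacks above lose their struct kset argument; a hedged sketch of the driver core's ops table with the new callback signatures (layout assumed from include/linux/kobject.h):

#include <linux/kobject.h>

/* Callback types after this change: the kset parameter is gone. */
static const struct kset_uevent_ops device_uevent_ops = {
	.filter = dev_uevent_filter,	/* int (*)(struct kobject *kobj) */
	.name	= dev_uevent_name,	/* const char *(*)(struct kobject *kobj) */
	.uevent	= dev_uevent,		/* int (*)(struct kobject *, struct kobj_uevent_env *) */
};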
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 68ea1f949daa..9eaaff2f556c 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -577,14 +577,14 @@ re_probe:
if (dev->bus->dma_configure) {
ret = dev->bus->dma_configure(dev);
if (ret)
- goto probe_failed;
+ goto pinctrl_bind_failed;
}
ret = driver_sysfs_add(dev);
if (ret) {
pr_err("%s: driver_sysfs_add(%s) failed\n",
__func__, dev_name(dev));
- goto probe_failed;
+ goto sysfs_failed;
}
if (dev->pm_domain && dev->pm_domain->activate) {
@@ -657,6 +657,8 @@ dev_groups_failed:
else if (drv->remove)
drv->remove(dev);
probe_failed:
+ driver_sysfs_remove(dev);
+sysfs_failed:
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
@@ -666,7 +668,6 @@ pinctrl_bind_failed:
arch_teardown_dma_ops(dev);
kfree(dev->dma_range_map);
dev->dma_range_map = NULL;
- driver_sysfs_remove(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 8be352ab4ddb..1e2c2d3882e2 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -29,6 +29,12 @@
#include <uapi/linux/mount.h>
#include "base.h"
+#ifdef CONFIG_DEVTMPFS_SAFE
+#define DEVTMPFS_MFLAGS (MS_SILENT | MS_NOEXEC | MS_NOSUID)
+#else
+#define DEVTMPFS_MFLAGS (MS_SILENT)
+#endif
+
static struct task_struct *thread;
static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT);
@@ -363,7 +369,7 @@ int __init devtmpfs_mount(void)
if (!thread)
return 0;
- err = init_mount("devtmpfs", "dev", "devtmpfs", MS_SILENT, NULL);
+ err = init_mount("devtmpfs", "dev", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
if (err)
printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
else
@@ -412,7 +418,7 @@ static noinline int __init devtmpfs_setup(void *p)
err = ksys_unshare(CLONE_NEWNS);
if (err)
goto out;
- err = init_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL);
+ err = init_mount("devtmpfs", "/", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
if (err)
goto out;
init_chdir("/.."); /* will traverse into overmounted root */
diff --git a/drivers/base/node.c b/drivers/base/node.c
index b5a4ba18f9f9..87acc47e8951 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -581,6 +581,9 @@ static const struct attribute_group node_dev_group = {
static const struct attribute_group *node_dev_groups[] = {
&node_dev_group,
+#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
+ &arch_node_dev_group,
+#endif
NULL
};
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 598acf93a360..6cb04ac48bf0 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -258,8 +258,9 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
int ret;
ret = platform_get_irq_optional(dev, num);
- if (ret < 0 && ret != -EPROBE_DEFER)
- dev_err(&dev->dev, "IRQ index %u not found\n", num);
+ if (ret < 0)
+ return dev_err_probe(&dev->dev, ret,
+ "IRQ index %u not found\n", num);
return ret;
}
@@ -762,6 +763,10 @@ EXPORT_SYMBOL_GPL(platform_device_del);
/**
* platform_device_register - add a platform-level device
* @pdev: platform device we're adding
+ *
+ * NOTE: _Never_ directly free @pdev after calling this function, even if it
+ * returned an error! Always use platform_device_put() to give up the
+ * reference initialised in this function instead.
*/
int platform_device_register(struct platform_device *pdev)
{
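A hedged sketch of the error handling the new NOTE asks callers to follow; example_add() and the pdev it registers are illustrative names:

#include <linux/platform_device.h>

static int example_add(struct platform_device *pdev)
{
	int ret;

	ret = platform_device_register(pdev);
	if (ret) {
		/* Never kfree(pdev) here: drop the reference taken by register. */
		platform_device_put(pdev);
		return ret;
	}

	return 0;
}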
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index f4d0c555de29..04ea92cbd9cf 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1902,7 +1902,7 @@ int dpm_prepare(pm_message_t state)
device_block_probing();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_list)) {
+ while (!list_empty(&dpm_list) && !error) {
struct device *dev = to_device(dpm_list.next);
get_device(dev);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index d504cd4ab3cb..2f3cce17219b 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -305,19 +305,40 @@ static int rpm_get_suppliers(struct device *dev)
return 0;
}
+/**
+ * pm_runtime_release_supplier - Drop references to device link's supplier.
+ * @link: Target device link.
+ * @check_idle: Whether or not to check if the supplier device is idle.
+ *
+ * Drop all runtime PM references associated with @link to its supplier device
+ * and if @check_idle is set, check if that device is idle (and so it can be
+ * suspended).
+ */
+void pm_runtime_release_supplier(struct device_link *link, bool check_idle)
+{
+ struct device *supplier = link->supplier;
+
+ /*
+ * The additional power.usage_count check is a safety net in case
+ * the rpm_active refcount becomes saturated, in which case
+ * refcount_dec_not_one() would return true forever, but it is not
+ * strictly necessary.
+ */
+ while (refcount_dec_not_one(&link->rpm_active) &&
+ atomic_read(&supplier->power.usage_count) > 0)
+ pm_runtime_put_noidle(supplier);
+
+ if (check_idle)
+ pm_request_idle(supplier);
+}
+
static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
struct device_link *link;
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held()) {
-
- while (refcount_dec_not_one(&link->rpm_active))
- pm_runtime_put_noidle(link->supplier);
-
- if (try_to_suspend)
- pm_request_idle(link->supplier);
- }
+ device_links_read_lock_held())
+ pm_runtime_release_supplier(link, try_to_suspend);
}
static void rpm_put_suppliers(struct device *dev)
@@ -742,13 +763,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
trace_rpm_resume_rcuidle(dev, rpmflags);
repeat:
- if (dev->power.runtime_error)
+ if (dev->power.runtime_error) {
retval = -EINVAL;
- else if (dev->power.disable_depth == 1 && dev->power.is_suspended
- && dev->power.runtime_status == RPM_ACTIVE)
- retval = 1;
- else if (dev->power.disable_depth > 0)
- retval = -EACCES;
+ } else if (dev->power.disable_depth > 0) {
+ if (dev->power.runtime_status == RPM_ACTIVE &&
+ dev->power.last_status == RPM_ACTIVE)
+ retval = 1;
+ else
+ retval = -EACCES;
+ }
if (retval)
goto out;
@@ -1410,8 +1433,10 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
/* Update time accounting before disabling PM-runtime. */
update_pm_runtime_accounting(dev);
- if (!dev->power.disable_depth++)
+ if (!dev->power.disable_depth++) {
__pm_runtime_barrier(dev);
+ dev->power.last_status = dev->power.runtime_status;
+ }
out:
spin_unlock_irq(&dev->power.lock);
@@ -1428,23 +1453,23 @@ void pm_runtime_enable(struct device *dev)
spin_lock_irqsave(&dev->power.lock, flags);
- if (dev->power.disable_depth > 0) {
- dev->power.disable_depth--;
-
- /* About to enable runtime pm, set accounting_timestamp to now */
- if (!dev->power.disable_depth)
- dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
- } else {
+ if (!dev->power.disable_depth) {
dev_warn(dev, "Unbalanced %s!\n", __func__);
+ goto out;
}
- WARN(!dev->power.disable_depth &&
- dev->power.runtime_status == RPM_SUSPENDED &&
- !dev->power.ignore_children &&
- atomic_read(&dev->power.child_count) > 0,
- "Enabling runtime PM for inactive device (%s) with active children\n",
- dev_name(dev));
+ if (--dev->power.disable_depth > 0)
+ goto out;
+
+ dev->power.last_status = RPM_INVALID;
+ dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+
+ if (dev->power.runtime_status == RPM_SUSPENDED &&
+ !dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count) > 0)
+ dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
+out:
spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
@@ -1640,6 +1665,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
void pm_runtime_init(struct device *dev)
{
dev->power.runtime_status = RPM_SUSPENDED;
+ dev->power.last_status = RPM_INVALID;
dev->power.idle_notification = false;
dev->power.disable_depth = 1;
@@ -1722,8 +1748,6 @@ void pm_runtime_get_suppliers(struct device *dev)
void pm_runtime_put_suppliers(struct device *dev)
{
struct device_link *link;
- unsigned long flags;
- bool put;
int idx;
idx = device_links_read_lock();
@@ -1731,11 +1755,17 @@ void pm_runtime_put_suppliers(struct device *dev)
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held())
if (link->supplier_preactivated) {
+ bool put;
+
link->supplier_preactivated = false;
- spin_lock_irqsave(&dev->power.lock, flags);
+
+ spin_lock_irq(&dev->power.lock);
+
put = pm_runtime_status_suspended(dev) &&
refcount_dec_not_one(&link->rpm_active);
- spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ spin_unlock_irq(&dev->power.lock);
+
if (put)
pm_runtime_put(link->supplier);
}
@@ -1772,9 +1802,7 @@ void pm_runtime_drop_link(struct device_link *link)
return;
pm_runtime_drop_link_count(link->consumer);
-
- while (refcount_dec_not_one(&link->rpm_active))
- pm_runtime_put(link->supplier);
+ pm_runtime_release_supplier(link, true);
}
static bool pm_runtime_need_not_resume(struct device *dev)
diff --git a/drivers/base/property.c b/drivers/base/property.c
index f1f35b48ab8b..a74c21af97c1 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -478,8 +478,17 @@ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
unsigned int nargs, unsigned int index,
struct fwnode_reference_args *args)
{
- return fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
- nargs, index, args);
+ int ret;
+
+ ret = fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
+ nargs, index, args);
+
+ if (ret < 0 && !IS_ERR_OR_NULL(fwnode) &&
+ !IS_ERR_OR_NULL(fwnode->secondary))
+ ret = fwnode_call_int_op(fwnode->secondary, get_reference_args,
+ prop, nargs_prop, nargs, index, args);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
@@ -508,54 +517,6 @@ struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_find_reference);
/**
- * device_remove_properties - Remove properties from a device object.
- * @dev: Device whose properties to remove.
- *
- * The function removes properties previously associated to the device
- * firmware node with device_add_properties(). Memory allocated to the
- * properties will also be released.
- */
-void device_remove_properties(struct device *dev)
-{
- struct fwnode_handle *fwnode = dev_fwnode(dev);
-
- if (!fwnode)
- return;
-
- if (is_software_node(fwnode->secondary)) {
- fwnode_remove_software_node(fwnode->secondary);
- set_secondary_fwnode(dev, NULL);
- }
-}
-EXPORT_SYMBOL_GPL(device_remove_properties);
-
-/**
- * device_add_properties - Add a collection of properties to a device object.
- * @dev: Device to add properties to.
- * @properties: Collection of properties to add.
- *
- * Associate a collection of device properties represented by @properties with
- * @dev. The function takes a copy of @properties.
- *
- * WARNING: The callers should not use this function if it is known that there
- * is no real firmware node associated with @dev! In that case the callers
- * should create a software node and assign it to @dev directly.
- */
-int device_add_properties(struct device *dev,
- const struct property_entry *properties)
-{
- struct fwnode_handle *fwnode;
-
- fwnode = fwnode_create_software_node(properties, NULL);
- if (IS_ERR(fwnode))
- return PTR_ERR(fwnode);
-
- set_secondary_fwnode(dev, fwnode);
- return 0;
-}
-EXPORT_SYMBOL_GPL(device_add_properties);
-
-/**
* fwnode_get_name - Return the name of a node
* @fwnode: The firmware node
*
@@ -1059,43 +1020,17 @@ fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint);
-/**
- * fwnode_graph_get_remote_node - get remote parent node for given port/endpoint
- * @fwnode: pointer to parent fwnode_handle containing graph port/endpoint
- * @port_id: identifier of the parent port node
- * @endpoint_id: identifier of the endpoint node
- *
- * Return: Remote fwnode handle associated with remote endpoint node linked
- * to @node. Use fwnode_node_put() on it when done.
- */
-struct fwnode_handle *
-fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port_id,
- u32 endpoint_id)
+static bool fwnode_graph_remote_available(struct fwnode_handle *ep)
{
- struct fwnode_handle *endpoint = NULL;
+ struct fwnode_handle *dev_node;
+ bool available;
- while ((endpoint = fwnode_graph_get_next_endpoint(fwnode, endpoint))) {
- struct fwnode_endpoint fwnode_ep;
- struct fwnode_handle *remote;
- int ret;
+ dev_node = fwnode_graph_get_remote_port_parent(ep);
+ available = fwnode_device_is_available(dev_node);
+ fwnode_handle_put(dev_node);
- ret = fwnode_graph_parse_endpoint(endpoint, &fwnode_ep);
- if (ret < 0)
- continue;
-
- if (fwnode_ep.port != port_id || fwnode_ep.id != endpoint_id)
- continue;
-
- remote = fwnode_graph_get_remote_port_parent(endpoint);
- if (!remote)
- return NULL;
-
- return fwnode_device_is_available(remote) ? remote : NULL;
- }
-
- return NULL;
+ return available;
}
-EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node);
/**
* fwnode_graph_get_endpoint_by_id - get endpoint by port and endpoint numbers
@@ -1111,8 +1046,8 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node);
* has not been found, look for the closest endpoint ID greater than the
* specified one and return the endpoint that corresponds to it, if present.
*
- * Do not return endpoints that belong to disabled devices, unless
- * FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags.
+ * Does not return endpoints that belong to disabled devices or endpoints that
+ * are unconnected, unless FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags.
*
* The returned endpoint needs to be released by calling fwnode_handle_put() on
* it when it is not needed any more.
@@ -1121,25 +1056,17 @@ struct fwnode_handle *
fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
u32 port, u32 endpoint, unsigned long flags)
{
- struct fwnode_handle *ep = NULL, *best_ep = NULL;
+ struct fwnode_handle *ep, *best_ep = NULL;
unsigned int best_ep_id = 0;
bool endpoint_next = flags & FWNODE_GRAPH_ENDPOINT_NEXT;
bool enabled_only = !(flags & FWNODE_GRAPH_DEVICE_DISABLED);
- while ((ep = fwnode_graph_get_next_endpoint(fwnode, ep))) {
+ fwnode_graph_for_each_endpoint(fwnode, ep) {
struct fwnode_endpoint fwnode_ep = { 0 };
int ret;
- if (enabled_only) {
- struct fwnode_handle *dev_node;
- bool available;
-
- dev_node = fwnode_graph_get_remote_port_parent(ep);
- available = fwnode_device_is_available(dev_node);
- fwnode_handle_put(dev_node);
- if (!available)
- continue;
- }
+ if (enabled_only && !fwnode_graph_remote_available(ep))
+ continue;
ret = fwnode_graph_parse_endpoint(ep, &fwnode_ep);
if (ret < 0)
@@ -1173,6 +1100,31 @@ fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id);
/**
+ * fwnode_graph_get_endpoint_count - Count endpoints on a device node
+ * @fwnode: The node related to a device
+ * @flags: fwnode lookup flags
+ * Count endpoints in a device node.
+ *
+ * If FWNODE_GRAPH_DEVICE_DISABLED flag is specified, also unconnected endpoints
+ * and endpoints connected to disabled devices are counted.
+ */
+unsigned int fwnode_graph_get_endpoint_count(struct fwnode_handle *fwnode,
+ unsigned long flags)
+{
+ struct fwnode_handle *ep;
+ unsigned int count = 0;
+
+ fwnode_graph_for_each_endpoint(fwnode, ep) {
+ if (flags & FWNODE_GRAPH_DEVICE_DISABLED ||
+ fwnode_graph_remote_available(ep))
+ count++;
+ }
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_count);
+
+/**
* fwnode_graph_parse_endpoint - parse common endpoint node properties
* @fwnode: pointer to endpoint fwnode_handle
* @endpoint: pointer to the fwnode endpoint data structure
@@ -1206,8 +1158,10 @@ fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
fwnode_graph_for_each_endpoint(fwnode, ep) {
node = fwnode_graph_get_remote_port_parent(ep);
- if (!fwnode_device_is_available(node))
+ if (!fwnode_device_is_available(node)) {
+ fwnode_handle_put(node);
continue;
+ }
ret = match(node, con_id, data);
fwnode_handle_put(node);
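A hedged usage sketch for the newly exported fwnode_graph_get_endpoint_count(); the wrapper function and dev pointer are illustrative:

#include <linux/property.h>

static unsigned int example_count_connected(struct device *dev)
{
	/*
	 * Counts only endpoints whose remote device is present and enabled;
	 * pass FWNODE_GRAPH_DEVICE_DISABLED to count every endpoint instead.
	 */
	return fwnode_graph_get_endpoint_count(dev_fwnode(dev), 0);
}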
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index ad684d37c2da..817eda2075aa 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -598,7 +598,7 @@ void regmap_debugfs_init(struct regmap *map)
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
dummy_index);
if (!map->debugfs_name)
- return;
+ return;
name = map->debugfs_name;
dummy_index++;
}
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 21a0c2562ec0..8f9fe5fd4707 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -647,6 +647,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
if (ret)
return ret;
+ regmap_debugfs_exit(map);
regmap_debugfs_init(map);
/* Add a devres resource for dev_get_regmap() */
@@ -876,6 +877,7 @@ struct regmap *__regmap_init(struct device *dev,
if (!bus) {
map->reg_read = config->reg_read;
map->reg_write = config->reg_write;
+ map->reg_update_bits = config->reg_update_bits;
map->defer_caching = false;
goto skip_format_initialization;
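A hedged sketch of the kind of bus-less regmap_config that benefits from the reg_update_bits propagation above; the callbacks are illustrative stubs standing in for device-specific accessors:

#include <linux/regmap.h>

static int example_reg_read(void *ctx, unsigned int reg, unsigned int *val)
{
	*val = 0;	/* illustrative: read from device-specific context */
	return 0;
}

static int example_reg_write(void *ctx, unsigned int reg, unsigned int val)
{
	return 0;	/* illustrative */
}

static int example_reg_update_bits(void *ctx, unsigned int reg,
				   unsigned int mask, unsigned int val)
{
	return 0;	/* illustrative: hardware read-modify-write */
}

static const struct regmap_config example_no_bus_config = {
	.reg_bits	 = 8,
	.val_bits	 = 16,
	.reg_read	 = example_reg_read,
	.reg_write	 = example_reg_write,
	/* Now copied into the map when registering a bus-less regmap. */
	.reg_update_bits = example_reg_update_bits,
};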
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 4debcea4fb12..0a482212c7e8 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -529,7 +529,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
return -ENOENT;
if (nargs_prop) {
- error = property_entry_read_int_array(swnode->node->properties,
+ error = property_entry_read_int_array(ref->node->properties,
nargs_prop, sizeof(u32),
&nargs_prop_val, 1);
if (error)
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index 3bb7beb127a9..4d1976ca5072 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -104,7 +104,7 @@ static int __init test_async_probe_init(void)
struct platform_device **pdev = NULL;
int async_id = 0, sync_id = 0;
unsigned long long duration;
- ktime_t calltime, delta;
+ ktime_t calltime;
int err, nid, cpu;
pr_info("registering first set of asynchronous devices...\n");
@@ -133,8 +133,7 @@ static int __init test_async_probe_init(void)
goto err_unregister_async_devs;
}
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
pr_info("registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
pr_err("test failed: probe took too long\n");
@@ -161,8 +160,7 @@ static int __init test_async_probe_init(void)
async_id++;
}
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
dev_info(&(*pdev)->dev,
"registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
@@ -197,8 +195,7 @@ static int __init test_async_probe_init(void)
goto err_unregister_sync_devs;
}
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
pr_info("registration took %lld msecs\n", duration);
if (duration < TEST_PROBE_THRESHOLD) {
dev_err(&(*pdev)->dev,
@@ -223,8 +220,7 @@ static int __init test_async_probe_init(void)
sync_id++;
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
dev_info(&(*pdev)->dev,
"registration took %lld msecs\n", duration);
if (duration < TEST_PROBE_THRESHOLD) {
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 8f2b641d0b8c..fc24e89f9592 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -45,11 +45,15 @@ static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
define_id_show_func(physical_package_id);
static DEVICE_ATTR_RO(physical_package_id);
+#ifdef TOPOLOGY_DIE_SYSFS
define_id_show_func(die_id);
static DEVICE_ATTR_RO(die_id);
+#endif
+#ifdef TOPOLOGY_CLUSTER_SYSFS
define_id_show_func(cluster_id);
static DEVICE_ATTR_RO(cluster_id);
+#endif
define_id_show_func(core_id);
static DEVICE_ATTR_RO(core_id);
@@ -66,19 +70,23 @@ define_siblings_read_func(core_siblings, core_cpumask);
static BIN_ATTR_RO(core_siblings, 0);
static BIN_ATTR_RO(core_siblings_list, 0);
+#ifdef TOPOLOGY_CLUSTER_SYSFS
define_siblings_read_func(cluster_cpus, cluster_cpumask);
static BIN_ATTR_RO(cluster_cpus, 0);
static BIN_ATTR_RO(cluster_cpus_list, 0);
+#endif
+#ifdef TOPOLOGY_DIE_SYSFS
define_siblings_read_func(die_cpus, die_cpumask);
static BIN_ATTR_RO(die_cpus, 0);
static BIN_ATTR_RO(die_cpus_list, 0);
+#endif
define_siblings_read_func(package_cpus, core_cpumask);
static BIN_ATTR_RO(package_cpus, 0);
static BIN_ATTR_RO(package_cpus_list, 0);
-#ifdef CONFIG_SCHED_BOOK
+#ifdef TOPOLOGY_BOOK_SYSFS
define_id_show_func(book_id);
static DEVICE_ATTR_RO(book_id);
define_siblings_read_func(book_siblings, book_cpumask);
@@ -86,7 +94,7 @@ static BIN_ATTR_RO(book_siblings, 0);
static BIN_ATTR_RO(book_siblings_list, 0);
#endif
-#ifdef CONFIG_SCHED_DRAWER
+#ifdef TOPOLOGY_DRAWER_SYSFS
define_id_show_func(drawer_id);
static DEVICE_ATTR_RO(drawer_id);
define_siblings_read_func(drawer_siblings, drawer_cpumask);
@@ -101,17 +109,21 @@ static struct bin_attribute *bin_attrs[] = {
&bin_attr_thread_siblings_list,
&bin_attr_core_siblings,
&bin_attr_core_siblings_list,
+#ifdef TOPOLOGY_CLUSTER_SYSFS
&bin_attr_cluster_cpus,
&bin_attr_cluster_cpus_list,
+#endif
+#ifdef TOPOLOGY_DIE_SYSFS
&bin_attr_die_cpus,
&bin_attr_die_cpus_list,
+#endif
&bin_attr_package_cpus,
&bin_attr_package_cpus_list,
-#ifdef CONFIG_SCHED_BOOK
+#ifdef TOPOLOGY_BOOK_SYSFS
&bin_attr_book_siblings,
&bin_attr_book_siblings_list,
#endif
-#ifdef CONFIG_SCHED_DRAWER
+#ifdef TOPOLOGY_DRAWER_SYSFS
&bin_attr_drawer_siblings,
&bin_attr_drawer_siblings_list,
#endif
@@ -120,13 +132,17 @@ static struct bin_attribute *bin_attrs[] = {
static struct attribute *default_attrs[] = {
&dev_attr_physical_package_id.attr,
+#ifdef TOPOLOGY_DIE_SYSFS
&dev_attr_die_id.attr,
+#endif
+#ifdef TOPOLOGY_CLUSTER_SYSFS
&dev_attr_cluster_id.attr,
+#endif
&dev_attr_core_id.attr,
-#ifdef CONFIG_SCHED_BOOK
+#ifdef TOPOLOGY_BOOK_SYSFS
&dev_attr_book_id.attr,
#endif
-#ifdef CONFIG_SCHED_DRAWER
+#ifdef TOPOLOGY_DRAWER_SYSFS
&dev_attr_drawer_id.attr,
#endif
NULL