Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/base.h                    |   2
-rw-r--r--  drivers/base/bus.c                     |  81
-rw-r--r--  drivers/base/core.c                    |  28
-rw-r--r--  drivers/base/cpu.c                     |  39
-rw-r--r--  drivers/base/dd.c                      |   6
-rw-r--r--  drivers/base/devres.c                  |  74
-rw-r--r--  drivers/base/devtmpfs.c                |  28
-rw-r--r--  drivers/base/dma-buf.c                 | 169
-rw-r--r--  drivers/base/memory.c                  |  49
-rw-r--r--  drivers/base/node.c                    |   8
-rw-r--r--  drivers/base/platform.c                |  24
-rw-r--r--  drivers/base/power/domain.c            |   6
-rw-r--r--  drivers/base/power/generic_ops.c       |   2
-rw-r--r--  drivers/base/power/main.c              |   2
-rw-r--r--  drivers/base/power/opp.c               |   1
-rw-r--r--  drivers/base/power/qos.c               |  60
-rw-r--r--  drivers/base/power/runtime.c           |   2
-rw-r--r--  drivers/base/regmap/internal.h         |  40
-rw-r--r--  drivers/base/regmap/regcache-lzo.c     |   6
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c  | 102
-rw-r--r--  drivers/base/regmap/regcache.c         | 196
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c   |  94
-rw-r--r--  drivers/base/regmap/regmap-irq.c       |   3
-rw-r--r--  drivers/base/regmap/regmap.c           |  98
24 files changed, 893 insertions, 227 deletions
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 6ee17bb391a9..b8bdfe61daa6 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -101,6 +101,8 @@ static inline int hypervisor_init(void) { return 0; }
extern int platform_bus_init(void);
extern void cpu_dev_init(void);
+struct kobject *virtual_device_parent(struct device *dev);
+
extern int bus_add_device(struct device *dev);
extern void bus_probe_device(struct device *dev);
extern void bus_remove_device(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 519865b53f76..1a68f947ded8 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -898,18 +898,18 @@ static ssize_t bus_uevent_store(struct bus_type *bus,
static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
/**
- * __bus_register - register a driver-core subsystem
+ * bus_register - register a driver-core subsystem
* @bus: bus to register
- * @key: lockdep class key
*
* Once we have that, we register the bus with the kobject
* infrastructure, then register the children subsystems it has:
* the devices and drivers that belong to the subsystem.
*/
-int __bus_register(struct bus_type *bus, struct lock_class_key *key)
+int bus_register(struct bus_type *bus)
{
int retval;
struct subsys_private *priv;
+ struct lock_class_key *key = &bus->lock_key;
priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL);
if (!priv)
@@ -981,7 +981,7 @@ out:
bus->p = NULL;
return retval;
}
-EXPORT_SYMBOL_GPL(__bus_register);
+EXPORT_SYMBOL_GPL(bus_register);
/**
* bus_unregister - remove a bus from the system
@@ -1205,26 +1205,10 @@ static void system_root_device_release(struct device *dev)
{
kfree(dev);
}
-/**
- * subsys_system_register - register a subsystem at /sys/devices/system/
- * @subsys: system subsystem
- * @groups: default attributes for the root device
- *
- * All 'system' subsystems have a /sys/devices/system/<name> root device
- * with the name of the subsystem. The root device can carry subsystem-
- * wide attributes. All registered devices are below this single root
- * device and are named after the subsystem with a simple enumeration
- * number appended. The registered devices are not explicitely named;
- * only 'id' in the device needs to be set.
- *
- * Do not use this interface for anything new, it exists for compatibility
- * with bad ideas only. New subsystems should use plain subsystems; and
- * add the subsystem-wide attributes should be added to the subsystem
- * directory itself and not some create fake root-device placed in
- * /sys/devices/system/<name>.
- */
-int subsys_system_register(struct bus_type *subsys,
- const struct attribute_group **groups)
+
+static int subsys_register(struct bus_type *subsys,
+ const struct attribute_group **groups,
+ struct kobject *parent_of_root)
{
struct device *dev;
int err;
@@ -1243,7 +1227,7 @@ int subsys_system_register(struct bus_type *subsys,
if (err < 0)
goto err_name;
- dev->kobj.parent = &system_kset->kobj;
+ dev->kobj.parent = parent_of_root;
dev->groups = groups;
dev->release = system_root_device_release;
@@ -1263,8 +1247,55 @@ err_dev:
bus_unregister(subsys);
return err;
}
+
+/**
+ * subsys_system_register - register a subsystem at /sys/devices/system/
+ * @subsys: system subsystem
+ * @groups: default attributes for the root device
+ *
+ * All 'system' subsystems have a /sys/devices/system/<name> root device
+ * with the name of the subsystem. The root device can carry subsystem-
+ * wide attributes. All registered devices are below this single root
+ * device and are named after the subsystem with a simple enumeration
+ * number appended. The registered devices are not explicitly named;
+ * only 'id' in the device needs to be set.
+ *
+ * Do not use this interface for anything new, it exists for compatibility
+ * with bad ideas only. New subsystems should use plain subsystems; the
+ * subsystem-wide attributes should be added to the subsystem directory
+ * itself and not to a fake root device placed in
+ * /sys/devices/system/<name>.
+ */
+int subsys_system_register(struct bus_type *subsys,
+ const struct attribute_group **groups)
+{
+ return subsys_register(subsys, groups, &system_kset->kobj);
+}
EXPORT_SYMBOL_GPL(subsys_system_register);
+/**
+ * subsys_virtual_register - register a subsystem at /sys/devices/virtual/
+ * @subsys: virtual subsystem
+ * @groups: default attributes for the root device
+ *
+ * All 'virtual' subsystems have a /sys/devices/virtual/<name> root device
+ * with the name of the subsystem. The root device can carry subsystem-wide
+ * attributes. All registered devices are below this single root device.
+ * There's no restriction on device naming. This is for kernel software
+ * constructs which need a sysfs interface.
+ */
+int subsys_virtual_register(struct bus_type *subsys,
+ const struct attribute_group **groups)
+{
+ struct kobject *virtual_dir;
+
+ virtual_dir = virtual_device_parent(NULL);
+ if (!virtual_dir)
+ return -ENOMEM;
+
+ return subsys_register(subsys, groups, virtual_dir);
+}
+
int __init buses_init(void)
{
bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
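For illustration, a minimal usage sketch of the new subsys_virtual_register() interface (not part of this patch; the "foo" subsystem and its init function are hypothetical):

#include <linux/device.h>

static struct bus_type foo_subsys = {
	.name		= "foo",
	.dev_name	= "foo",
};

static int __init foo_subsys_init(void)
{
	/* creates the /sys/devices/virtual/foo root device plus the bus entry */
	return subsys_virtual_register(&foo_subsys, NULL);
}

Passing NULL for the groups argument is fine when the root device needs no extra attributes.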
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 56536f4b0f6b..016312437577 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -283,15 +283,21 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
const char *tmp;
const char *name;
umode_t mode = 0;
+ kuid_t uid = GLOBAL_ROOT_UID;
+ kgid_t gid = GLOBAL_ROOT_GID;
add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
- name = device_get_devnode(dev, &mode, &tmp);
+ name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
if (name) {
add_uevent_var(env, "DEVNAME=%s", name);
- kfree(tmp);
if (mode)
add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
+ if (!uid_eq(uid, GLOBAL_ROOT_UID))
+ add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
+ if (!gid_eq(gid, GLOBAL_ROOT_GID))
+ add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
+ kfree(tmp);
}
}
@@ -563,8 +569,15 @@ int device_create_file(struct device *dev,
const struct device_attribute *attr)
{
int error = 0;
- if (dev)
+
+ if (dev) {
+ WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
+ "Write permission without 'store'\n");
+ WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
+ "Read permission without 'show'\n");
error = sysfs_create_file(&dev->kobj, &attr->attr);
+ }
+
return error;
}
@@ -690,7 +703,7 @@ void device_initialize(struct device *dev)
set_dev_node(dev, -1);
}
-static struct kobject *virtual_device_parent(struct device *dev)
+struct kobject *virtual_device_parent(struct device *dev)
{
static struct kobject *virtual_dir = NULL;
@@ -1274,6 +1287,8 @@ static struct device *next_device(struct klist_iter *i)
* device_get_devnode - path of device node file
* @dev: device
* @mode: returned file access mode
+ * @uid: returned file owner
+ * @gid: returned file group
* @tmp: possibly allocated string
*
* Return the relative path of a possible device node.
@@ -1282,7 +1297,8 @@ static struct device *next_device(struct klist_iter *i)
* freed by the caller.
*/
const char *device_get_devnode(struct device *dev,
- umode_t *mode, const char **tmp)
+ umode_t *mode, kuid_t *uid, kgid_t *gid,
+ const char **tmp)
{
char *s;
@@ -1290,7 +1306,7 @@ const char *device_get_devnode(struct device *dev,
/* the device type may provide a specific name */
if (dev->type && dev->type->devnode)
- *tmp = dev->type->devnode(dev, mode);
+ *tmp = dev->type->devnode(dev, mode, uid, gid);
if (*tmp)
return *tmp;
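For illustration, a minimal sketch (not part of this patch) of a device_type implementing the extended devnode() callback that the hunk above now calls with uid/gid pointers; the foo_* names and the group id 100 are hypothetical:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uidgid.h>

static char *foo_devnode(struct device *dev, umode_t *mode,
			 kuid_t *uid, kgid_t *gid)
{
	if (mode)
		*mode = 0660;			/* rw for owner and group */
	if (gid)
		*gid = KGIDT_INIT(100);		/* hypothetical "foo" group */
	/* the returned string is kfree()d by the caller, see dev_uevent() */
	return kasprintf(GFP_KERNEL, "foo/%s", dev_name(dev));
}

static struct device_type foo_type = {
	.name		= "foo",
	.devnode	= foo_devnode,
};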
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index fb10728f6372..3d48fc887ef4 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -25,6 +25,15 @@ EXPORT_SYMBOL_GPL(cpu_subsys);
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
#ifdef CONFIG_HOTPLUG_CPU
+static void change_cpu_under_node(struct cpu *cpu,
+ unsigned int from_nid, unsigned int to_nid)
+{
+ int cpuid = cpu->dev.id;
+ unregister_cpu_under_node(cpuid, from_nid);
+ register_cpu_under_node(cpuid, to_nid);
+ cpu->node_id = to_nid;
+}
+
static ssize_t show_online(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -39,17 +48,29 @@ static ssize_t __ref store_online(struct device *dev,
const char *buf, size_t count)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int cpuid = cpu->dev.id;
+ int from_nid, to_nid;
ssize_t ret;
cpu_hotplug_driver_lock();
switch (buf[0]) {
case '0':
- ret = cpu_down(cpu->dev.id);
+ ret = cpu_down(cpuid);
if (!ret)
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
break;
case '1':
- ret = cpu_up(cpu->dev.id);
+ from_nid = cpu_to_node(cpuid);
+ ret = cpu_up(cpuid);
+
+ /*
+ * When hot adding memory to memoryless node and enabling a cpu
+ * on the node, node number of the cpu may internally change.
+ */
+ to_nid = cpu_to_node(cpuid);
+ if (from_nid != to_nid)
+ change_cpu_under_node(cpu, from_nid, to_nid);
+
if (!ret)
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
break;
@@ -132,6 +153,17 @@ static ssize_t show_crash_notes(struct device *dev, struct device_attribute *att
return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
+
+static ssize_t show_crash_notes_size(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t rc;
+
+ rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
+ return rc;
+}
+static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);
#endif
/*
@@ -259,6 +291,9 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
#ifdef CONFIG_KEXEC
if (!error)
error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
+ if (!error)
+ error = device_create_file(&cpu->dev,
+ &dev_attr_crash_notes_size);
#endif
return error;
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index bb5645ea0282..35fa36898916 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -380,7 +380,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pm_runtime_barrier(dev);
ret = really_probe(dev, drv);
- pm_runtime_idle(dev);
+ pm_request_idle(dev);
return ret;
}
@@ -428,7 +428,7 @@ int device_attach(struct device *dev)
}
} else {
ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach);
- pm_runtime_idle(dev);
+ pm_request_idle(dev);
}
out_unlock:
device_unlock(dev);
@@ -499,7 +499,7 @@ static void __device_release_driver(struct device *dev)
BUS_NOTIFY_UNBIND_DRIVER,
dev);
- pm_runtime_put_sync(dev);
+ pm_runtime_put(dev);
if (dev->bus && dev->bus->remove)
dev->bus->remove(dev);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 668390664764..507379e7b763 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -671,6 +671,80 @@ int devres_release_group(struct device *dev, void *id)
EXPORT_SYMBOL_GPL(devres_release_group);
/*
+ * Custom devres actions allow inserting a simple function call
+ * into the teardown sequence.
+ */
+
+struct action_devres {
+ void *data;
+ void (*action)(void *);
+};
+
+static int devm_action_match(struct device *dev, void *res, void *p)
+{
+ struct action_devres *devres = res;
+ struct action_devres *target = p;
+
+ return devres->action == target->action &&
+ devres->data == target->data;
+}
+
+static void devm_action_release(struct device *dev, void *res)
+{
+ struct action_devres *devres = res;
+
+ devres->action(devres->data);
+}
+
+/**
+ * devm_add_action() - add a custom action to list of managed resources
+ * @dev: Device that owns the action
+ * @action: Function that should be called
+ * @data: Pointer to data passed to @action implementation
+ *
+ * This adds a custom action to the list of managed resources so that
+ * it gets executed as part of standard resource unwinding.
+ */
+int devm_add_action(struct device *dev, void (*action)(void *), void *data)
+{
+ struct action_devres *devres;
+
+ devres = devres_alloc(devm_action_release,
+ sizeof(struct action_devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ devres->data = data;
+ devres->action = action;
+
+ devres_add(dev, devres);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_add_action);
+
+/**
+ * devm_remove_action() - removes previously added custom action
+ * @dev: Device that owns the action
+ * @action: Function implementing the action
+ * @data: Pointer to data passed to @action implementation
+ *
+ * Removes an instance of @action previously added by devm_add_action().
+ * Both action and data should match one of the existing entries.
+ */
+void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+{
+ struct action_devres devres = {
+ .data = data,
+ .action = action,
+ };
+
+ WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
+ &devres));
+
+}
+EXPORT_SYMBOL_GPL(devm_remove_action);
+
+/*
* Managed kzalloc/kfree
*/
static void devm_kzalloc_release(struct device *dev, void *res)
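For illustration, a minimal sketch (not part of this patch) of the new action helpers in a driver probe() path; the foo_* names are hypothetical and the clock is just an example resource:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>

static void foo_clk_unprepare(void *data)
{
	clk_disable_unprepare(data);
}

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* the disable now unwinds automatically with the other devres entries */
	ret = devm_add_action(&pdev->dev, foo_clk_unprepare, clk);
	if (ret) {
		clk_disable_unprepare(clk);
		return ret;
	}

	return 0;
}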
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 01fc5b07f951..7413d065906b 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -24,6 +24,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kthread.h>
+#include "base.h"
static struct task_struct *thread;
@@ -41,6 +42,8 @@ static struct req {
int err;
const char *name;
umode_t mode; /* 0 => delete */
+ kuid_t uid;
+ kgid_t gid;
struct device *dev;
} *requests;
@@ -85,7 +88,9 @@ int devtmpfs_create_node(struct device *dev)
return 0;
req.mode = 0;
- req.name = device_get_devnode(dev, &req.mode, &tmp);
+ req.uid = GLOBAL_ROOT_UID;
+ req.gid = GLOBAL_ROOT_GID;
+ req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp);
if (!req.name)
return -ENOMEM;
@@ -121,7 +126,7 @@ int devtmpfs_delete_node(struct device *dev)
if (!thread)
return 0;
- req.name = device_get_devnode(dev, NULL, &tmp);
+ req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp);
if (!req.name)
return -ENOMEM;
@@ -187,7 +192,8 @@ static int create_path(const char *nodepath)
return err;
}
-static int handle_create(const char *nodename, umode_t mode, struct device *dev)
+static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
+ kgid_t gid, struct device *dev)
{
struct dentry *dentry;
struct path path;
@@ -201,14 +207,14 @@ static int handle_create(const char *nodename, umode_t mode, struct device *dev)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mknod(path.dentry->d_inode,
- dentry, mode, dev->devt);
+ err = vfs_mknod(path.dentry->d_inode, dentry, mode, dev->devt);
if (!err) {
struct iattr newattrs;
- /* fixup possibly umasked mode */
newattrs.ia_mode = mode;
- newattrs.ia_valid = ATTR_MODE;
+ newattrs.ia_uid = uid;
+ newattrs.ia_gid = gid;
+ newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
mutex_lock(&dentry->d_inode->i_mutex);
notify_change(dentry, &newattrs);
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -358,10 +364,11 @@ int devtmpfs_mount(const char *mntdir)
static DECLARE_COMPLETION(setup_done);
-static int handle(const char *name, umode_t mode, struct device *dev)
+static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
+ struct device *dev)
{
if (mode)
- return handle_create(name, mode, dev);
+ return handle_create(name, mode, uid, gid, dev);
else
return handle_remove(name, dev);
}
@@ -387,7 +394,8 @@ static int devtmpfsd(void *p)
spin_unlock(&req_lock);
while (req) {
struct req *next = req->next;
- req->err = handle(req->name, req->mode, req->dev);
+ req->err = handle(req->name, req->mode,
+ req->uid, req->gid, req->dev);
complete(&req->done);
req = next;
}
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 2a7cb0df176b..08fe897c0b4c 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -27,9 +27,18 @@
#include <linux/dma-buf.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
static inline int is_dma_buf_file(struct file *);
+struct dma_buf_list {
+ struct list_head head;
+ struct mutex lock;
+};
+
+static struct dma_buf_list db_list;
+
static int dma_buf_release(struct inode *inode, struct file *file)
{
struct dma_buf *dmabuf;
@@ -42,6 +51,11 @@ static int dma_buf_release(struct inode *inode, struct file *file)
BUG_ON(dmabuf->vmapping_counter);
dmabuf->ops->release(dmabuf);
+
+ mutex_lock(&db_list.lock);
+ list_del(&dmabuf->list_node);
+ mutex_unlock(&db_list.lock);
+
kfree(dmabuf);
return 0;
}
@@ -77,22 +91,24 @@ static inline int is_dma_buf_file(struct file *file)
}
/**
- * dma_buf_export - Creates a new dma_buf, and associates an anon file
+ * dma_buf_export_named - Creates a new dma_buf, and associates an anon file
* with this buffer, so it can be exported.
* Also connect the allocator specific data and ops to the buffer.
+ * Additionally, provides a name string for the exporter; useful in debugging.
*
* @priv: [in] Attach private data of allocator to this buffer
* @ops: [in] Attach allocator-defined dma buf ops to the new buffer.
* @size: [in] Size of the buffer
* @flags: [in] mode flags for the file.
+ * @exp_name: [in] name of the exporting module - useful for debugging.
*
* Returns, on success, a newly created dma_buf object, which wraps the
* supplied private data and operations for dma_buf_ops. On either missing
* ops, or error in allocating struct dma_buf, will return negative error.
*
*/
-struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
- size_t size, int flags)
+struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
+ size_t size, int flags, const char *exp_name)
{
struct dma_buf *dmabuf;
struct file *file;
@@ -114,6 +130,7 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
dmabuf->priv = priv;
dmabuf->ops = ops;
dmabuf->size = size;
+ dmabuf->exp_name = exp_name;
file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
@@ -122,9 +139,13 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
mutex_init(&dmabuf->lock);
INIT_LIST_HEAD(&dmabuf->attachments);
+ mutex_lock(&db_list.lock);
+ list_add(&dmabuf->list_node, &db_list.head);
+ mutex_unlock(&db_list.lock);
+
return dmabuf;
}
-EXPORT_SYMBOL_GPL(dma_buf_export);
+EXPORT_SYMBOL_GPL(dma_buf_export_named);
/**
@@ -548,3 +569,143 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
+
+#ifdef CONFIG_DEBUG_FS
+static int dma_buf_describe(struct seq_file *s)
+{
+ int ret;
+ struct dma_buf *buf_obj;
+ struct dma_buf_attachment *attach_obj;
+ int count = 0, attach_count;
+ size_t size = 0;
+
+ ret = mutex_lock_interruptible(&db_list.lock);
+
+ if (ret)
+ return ret;
+
+ seq_printf(s, "\nDma-buf Objects:\n");
+ seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n");
+
+ list_for_each_entry(buf_obj, &db_list.head, list_node) {
+ ret = mutex_lock_interruptible(&buf_obj->lock);
+
+ if (ret) {
+ seq_printf(s,
+ "\tERROR locking buffer object: skipping\n");
+ continue;
+ }
+
+ seq_printf(s, "\t");
+
+ seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n",
+ buf_obj->exp_name, buf_obj->size,
+ buf_obj->file->f_flags, buf_obj->file->f_mode,
+ (long)(buf_obj->file->f_count.counter));
+
+ seq_printf(s, "\t\tAttached Devices:\n");
+ attach_count = 0;
+
+ list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
+ seq_printf(s, "\t\t");
+
+ seq_printf(s, "%s\n", attach_obj->dev->init_name);
+ attach_count++;
+ }
+
+ seq_printf(s, "\n\t\tTotal %d devices attached\n",
+ attach_count);
+
+ count++;
+ size += buf_obj->size;
+ mutex_unlock(&buf_obj->lock);
+ }
+
+ seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
+
+ mutex_unlock(&db_list.lock);
+ return 0;
+}
+
+static int dma_buf_show(struct seq_file *s, void *unused)
+{
+ void (*func)(struct seq_file *) = s->private;
+ func(s);
+ return 0;
+}
+
+static int dma_buf_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dma_buf_show, inode->i_private);
+}
+
+static const struct file_operations dma_buf_debug_fops = {
+ .open = dma_buf_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *dma_buf_debugfs_dir;
+
+static int dma_buf_init_debugfs(void)
+{
+ int err = 0;
+ dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL);
+ if (IS_ERR(dma_buf_debugfs_dir)) {
+ err = PTR_ERR(dma_buf_debugfs_dir);
+ dma_buf_debugfs_dir = NULL;
+ return err;
+ }
+
+ err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe);
+
+ if (err)
+ pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
+
+ return err;
+}
+
+static void dma_buf_uninit_debugfs(void)
+{
+ if (dma_buf_debugfs_dir)
+ debugfs_remove_recursive(dma_buf_debugfs_dir);
+}
+
+int dma_buf_debugfs_create_file(const char *name,
+ int (*write)(struct seq_file *))
+{
+ struct dentry *d;
+
+ d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
+ write, &dma_buf_debug_fops);
+
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+
+ return 0;
+}
+#else
+static inline int dma_buf_init_debugfs(void)
+{
+ return 0;
+}
+static inline void dma_buf_uninit_debugfs(void)
+{
+}
+#endif
+
+static int __init dma_buf_init(void)
+{
+ mutex_init(&db_list.lock);
+ INIT_LIST_HEAD(&db_list.head);
+ dma_buf_init_debugfs();
+ return 0;
+}
+subsys_initcall(dma_buf_init);
+
+static void __exit dma_buf_deinit(void)
+{
+ dma_buf_uninit_debugfs();
+}
+__exitcall(dma_buf_deinit);
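For illustration, a minimal sketch (not part of this patch) of an exporter adding its own node under the new dma_buf debugfs directory, assuming dma_buf_debugfs_create_file() is exposed to exporters via <linux/dma-buf.h>; the foo_* names are hypothetical:

#include <linux/dma-buf.h>
#include <linux/seq_file.h>

static int foo_exporter_describe(struct seq_file *s)
{
	seq_printf(s, "foo exporter: 0 buffers outstanding\n");
	return 0;
}

static int __init foo_exporter_debugfs_init(void)
{
	/* shows up as <debugfs>/dma_buf/foo, rendered through dma_buf_show() */
	return dma_buf_debugfs_create_file("foo", foo_exporter_describe);
}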
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index a51007b79032..14f8a6954da0 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -93,16 +93,6 @@ int register_memory(struct memory_block *memory)
return error;
}
-static void
-unregister_memory(struct memory_block *memory)
-{
- BUG_ON(memory->dev.bus != &memory_subsys);
-
- /* drop the ref. we got in remove_memory_block() */
- kobject_put(&memory->dev.kobj);
- device_unregister(&memory->dev);
-}
-
unsigned long __weak memory_block_size_bytes(void)
{
return MIN_MEMORY_BLOCK_SIZE;
@@ -217,8 +207,7 @@ int memory_isolate_notify(unsigned long val, void *v)
* The probe routines leave the pages reserved, just as the bootmem code does.
* Make sure they're still that way.
*/
-static bool pages_correctly_reserved(unsigned long start_pfn,
- unsigned long nr_pages)
+static bool pages_correctly_reserved(unsigned long start_pfn)
{
int i, j;
struct page *page;
@@ -266,7 +255,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
switch (action) {
case MEM_ONLINE:
- if (!pages_correctly_reserved(start_pfn, nr_pages))
+ if (!pages_correctly_reserved(start_pfn))
return -EBUSY;
ret = online_pages(start_pfn, nr_pages, online_type);
@@ -637,8 +626,28 @@ static int add_memory_section(int nid, struct mem_section *section,
return ret;
}
-int remove_memory_block(unsigned long node_id, struct mem_section *section,
- int phys_device)
+/*
+ * need an interface for the VM to add new memory regions,
+ * but without onlining it.
+ */
+int register_new_memory(int nid, struct mem_section *section)
+{
+ return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG);
+}
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static void
+unregister_memory(struct memory_block *memory)
+{
+ BUG_ON(memory->dev.bus != &memory_subsys);
+
+ /* drop the ref. we got in remove_memory_block() */
+ kobject_put(&memory->dev.kobj);
+ device_unregister(&memory->dev);
+}
+
+static int remove_memory_block(unsigned long node_id,
+ struct mem_section *section, int phys_device)
{
struct memory_block *mem;
@@ -661,15 +670,6 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
return 0;
}
-/*
- * need an interface for the VM to add new memory regions,
- * but without onlining it.
- */
-int register_new_memory(int nid, struct mem_section *section)
-{
- return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG);
-}
-
int unregister_memory_section(struct mem_section *section)
{
if (!present_section(section))
@@ -677,6 +677,7 @@ int unregister_memory_section(struct mem_section *section)
return remove_memory_block(0, section, 0);
}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
/*
* offline one memory block. If the memory block has been offlined, do nothing.
diff --git a/drivers/base/node.c b/drivers/base/node.c
index fac124a7e1c5..7616a77ca322 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
+#include <linux/notifier.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
@@ -683,8 +684,11 @@ static int __init register_node_type(void)
ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
if (!ret) {
- hotplug_memory_notifier(node_memory_callback,
- NODE_CALLBACK_PRI);
+ static struct notifier_block node_memory_callback_nb = {
+ .notifier_call = node_memory_callback,
+ .priority = NODE_CALLBACK_PRI,
+ };
+ register_hotmemory_notifier(&node_memory_callback_nb);
}
/*
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index c0b8df38402b..9eda84246ffd 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -46,8 +46,8 @@ EXPORT_SYMBOL_GPL(platform_bus);
* manipulate any relevant information in the pdev_archdata they can do:
*
* platform_device_alloc()
- * ... manipulate ...
- * platform_device_add()
+ * ... manipulate ...
+ * platform_device_add()
*
* And if they don't care they can just call platform_device_register() and
* everything will just work out.
@@ -326,9 +326,7 @@ int platform_device_add(struct platform_device *pdev)
}
if (p && insert_resource(p, r)) {
- printk(KERN_ERR
- "%s: failed to claim resource %d\n",
- dev_name(&pdev->dev), i);
+ dev_err(&pdev->dev, "failed to claim resource %d\n", i);
ret = -EBUSY;
goto failed;
}
@@ -555,7 +553,8 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);
/**
* platform_driver_probe - register driver for non-hotpluggable device
* @drv: platform driver structure
- * @probe: the driver probe routine, probably from an __init section
+ * @probe: the driver probe routine, probably from an __init section,
+ * must not return -EPROBE_DEFER.
*
* Use this instead of platform_driver_register() when you know the device
* is not hotpluggable and has already been registered, and you want to
@@ -566,6 +565,9 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);
* into system-on-chip processors, where the controller devices have been
* configured as part of board setup.
*
+ * This is incompatible with deferred probing so probe() must not
+ * return -EPROBE_DEFER.
+ *
* Returns zero if the driver registered and bound to a device, else returns
* a negative error code and with the driver not registered.
*/
@@ -682,7 +684,7 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
int rc;
/* Some devices have extra OF data and an OF-style MODALIAS */
- rc = of_device_uevent_modalias(dev,env);
+ rc = of_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
@@ -1126,8 +1128,8 @@ static int __init early_platform_driver_probe_id(char *class_str,
switch (match_id) {
case EARLY_PLATFORM_ID_ERROR:
- pr_warning("%s: unable to parse %s parameter\n",
- class_str, epdrv->pdrv->driver.name);
+ pr_warn("%s: unable to parse %s parameter\n",
+ class_str, epdrv->pdrv->driver.name);
/* fall-through */
case EARLY_PLATFORM_ID_UNSET:
match = NULL;
@@ -1158,8 +1160,8 @@ static int __init early_platform_driver_probe_id(char *class_str,
}
if (epdrv->pdrv->probe(match))
- pr_warning("%s: unable to probe %s early.\n",
- class_str, match->name);
+ pr_warn("%s: unable to probe %s early.\n",
+ class_str, match->name);
else
n++;
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 9a6b05a35603..7072404c8b6d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -920,7 +920,7 @@ static int pm_genpd_prepare(struct device *dev)
pm_wakeup_event(dev, 0);
if (pm_wakeup_pending()) {
- pm_runtime_put_sync(dev);
+ pm_runtime_put(dev);
return -EBUSY;
}
@@ -961,7 +961,7 @@ static int pm_genpd_prepare(struct device *dev)
pm_runtime_enable(dev);
}
- pm_runtime_put_sync(dev);
+ pm_runtime_put(dev);
return ret;
}
@@ -1327,7 +1327,7 @@ static void pm_genpd_complete(struct device *dev)
pm_generic_complete(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- pm_runtime_idle(dev);
+ pm_request_idle(dev);
}
}
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index d03d290f31c2..bfd898b8988e 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -324,6 +324,6 @@ void pm_generic_complete(struct device *dev)
* Let runtime PM try to suspend devices that haven't been in use before
* going into the system-wide sleep state we're resuming from.
*/
- pm_runtime_idle(dev);
+ pm_request_idle(dev);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 15beb500a4e4..5a9b6569dd74 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -756,7 +756,7 @@ static void device_complete(struct device *dev, pm_message_t state)
device_unlock(dev);
- pm_runtime_put_sync(dev);
+ pm_runtime_put(dev);
}
/**
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 32ee0fc7ea54..f0077cb8e249 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -55,6 +55,7 @@
* @rate: Frequency in hertz
* @u_volt: Nominal voltage in microvolts corresponding to this OPP
* @dev_opp: points back to the device_opp struct this opp belongs to
+ * @head: RCU callback head used for deferred freeing
*
* This structure stores the OPP information for a given device.
*/
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 5f74587ef258..71671c42ef45 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -46,6 +46,7 @@
#include "power.h"
static DEFINE_MUTEX(dev_pm_qos_mtx);
+static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
@@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
struct pm_qos_constraints *c;
struct pm_qos_flags *f;
- mutex_lock(&dev_pm_qos_mtx);
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
/*
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
+ pm_qos_sysfs_remove_latency(dev);
+ pm_qos_sysfs_remove_flags(dev);
+
+ mutex_lock(&dev_pm_qos_mtx);
+
__dev_pm_qos_hide_latency_limit(dev);
__dev_pm_qos_hide_flags(dev);
@@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
out:
mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
/**
@@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
kfree(req);
}
+static void dev_pm_qos_drop_user_request(struct device *dev,
+ enum dev_pm_qos_req_type type)
+{
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_drop_user_request(dev, type);
+ mutex_unlock(&dev_pm_qos_mtx);
+}
+
/**
* dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
* @dev: Device whose PM QoS latency limit is to be exposed to user space.
@@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
return ret;
}
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos))
@@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
+ mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
-
dev->power.qos->latency_req = req;
+
+ mutex_unlock(&dev_pm_qos_mtx);
+
ret = pm_qos_sysfs_add_latency(dev);
if (ret)
- __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
out:
- mutex_unlock(&dev_pm_qos_mtx);
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
- if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
- pm_qos_sysfs_remove_latency(dev);
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
- }
}
/**
@@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev)
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ pm_qos_sysfs_remove_latency(dev);
+
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_latency_limit(dev);
mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
@@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
}
pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos))
@@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
+ mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
-
dev->power.qos->flags_req = req;
+
+ mutex_unlock(&dev_pm_qos_mtx);
+
ret = pm_qos_sysfs_add_flags(dev);
if (ret)
- __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
out:
- mutex_unlock(&dev_pm_qos_mtx);
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
pm_runtime_put(dev);
return ret;
}
@@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
static void __dev_pm_qos_hide_flags(struct device *dev)
{
- if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
- pm_qos_sysfs_remove_flags(dev);
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
- }
}
/**
@@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev)
void dev_pm_qos_hide_flags(struct device *dev)
{
pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ pm_qos_sysfs_remove_flags(dev);
+
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_flags(dev);
mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
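For reference, a minimal sketch (not part of this patch) of the expose/hide pair whose lock ordering these hunks rework; the foo_* wrappers and the 100 microsecond value are hypothetical:

#include <linux/pm_qos.h>

/* expose a user-tunable resume latency limit (here 100 us) for a device */
static int foo_enable_latency_knob(struct device *dev)
{
	return dev_pm_qos_expose_latency_limit(dev, 100);
}

/* remove the sysfs knob again, e.g. on driver unbind */
static void foo_disable_latency_knob(struct device *dev)
{
	dev_pm_qos_hide_latency_limit(dev);
}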
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 1244930e3d7a..ef13ad08afb2 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1400,5 +1400,5 @@ void pm_runtime_remove(struct device *dev)
if (dev->power.runtime_status == RPM_ACTIVE)
pm_runtime_set_suspended(dev);
if (dev->power.irq_safe && dev->parent)
- pm_runtime_put_sync(dev->parent);
+ pm_runtime_put(dev->parent);
}
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 5a22bd33ce3d..c130536e0ab0 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -38,7 +38,8 @@ struct regmap_format {
unsigned int reg, unsigned int val);
void (*format_reg)(void *buf, unsigned int reg, unsigned int shift);
void (*format_val)(void *buf, unsigned int val, unsigned int shift);
- unsigned int (*parse_val)(void *buf);
+ unsigned int (*parse_val)(const void *buf);
+ void (*parse_inplace)(void *buf);
};
struct regmap_async {
@@ -76,6 +77,7 @@ struct regmap {
unsigned int debugfs_tot_len;
struct list_head debugfs_off_cache;
+ struct mutex cache_lock;
#endif
unsigned int max_register;
@@ -125,6 +127,9 @@ struct regmap {
void *cache;
u32 cache_dirty;
+ unsigned long *cache_present;
+ unsigned int cache_present_nbits;
+
struct reg_default *patch;
int patch_regs;
@@ -187,12 +192,35 @@ int regcache_read(struct regmap *map,
int regcache_write(struct regmap *map,
unsigned int reg, unsigned int value);
int regcache_sync(struct regmap *map);
-
-unsigned int regcache_get_val(const void *base, unsigned int idx,
- unsigned int word_size);
-bool regcache_set_val(void *base, unsigned int idx,
- unsigned int val, unsigned int word_size);
+int regcache_sync_block(struct regmap *map, void *block,
+ unsigned int block_base, unsigned int start,
+ unsigned int end);
+
+static inline const void *regcache_get_val_addr(struct regmap *map,
+ const void *base,
+ unsigned int idx)
+{
+ return base + (map->cache_word_size * idx);
+}
+
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+ unsigned int idx);
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+ unsigned int val);
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+int regcache_set_reg_present(struct regmap *map, unsigned int reg);
+
+static inline bool regcache_reg_present(struct regmap *map, unsigned int reg)
+{
+ if (!map->cache_present)
+ return true;
+ if (reg > map->cache_present_nbits)
+ return false;
+ return map->cache_present[BIT_WORD(reg)] & BIT_MASK(reg);
+}
+
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len, bool async);
void regmap_async_complete_cb(struct regmap_async *async, int ret);
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index afd6aa91a0df..e210a6d1406a 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -260,8 +260,7 @@ static int regcache_lzo_read(struct regmap *map,
ret = regcache_lzo_decompress_cache_block(map, lzo_block);
if (ret >= 0)
/* fetch the value from the cache */
- *value = regcache_get_val(lzo_block->dst, blkpos,
- map->cache_word_size);
+ *value = regcache_get_val(map, lzo_block->dst, blkpos);
kfree(lzo_block->dst);
/* restore the pointer and length of the compressed block */
@@ -304,8 +303,7 @@ static int regcache_lzo_write(struct regmap *map,
}
/* write the new value to the cache */
- if (regcache_set_val(lzo_block->dst, blkpos, value,
- map->cache_word_size)) {
+ if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
kfree(lzo_block->dst);
goto out;
}
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index e6732cf7c06e..aa0875f6f1b7 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -47,22 +47,21 @@ static inline void regcache_rbtree_get_base_top_reg(
*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}
-static unsigned int regcache_rbtree_get_register(
- struct regcache_rbtree_node *rbnode, unsigned int idx,
- unsigned int word_size)
+static unsigned int regcache_rbtree_get_register(struct regmap *map,
+ struct regcache_rbtree_node *rbnode, unsigned int idx)
{
- return regcache_get_val(rbnode->block, idx, word_size);
+ return regcache_get_val(map, rbnode->block, idx);
}
-static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode,
- unsigned int idx, unsigned int val,
- unsigned int word_size)
+static void regcache_rbtree_set_register(struct regmap *map,
+ struct regcache_rbtree_node *rbnode,
+ unsigned int idx, unsigned int val)
{
- regcache_set_val(rbnode->block, idx, val, word_size);
+ regcache_set_val(map, rbnode->block, idx, val);
}
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
- unsigned int reg)
+ unsigned int reg)
{
struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
struct rb_node *node;
@@ -139,15 +138,21 @@ static int rbtree_show(struct seq_file *s, void *ignored)
struct regcache_rbtree_node *n;
struct rb_node *node;
unsigned int base, top;
+ size_t mem_size;
int nodes = 0;
int registers = 0;
int this_registers, average;
map->lock(map);
+ mem_size = sizeof(*rbtree_ctx);
+ mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
+
for (node = rb_first(&rbtree_ctx->root); node != NULL;
node = rb_next(node)) {
n = container_of(node, struct regcache_rbtree_node, node);
+ mem_size += sizeof(*n);
+ mem_size += (n->blklen * map->cache_word_size);
regcache_rbtree_get_base_top_reg(map, n, &base, &top);
this_registers = ((top - base) / map->reg_stride) + 1;
@@ -162,8 +167,8 @@ static int rbtree_show(struct seq_file *s, void *ignored)
else
average = 0;
- seq_printf(s, "%d nodes, %d registers, average %d registers\n",
- nodes, registers, average);
+ seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
+ nodes, registers, average, mem_size);
map->unlock(map);
@@ -260,8 +265,9 @@ static int regcache_rbtree_read(struct regmap *map,
rbnode = regcache_rbtree_lookup(map, reg);
if (rbnode) {
reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
- *value = regcache_rbtree_get_register(rbnode, reg_tmp,
- map->cache_word_size);
+ if (!regcache_reg_present(map, reg))
+ return -ENOENT;
+ *value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
} else {
return -ENOENT;
}
@@ -270,21 +276,23 @@ static int regcache_rbtree_read(struct regmap *map,
}
-static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
+static int regcache_rbtree_insert_to_block(struct regmap *map,
+ struct regcache_rbtree_node *rbnode,
unsigned int pos, unsigned int reg,
- unsigned int value, unsigned int word_size)
+ unsigned int value)
{
u8 *blk;
blk = krealloc(rbnode->block,
- (rbnode->blklen + 1) * word_size, GFP_KERNEL);
+ (rbnode->blklen + 1) * map->cache_word_size,
+ GFP_KERNEL);
if (!blk)
return -ENOMEM;
/* insert the register value in the correct place in the rbnode block */
- memmove(blk + (pos + 1) * word_size,
- blk + pos * word_size,
- (rbnode->blklen - pos) * word_size);
+ memmove(blk + (pos + 1) * map->cache_word_size,
+ blk + pos * map->cache_word_size,
+ (rbnode->blklen - pos) * map->cache_word_size);
/* update the rbnode block, its size and the base register */
rbnode->block = blk;
@@ -292,7 +300,7 @@ static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
if (!pos)
rbnode->base_reg = reg;
- regcache_rbtree_set_register(rbnode, pos, value, word_size);
+ regcache_rbtree_set_register(map, rbnode, pos, value);
return 0;
}
@@ -302,25 +310,24 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
struct regcache_rbtree_ctx *rbtree_ctx;
struct regcache_rbtree_node *rbnode, *rbnode_tmp;
struct rb_node *node;
- unsigned int val;
unsigned int reg_tmp;
unsigned int pos;
int i;
int ret;
rbtree_ctx = map->cache;
+ /* update the reg_present bitmap, make space if necessary */
+ ret = regcache_set_reg_present(map, reg);
+ if (ret < 0)
+ return ret;
+
/* if we can't locate it in the cached rbnode we'll have
* to traverse the rbtree looking for it.
*/
rbnode = regcache_rbtree_lookup(map, reg);
if (rbnode) {
reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
- val = regcache_rbtree_get_register(rbnode, reg_tmp,
- map->cache_word_size);
- if (val == value)
- return 0;
- regcache_rbtree_set_register(rbnode, reg_tmp, value,
- map->cache_word_size);
+ regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
} else {
/* look for an adjacent register to the one we are about to add */
for (node = rb_first(&rbtree_ctx->root); node;
@@ -337,9 +344,10 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
pos = i + 1;
else
pos = i;
- ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos,
- reg, value,
- map->cache_word_size);
+ ret = regcache_rbtree_insert_to_block(map,
+ rbnode_tmp,
+ pos, reg,
+ value);
if (ret)
return ret;
rbtree_ctx->cached_rbnode = rbnode_tmp;
@@ -354,7 +362,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
if (!rbnode)
return -ENOMEM;
- rbnode->blklen = 1;
+ rbnode->blklen = sizeof(*rbnode);
rbnode->base_reg = reg;
rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
GFP_KERNEL);
@@ -362,7 +370,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
kfree(rbnode);
return -ENOMEM;
}
- regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
+ regcache_rbtree_set_register(map, rbnode, 0, value);
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}
@@ -376,10 +384,8 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
struct regcache_rbtree_ctx *rbtree_ctx;
struct rb_node *node;
struct regcache_rbtree_node *rbnode;
- unsigned int regtmp;
- unsigned int val;
int ret;
- int i, base, end;
+ int base, end;
rbtree_ctx = map->cache;
for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
@@ -398,31 +404,17 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
base = 0;
if (max < rbnode->base_reg + rbnode->blklen)
- end = rbnode->base_reg + rbnode->blklen - max;
+ end = max - rbnode->base_reg + 1;
else
end = rbnode->blklen;
- for (i = base; i < end; i++) {
- regtmp = rbnode->base_reg + (i * map->reg_stride);
- val = regcache_rbtree_get_register(rbnode, i,
- map->cache_word_size);
-
- /* Is this the hardware default? If so skip. */
- ret = regcache_lookup_reg(map, regtmp);
- if (ret >= 0 && val == map->reg_defaults[ret].def)
- continue;
-
- map->cache_bypass = 1;
- ret = _regmap_write(map, regtmp, val);
- map->cache_bypass = 0;
- if (ret)
- return ret;
- dev_dbg(map->dev, "Synced register %#x, value %#x\n",
- regtmp, val);
- }
+ ret = regcache_sync_block(map, rbnode->block, rbnode->base_reg,
+ base, end);
+ if (ret != 0)
+ return ret;
}
- return 0;
+ return regmap_async_complete(map);
}
struct regcache_ops regcache_rbtree_ops = {
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index e69ff3e4742c..75923f2396bd 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -45,8 +45,8 @@ static int regcache_hw_init(struct regmap *map)
tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
if (!tmp_buf)
return -EINVAL;
- ret = regmap_bulk_read(map, 0, tmp_buf,
- map->num_reg_defaults_raw);
+ ret = regmap_raw_read(map, 0, tmp_buf,
+ map->num_reg_defaults_raw);
map->cache_bypass = cache_bypass;
if (ret < 0) {
kfree(tmp_buf);
@@ -58,8 +58,7 @@ static int regcache_hw_init(struct regmap *map)
/* calculate the size of reg_defaults */
for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
- val = regcache_get_val(map->reg_defaults_raw,
- i, map->cache_word_size);
+ val = regcache_get_val(map, map->reg_defaults_raw, i);
if (regmap_volatile(map, i * map->reg_stride))
continue;
count++;
@@ -75,8 +74,7 @@ static int regcache_hw_init(struct regmap *map)
/* fill the reg_defaults */
map->num_reg_defaults = count;
for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
- val = regcache_get_val(map->reg_defaults_raw,
- i, map->cache_word_size);
+ val = regcache_get_val(map, map->reg_defaults_raw, i);
if (regmap_volatile(map, i * map->reg_stride))
continue;
map->reg_defaults[j].reg = i * map->reg_stride;
@@ -123,6 +121,8 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
map->reg_defaults_raw = config->reg_defaults_raw;
map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
+ map->cache_present = NULL;
+ map->cache_present_nbits = 0;
map->cache = NULL;
map->cache_ops = cache_types[i];
@@ -181,6 +181,7 @@ void regcache_exit(struct regmap *map)
BUG_ON(!map->cache_ops);
+ kfree(map->cache_present);
kfree(map->reg_defaults);
if (map->cache_free)
kfree(map->reg_defaults_raw);
@@ -417,28 +418,68 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
-bool regcache_set_val(void *base, unsigned int idx,
- unsigned int val, unsigned int word_size)
+int regcache_set_reg_present(struct regmap *map, unsigned int reg)
{
- switch (word_size) {
+ unsigned long *cache_present;
+ unsigned int cache_present_size;
+ unsigned int nregs;
+ int i;
+
+ nregs = reg + 1;
+ cache_present_size = BITS_TO_LONGS(nregs);
+ cache_present_size *= sizeof(long);
+
+ if (!map->cache_present) {
+ cache_present = kmalloc(cache_present_size, GFP_KERNEL);
+ if (!cache_present)
+ return -ENOMEM;
+ bitmap_zero(cache_present, nregs);
+ map->cache_present = cache_present;
+ map->cache_present_nbits = nregs;
+ }
+
+ if (nregs > map->cache_present_nbits) {
+ cache_present = krealloc(map->cache_present,
+ cache_present_size, GFP_KERNEL);
+ if (!cache_present)
+ return -ENOMEM;
+ for (i = 0; i < nregs; i++)
+ if (i >= map->cache_present_nbits)
+ clear_bit(i, cache_present);
+ map->cache_present = cache_present;
+ map->cache_present_nbits = nregs;
+ }
+
+ set_bit(reg, map->cache_present);
+ return 0;
+}
+
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+ unsigned int val)
+{
+ if (regcache_get_val(map, base, idx) == val)
+ return true;
+
+ /* Use device native format if possible */
+ if (map->format.format_val) {
+ map->format.format_val(base + (map->cache_word_size * idx),
+ val, 0);
+ return false;
+ }
+
+ switch (map->cache_word_size) {
case 1: {
u8 *cache = base;
- if (cache[idx] == val)
- return true;
cache[idx] = val;
break;
}
case 2: {
u16 *cache = base;
- if (cache[idx] == val)
- return true;
cache[idx] = val;
break;
}
case 4: {
u32 *cache = base;
- if (cache[idx] == val)
- return true;
cache[idx] = val;
break;
}
@@ -448,13 +489,18 @@ bool regcache_set_val(void *base, unsigned int idx,
return false;
}
-unsigned int regcache_get_val(const void *base, unsigned int idx,
- unsigned int word_size)
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+ unsigned int idx)
{
if (!base)
return -EINVAL;
- switch (word_size) {
+ /* Use device native format if possible */
+ if (map->format.parse_val)
+ return map->format.parse_val(regcache_get_val_addr(map, base,
+ idx));
+
+ switch (map->cache_word_size) {
case 1: {
const u8 *cache = base;
return cache[idx];
@@ -498,3 +544,117 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg)
else
return -ENOENT;
}
+
+static int regcache_sync_block_single(struct regmap *map, void *block,
+ unsigned int block_base,
+ unsigned int start, unsigned int end)
+{
+ unsigned int i, regtmp, val;
+ int ret;
+
+ for (i = start; i < end; i++) {
+ regtmp = block_base + (i * map->reg_stride);
+
+ if (!regcache_reg_present(map, regtmp))
+ continue;
+
+ val = regcache_get_val(map, block, i);
+
+ /* Is this the hardware default? If so skip. */
+ ret = regcache_lookup_reg(map, regtmp);
+ if (ret >= 0 && val == map->reg_defaults[ret].def)
+ continue;
+
+ map->cache_bypass = 1;
+
+ ret = _regmap_write(map, regtmp, val);
+
+ map->cache_bypass = 0;
+ if (ret != 0)
+ return ret;
+ dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+ regtmp, val);
+ }
+
+ return 0;
+}
+
+static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
+ unsigned int base, unsigned int cur)
+{
+ size_t val_bytes = map->format.val_bytes;
+ int ret, count;
+
+ if (*data == NULL)
+ return 0;
+
+ count = cur - base;
+
+ dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
+ count * val_bytes, count, base, cur - 1);
+
+ map->cache_bypass = 1;
+
+ ret = _regmap_raw_write(map, base, *data, count * val_bytes,
+ false);
+
+ map->cache_bypass = 0;
+
+ *data = NULL;
+
+ return ret;
+}
+
+static int regcache_sync_block_raw(struct regmap *map, void *block,
+ unsigned int block_base, unsigned int start,
+ unsigned int end)
+{
+ unsigned int i, val;
+ unsigned int regtmp = 0;
+ unsigned int base = 0;
+ const void *data = NULL;
+ int ret;
+
+ for (i = start; i < end; i++) {
+ regtmp = block_base + (i * map->reg_stride);
+
+ if (!regcache_reg_present(map, regtmp)) {
+ ret = regcache_sync_block_raw_flush(map, &data,
+ base, regtmp);
+ if (ret != 0)
+ return ret;
+ continue;
+ }
+
+ val = regcache_get_val(map, block, i);
+
+ /* Is this the hardware default? If so skip. */
+ ret = regcache_lookup_reg(map, regtmp);
+ if (ret >= 0 && val == map->reg_defaults[ret].def) {
+ ret = regcache_sync_block_raw_flush(map, &data,
+ base, regtmp);
+ if (ret != 0)
+ return ret;
+ continue;
+ }
+
+ if (!data) {
+ data = regcache_get_val_addr(map, block, i);
+ base = regtmp;
+ }
+ }
+
+ return regcache_sync_block_raw_flush(map, &data, base, regtmp);
+}
+
+int regcache_sync_block(struct regmap *map, void *block,
+ unsigned int block_base, unsigned int start,
+ unsigned int end)
+{
+ if (regmap_can_raw_write(map))
+ return regcache_sync_block_raw(map, block, block_base,
+ start, end);
+ else
+ return regcache_sync_block_single(map, block, block_base,
+ start, end);
+}
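For illustration, a minimal sketch (not part of this patch) of how a block-based cache type could build its ->sync() operation on the new regcache_sync_block() helper; struct foo_cache and its single flat block are hypothetical:

#include "internal.h"

struct foo_cache {
	void *block;	/* map->cache_word_size * (max_register + 1) bytes */
};

static int foo_cache_sync(struct regmap *map, unsigned int min,
			  unsigned int max)
{
	struct foo_cache *cache = map->cache;

	/* registers not marked present, or equal to their defaults, are
	 * skipped; on raw-writable maps adjacent registers are coalesced */
	return regcache_sync_block(map, cache->block, 0,
				   min / map->reg_stride,
				   (max / map->reg_stride) + 1);
}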
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 81d6f605c92e..23b701f5fd2f 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -88,16 +88,16 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
* If we don't have a cache build one so we don't have to do a
* linear scan each time.
*/
+ mutex_lock(&map->cache_lock);
+ i = base;
if (list_empty(&map->debugfs_off_cache)) {
- for (i = base; i <= map->max_register; i += map->reg_stride) {
+ for (; i <= map->max_register; i += map->reg_stride) {
/* Skip unprinted registers, closing off cache entry */
if (!regmap_readable(map, i) ||
regmap_precious(map, i)) {
if (c) {
c->max = p - 1;
- fpos_offset = c->max - c->min;
- reg_offset = fpos_offset / map->debugfs_tot_len;
- c->max_reg = c->base_reg + reg_offset;
+ c->max_reg = i - map->reg_stride;
list_add_tail(&c->list,
&map->debugfs_off_cache);
c = NULL;
@@ -111,6 +111,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c) {
regmap_debugfs_free_dump_cache(map);
+ mutex_unlock(&map->cache_lock);
return base;
}
c->min = p;
@@ -124,9 +125,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
/* Close the last entry off if we didn't scan beyond it */
if (c) {
c->max = p - 1;
- fpos_offset = c->max - c->min;
- reg_offset = fpos_offset / map->debugfs_tot_len;
- c->max_reg = c->base_reg + reg_offset;
+ c->max_reg = i - map->reg_stride;
list_add_tail(&c->list,
&map->debugfs_off_cache);
}
@@ -145,12 +144,14 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
fpos_offset = from - c->min;
reg_offset = fpos_offset / map->debugfs_tot_len;
*pos = c->min + (reg_offset * map->debugfs_tot_len);
+ mutex_unlock(&map->cache_lock);
return c->base_reg + reg_offset;
}
*pos = c->max;
ret = c->max_reg;
}
+ mutex_unlock(&map->cache_lock);
return ret;
}
@@ -311,6 +312,79 @@ static const struct file_operations regmap_range_fops = {
.llseek = default_llseek,
};
+static ssize_t regmap_reg_ranges_read_file(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct regmap *map = file->private_data;
+ struct regmap_debugfs_off_cache *c;
+ loff_t p = 0;
+ size_t buf_pos = 0;
+ char *buf;
+ char *entry;
+ int ret;
+
+ if (*ppos < 0 || !count)
+ return -EINVAL;
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!entry) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ /* While we are at it, build the register dump cache
+ * now so the read() operation on the `registers' file
+ * can benefit from using the cache. We do not care
+ * about the file position information that is contained
+ * in the cache, just about the actual register blocks */
+ regmap_calc_tot_len(map, buf, count);
+ regmap_debugfs_get_dump_start(map, 0, *ppos, &p);
+
+ /* Reset file pointer as the fixed-format of the `registers'
+ * file is not compatible with the `range' file */
+ p = 0;
+ mutex_lock(&map->cache_lock);
+ list_for_each_entry(c, &map->debugfs_off_cache, list) {
+ snprintf(entry, PAGE_SIZE, "%x-%x",
+ c->base_reg, c->max_reg);
+ if (p >= *ppos) {
+ if (buf_pos + 1 + strlen(entry) > count)
+ break;
+ snprintf(buf + buf_pos, count - buf_pos,
+ "%s", entry);
+ buf_pos += strlen(entry);
+ buf[buf_pos] = '\n';
+ buf_pos++;
+ }
+ p += strlen(entry) + 1;
+ }
+ mutex_unlock(&map->cache_lock);
+
+ kfree(entry);
+ ret = buf_pos;
+
+ if (copy_to_user(user_buf, buf, buf_pos)) {
+ ret = -EFAULT;
+ goto out_buf;
+ }
+
+ *ppos += buf_pos;
+out_buf:
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations regmap_reg_ranges_fops = {
+ .open = simple_open,
+ .read = regmap_reg_ranges_read_file,
+ .llseek = default_llseek,
+};
+
static ssize_t regmap_access_read_file(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos)
@@ -385,6 +459,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
struct regmap_range_node *range_node;
INIT_LIST_HEAD(&map->debugfs_off_cache);
+ mutex_init(&map->cache_lock);
if (name) {
map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
@@ -403,6 +478,9 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
debugfs_create_file("name", 0400, map->debugfs,
map, &regmap_name_fops);
+ debugfs_create_file("range", 0400, map->debugfs,
+ map, &regmap_reg_ranges_fops);
+
if (map->max_register) {
debugfs_create_file("registers", 0400, map->debugfs,
map, &regmap_map_fops);
@@ -435,7 +513,9 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
void regmap_debugfs_exit(struct regmap *map)
{
debugfs_remove_recursive(map->debugfs);
+ mutex_lock(&map->cache_lock);
regmap_debugfs_free_dump_cache(map);
+ mutex_unlock(&map->cache_lock);
kfree(map->debugfs_name);
}
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 020ea2b9fd2f..1643e889bafc 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -460,7 +460,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
chip->name, d);
if (ret != 0) {
- dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
+ dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
+ irq, chip->name, ret);
goto err_domain;
}
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 3d2367501fd0..a941dcfe7590 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -228,30 +228,39 @@ static void regmap_format_32_native(void *buf, unsigned int val,
*(u32 *)buf = val << shift;
}
-static unsigned int regmap_parse_8(void *buf)
+static void regmap_parse_inplace_noop(void *buf)
{
- u8 *b = buf;
+}
+
+static unsigned int regmap_parse_8(const void *buf)
+{
+ const u8 *b = buf;
return b[0];
}
-static unsigned int regmap_parse_16_be(void *buf)
+static unsigned int regmap_parse_16_be(const void *buf)
+{
+ const __be16 *b = buf;
+
+ return be16_to_cpu(b[0]);
+}
+
+static void regmap_parse_16_be_inplace(void *buf)
{
__be16 *b = buf;
b[0] = be16_to_cpu(b[0]);
-
- return b[0];
}
-static unsigned int regmap_parse_16_native(void *buf)
+static unsigned int regmap_parse_16_native(const void *buf)
{
return *(u16 *)buf;
}
-static unsigned int regmap_parse_24(void *buf)
+static unsigned int regmap_parse_24(const void *buf)
{
- u8 *b = buf;
+ const u8 *b = buf;
unsigned int ret = b[2];
ret |= ((unsigned int)b[1]) << 8;
ret |= ((unsigned int)b[0]) << 16;
@@ -259,16 +268,21 @@ static unsigned int regmap_parse_24(void *buf)
return ret;
}
-static unsigned int regmap_parse_32_be(void *buf)
+static unsigned int regmap_parse_32_be(const void *buf)
+{
+ const __be32 *b = buf;
+
+ return be32_to_cpu(b[0]);
+}
+
+static void regmap_parse_32_be_inplace(void *buf)
{
__be32 *b = buf;
b[0] = be32_to_cpu(b[0]);
-
- return b[0];
}
-static unsigned int regmap_parse_32_native(void *buf)
+static unsigned int regmap_parse_32_native(const void *buf)
{
return *(u32 *)buf;
}
@@ -555,16 +569,21 @@ struct regmap *regmap_init(struct device *dev,
goto err_map;
}
+ if (val_endian == REGMAP_ENDIAN_NATIVE)
+ map->format.parse_inplace = regmap_parse_inplace_noop;
+
switch (config->val_bits) {
case 8:
map->format.format_val = regmap_format_8;
map->format.parse_val = regmap_parse_8;
+ map->format.parse_inplace = regmap_parse_inplace_noop;
break;
case 16:
switch (val_endian) {
case REGMAP_ENDIAN_BIG:
map->format.format_val = regmap_format_16_be;
map->format.parse_val = regmap_parse_16_be;
+ map->format.parse_inplace = regmap_parse_16_be_inplace;
break;
case REGMAP_ENDIAN_NATIVE:
map->format.format_val = regmap_format_16_native;
@@ -585,6 +604,7 @@ struct regmap *regmap_init(struct device *dev,
case REGMAP_ENDIAN_BIG:
map->format.format_val = regmap_format_32_be;
map->format.parse_val = regmap_parse_32_be;
+ map->format.parse_inplace = regmap_parse_32_be_inplace;
break;
case REGMAP_ENDIAN_NATIVE:
map->format.format_val = regmap_format_32_native;
@@ -710,12 +730,12 @@ skip_format_initialization:
}
}
+ regmap_debugfs_init(map, config->name);
+
ret = regcache_init(map, config);
if (ret != 0)
goto err_range;
- regmap_debugfs_init(map, config->name);
-
/* Add a devres resource for dev_get_regmap() */
m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
if (!m) {
@@ -917,8 +937,8 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
return 0;
}
-static int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len, bool async)
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len, bool async)
{
struct regmap_range_node *range;
unsigned long flags;
@@ -930,7 +950,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
size_t len;
int i;
- BUG_ON(!map->bus);
+ WARN_ON(!map->bus);
/* Check for unwritable registers before we start */
if (map->writeable_reg)
@@ -943,8 +963,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
unsigned int ival;
int val_bytes = map->format.val_bytes;
for (i = 0; i < val_len / val_bytes; i++) {
- memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
- ival = map->format.parse_val(map->work_buf);
+ ival = map->format.parse_val(val + (i * val_bytes));
ret = regcache_write(map, reg + (i * map->reg_stride),
ival);
if (ret) {
@@ -999,6 +1018,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
if (!async)
return -ENOMEM;
+ trace_regmap_async_write_start(map->dev, reg, val_len);
+
async->work_buf = kzalloc(map->format.buf_size,
GFP_KERNEL | GFP_DMA);
if (!async->work_buf) {
@@ -1036,6 +1057,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
kfree(async->work_buf);
kfree(async);
}
+
+ return ret;
}
trace_regmap_hw_write_start(map->dev, reg,
@@ -1077,6 +1100,17 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
return ret;
}
+/**
+ * regmap_can_raw_write - Test if regmap_raw_write() is supported
+ *
+ * @map: Map to check.
+ */
+bool regmap_can_raw_write(struct regmap *map)
+{
+ return map->bus && map->format.format_val && map->format.format_reg;
+}
+EXPORT_SYMBOL_GPL(regmap_can_raw_write);
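
regmap_can_raw_write() lets callers check up front whether a map can service raw block writes (it needs a bus plus value and register formatting). A minimal sketch of a hypothetical driver helper that falls back to per-register regmap_write() calls when raw writes are unavailable; foo_write_block() and the assumption of a reg_stride of 1 are illustrative, not part of this patch:

#include <linux/regmap.h>

static int foo_write_block(struct regmap *map, unsigned int reg,
			   const __be16 *buf, size_t count)
{
	size_t i;
	int ret;

	/* Fast path: push the whole big-endian buffer in one raw transaction. */
	if (regmap_can_raw_write(map))
		return regmap_raw_write(map, reg, buf, count * sizeof(*buf));

	/* Fallback: write each 16-bit value through the formatted path. */
	for (i = 0; i < count; i++) {
		ret = regmap_write(map, reg + i, be16_to_cpu(buf[i]));
		if (ret)
			return ret;
	}

	return 0;
}
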
+
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
unsigned int val)
{
@@ -1084,7 +1118,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
struct regmap_range_node *range;
struct regmap *map = context;
- BUG_ON(!map->bus || !map->format.format_write);
+ WARN_ON(!map->bus || !map->format.format_write);
range = _regmap_range_lookup(map, reg);
if (range) {
@@ -1110,7 +1144,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
{
struct regmap *map = context;
- BUG_ON(!map->bus || !map->format.format_val);
+ WARN_ON(!map->bus || !map->format.format_val);
map->format.format_val(map->work_buf + map->format.reg_bytes
+ map->format.pad_bytes, val, 0);
@@ -1200,12 +1234,10 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
{
int ret;
- if (!map->bus)
+ if (!regmap_can_raw_write(map))
return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
- if (reg % map->reg_stride)
- return -EINVAL;
map->lock(map->lock_arg);
@@ -1240,7 +1272,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
if (!map->bus)
return -EINVAL;
- if (!map->format.parse_val)
+ if (!map->format.parse_inplace)
return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
@@ -1258,7 +1290,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
goto out;
}
for (i = 0; i < val_count * val_bytes; i += val_bytes)
- map->format.parse_val(wval + i);
+ map->format.parse_inplace(wval + i);
}
/*
* Some devices do not support bulk write, for
@@ -1336,7 +1368,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
u8 *u8 = map->work_buf;
int ret;
- BUG_ON(!map->bus);
+ WARN_ON(!map->bus);
range = _regmap_range_lookup(map, reg);
if (range) {
@@ -1391,7 +1423,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
int ret;
void *context = _regmap_map_get_context(map);
- BUG_ON(!map->reg_read);
+ WARN_ON(!map->reg_read);
if (!map->cache_bypass) {
ret = regcache_read(map, reg, val);
@@ -1519,7 +1551,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
if (!map->bus)
return -EINVAL;
- if (!map->format.parse_val)
+ if (!map->format.parse_inplace)
return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
@@ -1546,7 +1578,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
}
for (i = 0; i < val_count * val_bytes; i += val_bytes)
- map->format.parse_val(val + i);
+ map->format.parse_inplace(val + i);
} else {
for (i = 0; i < val_count; i++) {
unsigned int ival;
@@ -1640,6 +1672,8 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
struct regmap *map = async->map;
bool wake;
+ trace_regmap_async_io_complete(map->dev);
+
spin_lock(&map->async_lock);
list_del(&async->list);
@@ -1686,6 +1720,8 @@ int regmap_async_complete(struct regmap *map)
if (!map->bus->async_write)
return 0;
+ trace_regmap_async_complete_start(map->dev);
+
wait_event(map->async_waitq, regmap_async_is_done(map));
spin_lock_irqsave(&map->async_lock, flags);
@@ -1693,6 +1729,8 @@ int regmap_async_complete(struct regmap *map)
map->async_ret = 0;
spin_unlock_irqrestore(&map->async_lock, flags);
+ trace_regmap_async_complete_done(map->dev);
+
return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);