Diffstat (limited to 'drivers/iio/industrialio-buffer.c')
 drivers/iio/industrialio-buffer.c | 517 ++++++++++++++++++++++++++++++-------
 1 file changed, 409 insertions(+), 108 deletions(-)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 2f7426a2f47c..9a8e16c7e9af 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -9,9 +9,11 @@
* - Better memory allocation techniques?
* - Alternative access techniques?
*/
+#include <linux/anon_inodes.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
+#include <linux/file.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
@@ -89,7 +91,7 @@ static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
}
/**
- * iio_buffer_read_outer() - chrdev read for buffer access
+ * iio_buffer_read() - chrdev read for buffer access
* @filp: File structure pointer for the char device
* @buf: Destination buffer for iio buffer read
* @n: First n bytes to read
@@ -101,11 +103,12 @@ static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
* Return: negative values corresponding to error codes or ret != 0
* for ending the reading activity
**/
-ssize_t iio_buffer_read_outer(struct file *filp, char __user *buf,
- size_t n, loff_t *f_ps)
+static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
+ size_t n, loff_t *f_ps)
{
- struct iio_dev *indio_dev = filp->private_data;
- struct iio_buffer *rb = indio_dev->buffer;
+ struct iio_dev_buffer_pair *ib = filp->private_data;
+ struct iio_buffer *rb = ib->buffer;
+ struct iio_dev *indio_dev = ib->indio_dev;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
size_t datum_size;
size_t to_wait;
@@ -167,11 +170,12 @@ ssize_t iio_buffer_read_outer(struct file *filp, char __user *buf,
* Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
* or 0 for other cases
*/
-__poll_t iio_buffer_poll(struct file *filp,
- struct poll_table_struct *wait)
+static __poll_t iio_buffer_poll(struct file *filp,
+ struct poll_table_struct *wait)
{
- struct iio_dev *indio_dev = filp->private_data;
- struct iio_buffer *rb = indio_dev->buffer;
+ struct iio_dev_buffer_pair *ib = filp->private_data;
+ struct iio_buffer *rb = ib->buffer;
+ struct iio_dev *indio_dev = ib->indio_dev;
if (!indio_dev->info || rb == NULL)
return 0;
@@ -182,6 +186,32 @@ __poll_t iio_buffer_poll(struct file *filp,
return 0;
}
+ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
+ size_t n, loff_t *f_ps)
+{
+ struct iio_dev_buffer_pair *ib = filp->private_data;
+ struct iio_buffer *rb = ib->buffer;
+
+ /* check if buffer was opened through new API */
+ if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
+ return -EBUSY;
+
+ return iio_buffer_read(filp, buf, n, f_ps);
+}
+
+__poll_t iio_buffer_poll_wrapper(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct iio_dev_buffer_pair *ib = filp->private_data;
+ struct iio_buffer *rb = ib->buffer;
+
+ /* check if buffer was opened through new API */
+ if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
+ return 0;
+
+ return iio_buffer_poll(filp, wait);
+}
+
/**
* iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
* @indio_dev: The IIO device
@@ -191,12 +221,14 @@ __poll_t iio_buffer_poll(struct file *filp,
*/
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
- struct iio_buffer *buffer = indio_dev->buffer;
-
- if (!buffer)
- return;
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+ struct iio_buffer *buffer;
+ unsigned int i;
- wake_up(&buffer->pollq);
+ for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
+ buffer = iio_dev_opaque->attached_buffers[i];
+ wake_up(&buffer->pollq);
+ }
}
void iio_buffer_init(struct iio_buffer *buffer)
@@ -210,11 +242,25 @@ void iio_buffer_init(struct iio_buffer *buffer)
}
EXPORT_SYMBOL(iio_buffer_init);
+void iio_device_detach_buffers(struct iio_dev *indio_dev)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+ struct iio_buffer *buffer;
+ unsigned int i;
+
+ for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
+ buffer = iio_dev_opaque->attached_buffers[i];
+ iio_buffer_put(buffer);
+ }
+
+ kfree(iio_dev_opaque->attached_buffers);
+}
+
static ssize_t iio_show_scan_index(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
+ return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
@@ -232,15 +278,15 @@ static ssize_t iio_show_fixed_type(struct device *dev,
#endif
}
if (this_attr->c->scan_type.repeat > 1)
- return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
+ return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
iio_endian_prefix[type],
this_attr->c->scan_type.sign,
this_attr->c->scan_type.realbits,
this_attr->c->scan_type.storagebits,
this_attr->c->scan_type.repeat,
this_attr->c->scan_type.shift);
- else
- return sprintf(buf, "%s:%c%d/%d>>%u\n",
+ else
+ return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
iio_endian_prefix[type],
this_attr->c->scan_type.sign,
this_attr->c->scan_type.realbits,
@@ -253,14 +299,13 @@ static ssize_t iio_scan_el_show(struct device *dev,
char *buf)
{
int ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
/* Ensure ret is 0 or 1. */
ret = !!test_bit(to_iio_dev_attr(attr)->address,
buffer->scan_mask);
- return sprintf(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
/* Note NULL used as error indicator as it doesn't make sense. */
@@ -367,8 +412,8 @@ static ssize_t iio_scan_el_store(struct device *dev,
int ret;
bool state;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct iio_buffer *buffer = this_attr->buffer;
ret = strtobool(buf, &state);
if (ret < 0)
@@ -402,10 +447,9 @@ static ssize_t iio_scan_el_ts_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
- return sprintf(buf, "%d\n", buffer->scan_timestamp);
+ return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
@@ -415,7 +459,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
bool state;
ret = strtobool(buf, &state);
@@ -447,7 +491,8 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
0,
IIO_SEPARATE,
&indio_dev->dev,
- &buffer->scan_el_dev_attr_list);
+ buffer,
+ &buffer->buffer_attr_list);
if (ret)
return ret;
attrcount++;
@@ -458,7 +503,8 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
0,
0,
&indio_dev->dev,
- &buffer->scan_el_dev_attr_list);
+ buffer,
+ &buffer->buffer_attr_list);
if (ret)
return ret;
attrcount++;
@@ -470,7 +516,8 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
chan->scan_index,
0,
&indio_dev->dev,
- &buffer->scan_el_dev_attr_list);
+ buffer,
+ &buffer->buffer_attr_list);
else
ret = __iio_add_chan_devattr("en",
chan,
@@ -479,7 +526,8 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
chan->scan_index,
0,
&indio_dev->dev,
- &buffer->scan_el_dev_attr_list);
+ buffer,
+ &buffer->buffer_attr_list);
if (ret)
return ret;
attrcount++;
@@ -491,10 +539,9 @@ static ssize_t iio_buffer_read_length(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
- return sprintf(buf, "%d\n", buffer->length);
+ return sysfs_emit(buf, "%d\n", buffer->length);
}
static ssize_t iio_buffer_write_length(struct device *dev,
@@ -502,7 +549,7 @@ static ssize_t iio_buffer_write_length(struct device *dev,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
unsigned int val;
int ret;
@@ -534,10 +581,9 @@ static ssize_t iio_buffer_show_enable(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
- return sprintf(buf, "%d\n", iio_buffer_is_active(buffer));
+ return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
}
static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
@@ -1150,7 +1196,7 @@ static ssize_t iio_buffer_store_enable(struct device *dev,
int ret;
bool requested_state;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
bool inlist;
ret = strtobool(buf, &requested_state);
@@ -1175,16 +1221,13 @@ done:
return (ret < 0) ? ret : len;
}
-static const char * const iio_scan_elements_group_name = "scan_elements";
-
static ssize_t iio_buffer_show_watermark(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
- return sprintf(buf, "%u\n", buffer->watermark);
+ return sysfs_emit(buf, "%u\n", buffer->watermark);
}
static ssize_t iio_buffer_store_watermark(struct device *dev,
@@ -1193,7 +1236,7 @@ static ssize_t iio_buffer_store_watermark(struct device *dev,
size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
unsigned int val;
int ret;
@@ -1226,10 +1269,9 @@ static ssize_t iio_dma_show_data_available(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
- return sprintf(buf, "%zu\n", iio_buffer_data_available(buffer));
+ return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
}
static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
@@ -1252,45 +1294,194 @@ static struct attribute *iio_buffer_attrs[] = {
&dev_attr_data_available.attr,
};
-static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
- struct iio_dev *indio_dev)
+#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
+
+static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
+ struct attribute *attr)
{
- struct iio_dev_attr *p;
- struct attribute **attr;
- int ret, i, attrn, attrcount;
- const struct iio_chan_spec *channels;
+ struct device_attribute *dattr = to_dev_attr(attr);
+ struct iio_dev_attr *iio_attr;
- attrcount = 0;
- if (buffer->attrs) {
- while (buffer->attrs[attrcount] != NULL)
- attrcount++;
- }
+ iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
+ if (!iio_attr)
+ return NULL;
+
+ iio_attr->buffer = buffer;
+ memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
+ iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
+ sysfs_attr_init(&iio_attr->dev_attr.attr);
+
+ list_add(&iio_attr->l, &buffer->buffer_attr_list);
+
+ return &iio_attr->dev_attr.attr;
+}
+
+static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
+ struct attribute **buffer_attrs,
+ int buffer_attrcount,
+ int scan_el_attrcount)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+ struct attribute_group *group;
+ struct attribute **attrs;
+ int ret;
- attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
- sizeof(struct attribute *), GFP_KERNEL);
- if (!attr)
+ attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
return -ENOMEM;
- memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
- if (!buffer->access->set_length)
- attr[0] = &dev_attr_length_ro.attr;
+ memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));
- if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
- attr[2] = &dev_attr_watermark_ro.attr;
+ group = &iio_dev_opaque->legacy_buffer_group;
+ group->attrs = attrs;
+ group->name = "buffer";
- if (buffer->attrs)
- memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
- sizeof(struct attribute *) * attrcount);
+ ret = iio_device_register_sysfs_group(indio_dev, group);
+ if (ret)
+ goto error_free_buffer_attrs;
- attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;
+ attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs) {
+ ret = -ENOMEM;
+ goto error_free_buffer_attrs;
+ }
- buffer->buffer_group.name = "buffer";
- buffer->buffer_group.attrs = attr;
+ memcpy(attrs, &buffer_attrs[buffer_attrcount],
+ scan_el_attrcount * sizeof(*attrs));
+
+ group = &iio_dev_opaque->legacy_scan_el_group;
+ group->attrs = attrs;
+ group->name = "scan_elements";
+
+ ret = iio_device_register_sysfs_group(indio_dev, group);
+ if (ret)
+ goto error_free_scan_el_attrs;
+
+ return 0;
+
+error_free_buffer_attrs:
+ kfree(iio_dev_opaque->legacy_buffer_group.attrs);
+error_free_scan_el_attrs:
+ kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
+
+ return ret;
+}
+
+static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;
+ kfree(iio_dev_opaque->legacy_buffer_group.attrs);
+ kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
+}
+
+static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
+{
+ struct iio_dev_buffer_pair *ib = filep->private_data;
+ struct iio_dev *indio_dev = ib->indio_dev;
+ struct iio_buffer *buffer = ib->buffer;
+
+ wake_up(&buffer->pollq);
+
+ kfree(ib);
+ clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
+ iio_device_put(indio_dev);
+
+ return 0;
+}
+
+static const struct file_operations iio_buffer_chrdev_fileops = {
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .read = iio_buffer_read,
+ .poll = iio_buffer_poll,
+ .release = iio_buffer_chrdev_release,
+};
+
+static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+ int __user *ival = (int __user *)arg;
+ struct iio_dev_buffer_pair *ib;
+ struct iio_buffer *buffer;
+ int fd, idx, ret;
+
+ if (copy_from_user(&idx, ival, sizeof(idx)))
+ return -EFAULT;
+
+ if (idx >= iio_dev_opaque->attached_buffers_cnt)
+ return -ENODEV;
- attrcount = 0;
- INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
+ iio_device_get(indio_dev);
+
+ buffer = iio_dev_opaque->attached_buffers[idx];
+
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
+ ret = -EBUSY;
+ goto error_iio_dev_put;
+ }
+
+ ib = kzalloc(sizeof(*ib), GFP_KERNEL);
+ if (!ib) {
+ ret = -ENOMEM;
+ goto error_clear_busy_bit;
+ }
+
+ ib->indio_dev = indio_dev;
+ ib->buffer = buffer;
+
+ fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
+ ib, O_RDWR | O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+ goto error_free_ib;
+ }
+
+ if (copy_to_user(ival, &fd, sizeof(fd))) {
+ put_unused_fd(fd);
+ ret = -EFAULT;
+ goto error_free_ib;
+ }
+
+ return 0;
+
+error_free_ib:
+ kfree(ib);
+error_clear_busy_bit:
+ clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
+error_iio_dev_put:
+ iio_device_put(indio_dev);
+ return ret;
+}
+
+static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case IIO_BUFFER_GET_FD_IOCTL:
+ return iio_device_buffer_getfd(indio_dev, arg);
+ default:
+ return IIO_IOCTL_UNHANDLED;
+ }
+}
+
+static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
+ struct iio_dev *indio_dev,
+ int index)
+{
+ struct iio_dev_attr *p;
+ struct attribute **attr;
+ int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
+ const struct iio_chan_spec *channels;
+
+ buffer_attrcount = 0;
+ if (buffer->attrs) {
+ while (buffer->attrs[buffer_attrcount] != NULL)
+ buffer_attrcount++;
+ }
+
+ scan_el_attrcount = 0;
+ INIT_LIST_HEAD(&buffer->buffer_attr_list);
channels = indio_dev->channels;
if (channels) {
/* new magic */
@@ -1302,7 +1493,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
&channels[i]);
if (ret < 0)
goto error_cleanup_dynamic;
- attrcount += ret;
+ scan_el_attrcount += ret;
if (channels[i].type == IIO_TIMESTAMP)
indio_dev->scan_index_timestamp =
channels[i].scan_index;
@@ -1317,37 +1508,93 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
}
}
- buffer->scan_el_group.name = iio_scan_elements_group_name;
-
- buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
- sizeof(buffer->scan_el_group.attrs[0]),
- GFP_KERNEL);
- if (buffer->scan_el_group.attrs == NULL) {
+ attrn = buffer_attrcount + scan_el_attrcount + ARRAY_SIZE(iio_buffer_attrs);
+ attr = kcalloc(attrn + 1, sizeof(* attr), GFP_KERNEL);
+ if (!attr) {
ret = -ENOMEM;
goto error_free_scan_mask;
}
+
+ memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
+ if (!buffer->access->set_length)
+ attr[0] = &dev_attr_length_ro.attr;
+
+ if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
+ attr[2] = &dev_attr_watermark_ro.attr;
+
+ if (buffer->attrs)
+ memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
+ sizeof(struct attribute *) * buffer_attrcount);
+
+ buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
+
+ for (i = 0; i < buffer_attrcount; i++) {
+ struct attribute *wrapped;
+
+ wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
+ if (!wrapped) {
+ ret = -ENOMEM;
+ goto error_free_scan_mask;
+ }
+ attr[i] = wrapped;
+ }
+
attrn = 0;
+ list_for_each_entry(p, &buffer->buffer_attr_list, l)
+ attr[attrn++] = &p->dev_attr.attr;
- list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
- buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
- indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
+ buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
+ if (!buffer->buffer_group.name) {
+ ret = -ENOMEM;
+ goto error_free_buffer_attrs;
+ }
+
+ buffer->buffer_group.attrs = attr;
+
+ ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
+ if (ret)
+ goto error_free_buffer_attr_group_name;
+
+ /* we only need to register the legacy groups for the first buffer */
+ if (index > 0)
+ return 0;
+
+ ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
+ buffer_attrcount,
+ scan_el_attrcount);
+ if (ret)
+ goto error_free_buffer_attr_group_name;
return 0;
+error_free_buffer_attr_group_name:
+ kfree(buffer->buffer_group.name);
+error_free_buffer_attrs:
+ kfree(buffer->buffer_group.attrs);
error_free_scan_mask:
bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
- iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
- kfree(buffer->buffer_group.attrs);
+ iio_free_chan_devattr_list(&buffer->buffer_attr_list);
return ret;
}
-int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
+static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer)
+{
+ bitmap_free(buffer->scan_mask);
+ kfree(buffer->buffer_group.name);
+ kfree(buffer->buffer_group.attrs);
+ iio_free_chan_devattr_list(&buffer->buffer_attr_list);
+}
+
+int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
- struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
const struct iio_chan_spec *channels;
- int i;
+ struct iio_buffer *buffer;
+ int unwind_idx;
+ int ret, i;
+ size_t sz;
channels = indio_dev->channels;
if (channels) {
@@ -1358,28 +1605,58 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
indio_dev->masklength = ml;
}
- if (!buffer)
+ if (!iio_dev_opaque->attached_buffers_cnt)
return 0;
- return __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev);
-}
+ for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
+ buffer = iio_dev_opaque->attached_buffers[i];
+ ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, i);
+ if (ret) {
+ unwind_idx = i;
+ goto error_unwind_sysfs_and_mask;
+ }
+ }
+ unwind_idx = iio_dev_opaque->attached_buffers_cnt - 1;
-static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer)
-{
- bitmap_free(buffer->scan_mask);
- kfree(buffer->buffer_group.attrs);
- kfree(buffer->scan_el_group.attrs);
- iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
+ sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler));
+ iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
+ if (!iio_dev_opaque->buffer_ioctl_handler) {
+ ret = -ENOMEM;
+ goto error_unwind_sysfs_and_mask;
+ }
+
+ iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
+ iio_device_ioctl_handler_register(indio_dev,
+ iio_dev_opaque->buffer_ioctl_handler);
+
+ return 0;
+
+error_unwind_sysfs_and_mask:
+ for (; unwind_idx >= 0; unwind_idx--) {
+ buffer = iio_dev_opaque->attached_buffers[unwind_idx];
+ __iio_buffer_free_sysfs_and_mask(buffer);
+ }
+ return ret;
}
-void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
+void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
- struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+ struct iio_buffer *buffer;
+ int i;
- if (!buffer)
+ if (!iio_dev_opaque->attached_buffers_cnt)
return;
- __iio_buffer_free_sysfs_and_mask(buffer);
+ iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
+ kfree(iio_dev_opaque->buffer_ioctl_handler);
+
+ iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
+
+ for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
+ buffer = iio_dev_opaque->attached_buffers[i];
+ __iio_buffer_free_sysfs_and_mask(buffer);
+ }
}
/**
@@ -1497,13 +1774,37 @@ EXPORT_SYMBOL_GPL(iio_buffer_put);
* @indio_dev: The device the buffer should be attached to
* @buffer: The buffer to attach to the device
*
+ * Return 0 if successful, negative if error.
+ *
* This function attaches a buffer to an IIO device. The buffer stays attached to
- * the device until the device is freed. The function should only be called at
- * most once per device.
+ * the device until the device is freed. For legacy reasons, the first attached
+ * buffer will also be assigned to 'indio_dev->buffer'.
+ * The array allocated here will be freed via the iio_device_detach_buffers()
+ * call, which is handled by iio_device_free().
*/
-void iio_device_attach_buffer(struct iio_dev *indio_dev,
- struct iio_buffer *buffer)
+int iio_device_attach_buffer(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer)
{
- indio_dev->buffer = iio_buffer_get(buffer);
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+ struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
+ unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;
+
+ cnt++;
+
+ new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ iio_dev_opaque->attached_buffers = new;
+
+ buffer = iio_buffer_get(buffer);
+
+ /* first buffer is legacy; attach it to the IIO device directly */
+ if (!indio_dev->buffer)
+ indio_dev->buffer = buffer;
+
+ iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
+ iio_dev_opaque->attached_buffers_cnt = cnt;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
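
Illustrative userspace usage (not part of the diff above): a minimal sketch of how a
program might exercise the IIO_BUFFER_GET_FD_IOCTL path added by this change. It assumes
the ioctl definition from the accompanying uapi header <linux/iio/buffer.h> and an example
device node at /dev/iio:device0; both are assumptions, not taken from this patch. The int
argument carries the buffer index in and the new anonymous fd out; once that fd is held,
the legacy chrdev read/poll wrappers report the buffer as busy.

/*
 * Hypothetical userspace sketch: obtain a per-buffer file descriptor via
 * IIO_BUFFER_GET_FD_IOCTL and read raw scan data from it. The buffer is
 * assumed to have been configured and enabled beforehand through its
 * sysfs group (e.g. buffer0/enable).
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/iio/buffer.h>	/* IIO_BUFFER_GET_FD_IOCTL (uapi header) */

int main(void)
{
	char data[4096];
	ssize_t n;
	int dev_fd, arg, buf_fd;

	dev_fd = open("/dev/iio:device0", O_RDWR);
	if (dev_fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/*
	 * The ioctl argument is an int passed by reference: on input it
	 * selects the buffer index (0 is the legacy indio_dev->buffer);
	 * on success the kernel writes the new anonymous fd back into it.
	 */
	arg = 0;
	if (ioctl(dev_fd, IIO_BUFFER_GET_FD_IOCTL, &arg) < 0) {
		perror("IIO_BUFFER_GET_FD_IOCTL");
		close(dev_fd);
		return EXIT_FAILURE;
	}
	buf_fd = arg;

	/* read() and poll() now go through the per-buffer fd. */
	n = read(buf_fd, data, sizeof(data));
	if (n < 0)
		perror("read");
	else
		printf("read %zd bytes from buffer 0\n", n);

	close(buf_fd);
	close(dev_fd);
	return 0;
}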