path: root/drivers/iio/buffer
author: Alexandru Ardelean <alexandru.ardelean@analog.com> 2020-03-24 14:46:32 +0100
committer: Jonathan Cameron <Jonathan.Cameron@huawei.com> 2020-04-19 17:56:22 +0200
commit: e0fcca9fbd99e959855aa1d66c125d696f969e68 (patch)
tree: bc9dec13b29e4d4fe40d49ad297feee15fbd0331 /drivers/iio/buffer
parent: iio: buffer-dmaengine: use %zu specifier for sprintf(align) (diff)
iio: buffer-dmaengine: add dev-managed calls for buffer alloc
Currently, when using 'iio_dmaengine_buffer_alloc()', a matching call to 'iio_dmaengine_buffer_free()' must be made. With this change, that can be avoided by using 'devm_iio_dmaengine_buffer_alloc()': the buffer is freed via the device's devres handling.

Signed-off-by: Alexandru Ardelean <alexandru.ardelean@analog.com>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
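For illustration only (not part of this commit), a minimal probe-time sketch of how a driver might use the new helper. The function name foo_probe and the omitted error/driver context are assumptions; the "rx" channel name follows the kerneldoc added below, and iio_device_attach_buffer() is the usual IIO attach call.

static int foo_probe(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;

	/* Freed automatically through devres when 'dev' is detached. */
	buffer = devm_iio_dmaengine_buffer_alloc(dev, "rx");
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	/* Attach the DMA-backed buffer to the IIO device. */
	iio_device_attach_buffer(indio_dev, buffer);

	return 0;
}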
Diffstat (limited to 'drivers/iio/buffer')
-rw-r--r-- drivers/iio/buffer/industrialio-buffer-dmaengine.c | 39
1 file changed, 39 insertions(+), 0 deletions(-)
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 94da3b1ca3a2..6dedf12b69a4 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -229,6 +229,45 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
+static void __devm_iio_dmaengine_buffer_free(struct device *dev, void *res)
+{
+ iio_dmaengine_buffer_free(*(struct iio_buffer **)res);
+}
+
+/**
+ * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
+ * @dev: Parent device for the buffer
+ * @channel: DMA channel name, typically "rx".
+ *
+ * This allocates a new IIO buffer which internally uses the DMAengine framework
+ * to perform its transfers. The parent device will be used to request the DMA
+ * channel.
+ *
+ * The buffer will be automatically de-allocated once the device gets destroyed.
+ */
+struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
+ const char *channel)
+{
+ struct iio_buffer **bufferp, *buffer;
+
+ bufferp = devres_alloc(__devm_iio_dmaengine_buffer_free,
+ sizeof(*bufferp), GFP_KERNEL);
+ if (!bufferp)
+ return ERR_PTR(-ENOMEM);
+
+ buffer = iio_dmaengine_buffer_alloc(dev, channel);
+ if (IS_ERR(buffer)) {
+ devres_free(bufferp);
+ return buffer;
+ }
+
+ *bufferp = buffer;
+ devres_add(dev, bufferp);
+
+ return buffer;
+}
+EXPORT_SYMBOL_GPL(devm_iio_dmaengine_buffer_alloc);
+
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
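For comparison, a hedged sketch of the manual pattern the new devm helper makes unnecessary. The driver state type 'struct foo_state' and the probe/remove split are hypothetical; only iio_dmaengine_buffer_alloc()/iio_dmaengine_buffer_free() come from this driver.

static int foo_probe(struct device *dev, struct foo_state *st)
{
	st->buffer = iio_dmaengine_buffer_alloc(dev, "rx");
	if (IS_ERR(st->buffer))
		return PTR_ERR(st->buffer);

	return 0;
}

static void foo_remove(struct foo_state *st)
{
	/* Without devres, every alloc needs a matching explicit free. */
	iio_dmaengine_buffer_free(st->buffer);
}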