Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--  drivers/dma-buf/Kconfig         13
-rw-r--r--  drivers/dma-buf/Makefile         1
-rw-r--r--  drivers/dma-buf/dma-buf.c       23
-rw-r--r--  drivers/dma-buf/fence-array.c    7
-rw-r--r--  drivers/dma-buf/reservation.c    2
-rw-r--r--  drivers/dma-buf/sw_sync.c      375
-rw-r--r--  drivers/dma-buf/sync_debug.c   236
-rw-r--r--  drivers/dma-buf/sync_debug.h    84
-rw-r--r--  drivers/dma-buf/sync_file.c    204
-rw-r--r--  drivers/dma-buf/sync_trace.h    32
10 files changed, 908 insertions(+), 69 deletions(-)
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 25bcfa0b474f..2585821b24ab 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -17,4 +17,17 @@ config SYNC_FILE
Files fds, to the DRM driver for example. More details at
Documentation/sync_file.txt.
+config SW_SYNC
+ bool "Sync File Validation Framework"
+ default n
+ depends on SYNC_FILE
+ depends on DEBUG_FS
+ ---help---
+ A sync object driver that uses a 32bit counter to coordinate
+ synchronization. Useful when there is no hardware primitive backing
+ the synchronization.
+
+ WARNING: improper use of this can result in deadlocking kernel
+ drivers from userspace. Intended for test and debug only.
+
endmenu
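
For reference, a minimal .config fragment that enables the new framework could look like the following; this is purely illustrative and simply mirrors the `depends on` lines in the hunk above.

CONFIG_DEBUG_FS=y
CONFIG_SYNC_FILE=y
CONFIG_SW_SYNC=y
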
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index f353db213a81..210a10bfad2b 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,2 +1,3 @@
obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
+obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index ddaee60ae52a..cf04d249a6a4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -586,6 +586,22 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ bool write = (direction == DMA_BIDIRECTIONAL ||
+ direction == DMA_TO_DEVICE);
+ struct reservation_object *resv = dmabuf->resv;
+ long ret;
+
+ /* Wait on any implicit rendering fences */
+ ret = reservation_object_wait_timeout_rcu(resv, write, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
/**
* dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
@@ -608,6 +624,13 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
if (dmabuf->ops->begin_cpu_access)
ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
+ /* Ensure that all fences are waited upon - but we first allow
+ * the native handler the chance to do so more efficiently if it
+ * chooses. A double invocation here will be a reasonably cheap no-op.
+ */
+ if (ret == 0)
+ ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
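
The new __dma_buf_begin_cpu_access() helper makes dma_buf_begin_cpu_access() wait on any implicit fences attached to the buffer's reservation object, even when the exporter has no begin_cpu_access hook. A minimal sketch of how a caller would bracket CPU access after this change; read_buffer() is a hypothetical helper and dma_buf_vmap() is just one possible mapping path:

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper: CPU read-back of a dma-buf's contents. */
static int read_buffer(struct dma_buf *dmabuf, void *dst, size_t len)
{
	void *vaddr;
	int ret;

	/* Waits for implicit fences via the reservation object. */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_vmap(dmabuf);
	if (!vaddr) {
		dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	memcpy(dst, vaddr, len);
	dma_buf_vunmap(dmabuf, vaddr);

	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}
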
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
index a8731c853da6..f1989fcaf354 100644
--- a/drivers/dma-buf/fence-array.c
+++ b/drivers/dma-buf/fence-array.c
@@ -99,6 +99,7 @@ const struct fence_ops fence_array_ops = {
.wait = fence_default_wait,
.release = fence_array_release,
};
+EXPORT_SYMBOL(fence_array_ops);
/**
* fence_array_create - Create a custom fence array
@@ -106,14 +107,14 @@ const struct fence_ops fence_array_ops = {
* @fences: [in] array containing the fences
* @context: [in] fence context to use
* @seqno: [in] sequence number to use
- * @signal_on_any [in] signal on any fence in the array
+ * @signal_on_any: [in] signal on any fence in the array
*
* Allocate a fence_array object and initialize the base fence with fence_init().
* In case of error it returns NULL.
*
- * The caller should allocte the fences array with num_fences size
+ * The caller should allocate the fences array with num_fences size
* and fill it with the fences it wants to add to the object. Ownership of this
- * array is take and fence_put() is used on each fence on release.
+ * array is taken and fence_put() is used on each fence on release.
*
* If @signal_on_any is true the fence array signals if any fence in the array
* signals, otherwise it signals when all fences in the array signal.
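
With fence_array_ops now exported, other modules can build fence arrays directly. A small sketch of the ownership rule documented above: the fences array is handed over to the fence_array, which calls fence_put() on every entry at release. merge_two() is a hypothetical helper:

#include <linux/fence.h>
#include <linux/fence-array.h>
#include <linux/slab.h>

/* Hypothetical helper: build one fence that signals when both inputs have. */
static struct fence *merge_two(struct fence *a, struct fence *b)
{
	struct fence **fences;
	struct fence_array *array;

	fences = kcalloc(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	/* The array takes ownership; fence_put() runs on each entry at release. */
	fences[0] = fence_get(a);
	fences[1] = fence_get(b);

	array = fence_array_create(2, fences, fence_context_alloc(1), 1, false);
	if (!array) {
		fence_put(fences[0]);
		fence_put(fences[1]);
		kfree(fences);
		return NULL;
	}

	return &array->base;
}
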
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 9566a62ad8e3..723d8af988e5 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -205,7 +205,7 @@ done:
* @fence: the shared fence to add
*
* Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared_fence has been called.
+ * reservation_object_reserve_shared() has been called.
*/
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct fence *fence)
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
new file mode 100644
index 000000000000..62e8e6dc7953
--- /dev/null
+++ b/drivers/dma-buf/sw_sync.c
@@ -0,0 +1,375 @@
+/*
+ * Sync File validation framework
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/sync_file.h>
+
+#include "sync_debug.h"
+
+#define CREATE_TRACE_POINTS
+#include "sync_trace.h"
+
+/*
+ * SW SYNC validation framework
+ *
+ * A sync object driver that uses a 32bit counter to coordinate
+ * synchronization. Useful when there is no hardware primitive backing
+ * the synchronization.
+ *
+ * To start the framework just open:
+ *
+ * <debugfs>/sync/sw_sync
+ *
+ * That will create a sync timeline; all fences created under this timeline
+ * file descriptor will belong to this timeline.
+ *
+ * The 'sw_sync' file can be opened many times to create different
+ * timelines.
+ *
+ * Fences can be created with SW_SYNC_IOC_CREATE_FENCE ioctl with struct
+ * sw_sync_ioctl_create_fence as parameter.
+ *
+ * To increment the timeline counter, the SW_SYNC_IOC_INC ioctl should be used
+ * with the increment as a u32. This will update the last signaled value
+ * of the timeline and signal any fence that has a seqno smaller than or
+ * equal to it.
+ *
+ * struct sw_sync_ioctl_create_fence
+ * @value: the seqno to initialise the fence with
+ * @name: the name of the new sync point
+ * @fence: return the fd of the new sync_file with the created fence
+ */
+struct sw_sync_create_fence_data {
+ __u32 value;
+ char name[32];
+ __s32 fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC 'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+ struct sw_sync_create_fence_data)
+
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+static const struct fence_ops timeline_fence_ops;
+
+static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
+{
+ if (fence->ops != &timeline_fence_ops)
+ return NULL;
+ return container_of(fence, struct sync_pt, base);
+}
+
+/**
+ * sync_timeline_create() - creates a sync object
+ * @name: sync_timeline name
+ *
+ * Creates a new sync_timeline. Returns the sync_timeline object or NULL in
+ * case of error.
+ */
+struct sync_timeline *sync_timeline_create(const char *name)
+{
+ struct sync_timeline *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ kref_init(&obj->kref);
+ obj->context = fence_context_alloc(1);
+ strlcpy(obj->name, name, sizeof(obj->name));
+
+ INIT_LIST_HEAD(&obj->child_list_head);
+ INIT_LIST_HEAD(&obj->active_list_head);
+ spin_lock_init(&obj->child_list_lock);
+
+ sync_timeline_debug_add(obj);
+
+ return obj;
+}
+
+static void sync_timeline_free(struct kref *kref)
+{
+ struct sync_timeline *obj =
+ container_of(kref, struct sync_timeline, kref);
+
+ sync_timeline_debug_remove(obj);
+
+ kfree(obj);
+}
+
+static void sync_timeline_get(struct sync_timeline *obj)
+{
+ kref_get(&obj->kref);
+}
+
+static void sync_timeline_put(struct sync_timeline *obj)
+{
+ kref_put(&obj->kref, sync_timeline_free);
+}
+
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj: sync_timeline to signal
+ * @inc: num to increment on timeline->value
+ *
+ * A sync implementation should call this any time one of its fences
+ * has signaled or has an error condition.
+ */
+static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+{
+ unsigned long flags;
+ struct sync_pt *pt, *next;
+
+ trace_sync_timeline(obj);
+
+ spin_lock_irqsave(&obj->child_list_lock, flags);
+
+ obj->value += inc;
+
+ list_for_each_entry_safe(pt, next, &obj->active_list_head,
+ active_list) {
+ if (fence_is_signaled_locked(&pt->base))
+ list_del_init(&pt->active_list);
+ }
+
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @obj: fence's parent sync_timeline
+ * @size: size to allocate for this pt
+ * @value: value of the fence
+ *
+ * Creates a new sync_pt as a child of @obj. @size bytes will be
+ * allocated, allowing for implementation-specific data to be kept after
+ * the generic sync_pt struct. Returns the sync_pt object or
+ * NULL in case of error.
+ */
+static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
+ unsigned int value)
+{
+ unsigned long flags;
+ struct sync_pt *pt;
+
+ if (size < sizeof(*pt))
+ return NULL;
+
+ pt = kzalloc(size, GFP_KERNEL);
+ if (!pt)
+ return NULL;
+
+ spin_lock_irqsave(&obj->child_list_lock, flags);
+ sync_timeline_get(obj);
+ fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
+ obj->context, value);
+ list_add_tail(&pt->child_list, &obj->child_list_head);
+ INIT_LIST_HEAD(&pt->active_list);
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
+ return pt;
+}
+
+static const char *timeline_fence_get_driver_name(struct fence *fence)
+{
+ return "sw_sync";
+}
+
+static const char *timeline_fence_get_timeline_name(struct fence *fence)
+{
+ struct sync_timeline *parent = fence_parent(fence);
+
+ return parent->name;
+}
+
+static void timeline_fence_release(struct fence *fence)
+{
+ struct sync_pt *pt = fence_to_sync_pt(fence);
+ struct sync_timeline *parent = fence_parent(fence);
+ unsigned long flags;
+
+ spin_lock_irqsave(fence->lock, flags);
+ list_del(&pt->child_list);
+ if (!list_empty(&pt->active_list))
+ list_del(&pt->active_list);
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ sync_timeline_put(parent);
+ fence_free(fence);
+}
+
+static bool timeline_fence_signaled(struct fence *fence)
+{
+ struct sync_timeline *parent = fence_parent(fence);
+
+ return (fence->seqno > parent->value) ? false : true;
+}
+
+static bool timeline_fence_enable_signaling(struct fence *fence)
+{
+ struct sync_pt *pt = fence_to_sync_pt(fence);
+ struct sync_timeline *parent = fence_parent(fence);
+
+ if (timeline_fence_signaled(fence))
+ return false;
+
+ list_add_tail(&pt->active_list, &parent->active_list_head);
+ return true;
+}
+
+static void timeline_fence_value_str(struct fence *fence,
+ char *str, int size)
+{
+ snprintf(str, size, "%d", fence->seqno);
+}
+
+static void timeline_fence_timeline_value_str(struct fence *fence,
+ char *str, int size)
+{
+ struct sync_timeline *parent = fence_parent(fence);
+
+ snprintf(str, size, "%d", parent->value);
+}
+
+static const struct fence_ops timeline_fence_ops = {
+ .get_driver_name = timeline_fence_get_driver_name,
+ .get_timeline_name = timeline_fence_get_timeline_name,
+ .enable_signaling = timeline_fence_enable_signaling,
+ .signaled = timeline_fence_signaled,
+ .wait = fence_default_wait,
+ .release = timeline_fence_release,
+ .fence_value_str = timeline_fence_value_str,
+ .timeline_value_str = timeline_fence_timeline_value_str,
+};
+
+/*
+ * *WARNING*
+ *
+ * improper use of this can result in deadlocking kernel drivers from userspace.
+ */
+
+/* opening sw_sync creates a new sync obj */
+static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct sync_timeline *obj;
+ char task_comm[TASK_COMM_LEN];
+
+ get_task_comm(task_comm, current);
+
+ obj = sync_timeline_create(task_comm);
+ if (!obj)
+ return -ENOMEM;
+
+ file->private_data = obj;
+
+ return 0;
+}
+
+static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct sync_timeline *obj = file->private_data;
+
+ smp_wmb();
+
+ sync_timeline_put(obj);
+ return 0;
+}
+
+static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
+ unsigned long arg)
+{
+ int fd = get_unused_fd_flags(O_CLOEXEC);
+ int err;
+ struct sync_pt *pt;
+ struct sync_file *sync_file;
+ struct sw_sync_create_fence_data data;
+
+ if (fd < 0)
+ return fd;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ err = -EFAULT;
+ goto err;
+ }
+
+ pt = sync_pt_create(obj, sizeof(*pt), data.value);
+ if (!pt) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ sync_file = sync_file_create(&pt->base);
+ if (!sync_file) {
+ fence_put(&pt->base);
+ err = -ENOMEM;
+ goto err;
+ }
+
+ data.fence = fd;
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ fput(sync_file->file);
+ err = -EFAULT;
+ goto err;
+ }
+
+ fd_install(fd, sync_file->file);
+
+ return 0;
+
+err:
+ put_unused_fd(fd);
+ return err;
+}
+
+static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
+{
+ u32 value;
+
+ if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+ return -EFAULT;
+
+ sync_timeline_signal(obj, value);
+
+ return 0;
+}
+
+static long sw_sync_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sync_timeline *obj = file->private_data;
+
+ switch (cmd) {
+ case SW_SYNC_IOC_CREATE_FENCE:
+ return sw_sync_ioctl_create_fence(obj, arg);
+
+ case SW_SYNC_IOC_INC:
+ return sw_sync_ioctl_inc(obj, arg);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+const struct file_operations sw_sync_debugfs_fops = {
+ .open = sw_sync_debugfs_open,
+ .release = sw_sync_debugfs_release,
+ .unlocked_ioctl = sw_sync_ioctl,
+ .compat_ioctl = sw_sync_ioctl,
+};
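
Putting the pieces of sw_sync.c together, a user-space test could drive the interface roughly as follows. This is a sketch: it assumes debugfs is mounted at /sys/kernel/debug, copies the struct and ioctl definitions from above, and relies on the sync_file poll() semantics introduced later in this series of hunks.

/* Illustrative user-space exercise of the sw_sync debugfs interface. */
#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <unistd.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <sys/ioctl.h>

struct sw_sync_create_fence_data {
	__u32 value;
	char name[32];
	__s32 fence;		/* fd of new fence */
};

#define SW_SYNC_IOC_MAGIC		'W'
#define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0, \
						struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)

int main(void)
{
	struct sw_sync_create_fence_data data = { .value = 5 };
	struct pollfd pfd = { .events = POLLIN };
	__u32 inc = 5;
	int timeline;

	/* Each open() of sw_sync creates an independent timeline. */
	timeline = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);
	if (timeline < 0)
		return 1;

	/* Create a fence that signals once the timeline counter reaches 5. */
	strcpy(data.name, "test_fence");
	if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return 1;

	/* Advance the timeline by 5, signalling the fence above. */
	if (ioctl(timeline, SW_SYNC_IOC_INC, &inc) < 0)
		return 1;

	pfd.fd = data.fence;
	poll(&pfd, 1, -1);	/* returns immediately: POLLIN, fence signaled */

	close(data.fence);
	close(timeline);
	return 0;
}
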
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
new file mode 100644
index 000000000000..2dd4c3db6caa
--- /dev/null
+++ b/drivers/dma-buf/sync_debug.c
@@ -0,0 +1,236 @@
+/*
+ * Sync File validation framework and debug information
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include "sync_debug.h"
+
+static struct dentry *dbgfs;
+
+static LIST_HEAD(sync_timeline_list_head);
+static DEFINE_SPINLOCK(sync_timeline_list_lock);
+static LIST_HEAD(sync_file_list_head);
+static DEFINE_SPINLOCK(sync_file_list_lock);
+
+void sync_timeline_debug_add(struct sync_timeline *obj)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
+ spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+}
+
+void sync_timeline_debug_remove(struct sync_timeline *obj)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ list_del(&obj->sync_timeline_list);
+ spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+}
+
+void sync_file_debug_add(struct sync_file *sync_file)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sync_file_list_lock, flags);
+ list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
+ spin_unlock_irqrestore(&sync_file_list_lock, flags);
+}
+
+void sync_file_debug_remove(struct sync_file *sync_file)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sync_file_list_lock, flags);
+ list_del(&sync_file->sync_file_list);
+ spin_unlock_irqrestore(&sync_file_list_lock, flags);
+}
+
+static const char *sync_status_str(int status)
+{
+ if (status == 0)
+ return "signaled";
+
+ if (status > 0)
+ return "active";
+
+ return "error";
+}
+
+static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
+{
+ int status = 1;
+ struct sync_timeline *parent = fence_parent(fence);
+
+ if (fence_is_signaled_locked(fence))
+ status = fence->status;
+
+ seq_printf(s, " %s%sfence %s",
+ show ? parent->name : "",
+ show ? "_" : "",
+ sync_status_str(status));
+
+ if (status <= 0) {
+ struct timespec64 ts64 =
+ ktime_to_timespec64(fence->timestamp);
+
+ seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
+ }
+
+ if (fence->ops->timeline_value_str &&
+ fence->ops->fence_value_str) {
+ char value[64];
+ bool success;
+
+ fence->ops->fence_value_str(fence, value, sizeof(value));
+ success = strlen(value);
+
+ if (success) {
+ seq_printf(s, ": %s", value);
+
+ fence->ops->timeline_value_str(fence, value,
+ sizeof(value));
+
+ if (strlen(value))
+ seq_printf(s, " / %s", value);
+ }
+ }
+
+ seq_puts(s, "\n");
+}
+
+static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+{
+ struct list_head *pos;
+ unsigned long flags;
+
+ seq_printf(s, "%s: %d\n", obj->name, obj->value);
+
+ spin_lock_irqsave(&obj->child_list_lock, flags);
+ list_for_each(pos, &obj->child_list_head) {
+ struct sync_pt *pt =
+ container_of(pos, struct sync_pt, child_list);
+ sync_print_fence(s, &pt->base, false);
+ }
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+static void sync_print_sync_file(struct seq_file *s,
+ struct sync_file *sync_file)
+{
+ int i;
+
+ seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
+ sync_status_str(!fence_is_signaled(sync_file->fence)));
+
+ if (fence_is_array(sync_file->fence)) {
+ struct fence_array *array = to_fence_array(sync_file->fence);
+
+ for (i = 0; i < array->num_fences; ++i)
+ sync_print_fence(s, array->fences[i], true);
+ } else {
+ sync_print_fence(s, sync_file->fence, true);
+ }
+}
+
+static int sync_debugfs_show(struct seq_file *s, void *unused)
+{
+ unsigned long flags;
+ struct list_head *pos;
+
+ seq_puts(s, "objs:\n--------------\n");
+
+ spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ list_for_each(pos, &sync_timeline_list_head) {
+ struct sync_timeline *obj =
+ container_of(pos, struct sync_timeline,
+ sync_timeline_list);
+
+ sync_print_obj(s, obj);
+ seq_puts(s, "\n");
+ }
+ spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+ seq_puts(s, "fences:\n--------------\n");
+
+ spin_lock_irqsave(&sync_file_list_lock, flags);
+ list_for_each(pos, &sync_file_list_head) {
+ struct sync_file *sync_file =
+ container_of(pos, struct sync_file, sync_file_list);
+
+ sync_print_sync_file(s, sync_file);
+ seq_puts(s, "\n");
+ }
+ spin_unlock_irqrestore(&sync_file_list_lock, flags);
+ return 0;
+}
+
+static int sync_info_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sync_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations sync_info_debugfs_fops = {
+ .open = sync_info_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static __init int sync_debugfs_init(void)
+{
+ dbgfs = debugfs_create_dir("sync", NULL);
+
+ /*
+ * The debugfs files won't ever get removed and thus, there is
+ * no need to protect it against removal races. The use of
+ * debugfs_create_file_unsafe() is actually safe here.
+ */
+ debugfs_create_file_unsafe("info", 0444, dbgfs, NULL,
+ &sync_info_debugfs_fops);
+ debugfs_create_file_unsafe("sw_sync", 0644, dbgfs, NULL,
+ &sw_sync_debugfs_fops);
+
+ return 0;
+}
+late_initcall(sync_debugfs_init);
+
+#define DUMP_CHUNK 256
+static char sync_dump_buf[64 * 1024];
+void sync_dump(void)
+{
+ struct seq_file s = {
+ .buf = sync_dump_buf,
+ .size = sizeof(sync_dump_buf) - 1,
+ };
+ int i;
+
+ sync_debugfs_show(&s, NULL);
+
+ for (i = 0; i < s.count; i += DUMP_CHUNK) {
+ if ((s.count - i) > DUMP_CHUNK) {
+ char c = s.buf[i + DUMP_CHUNK];
+
+ s.buf[i + DUMP_CHUNK] = 0;
+ pr_cont("%s", s.buf + i);
+ s.buf[i + DUMP_CHUNK] = c;
+ } else {
+ s.buf[s.count] = 0;
+ pr_cont("%s", s.buf + i);
+ }
+ }
+}
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
new file mode 100644
index 000000000000..d269aa6783aa
--- /dev/null
+++ b/drivers/dma-buf/sync_debug.h
@@ -0,0 +1,84 @@
+/*
+ * Sync File validation framework and debug information
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNC_H
+#define _LINUX_SYNC_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/fence.h>
+
+#include <linux/sync_file.h>
+#include <uapi/linux/sync_file.h>
+
+/**
+ * struct sync_timeline - sync object
+ * @kref: reference count of the sync_timeline.
+ * @name: name of the sync_timeline. Useful for debugging
+ * @child_list_head: list of children sync_pts for this sync_timeline
+ * @child_list_lock: lock protecting @child_list_head and fence.status
+ * @active_list_head: list of active (unsignaled/errored) sync_pts
+ * @sync_timeline_list: membership in global sync_timeline_list
+ */
+struct sync_timeline {
+ struct kref kref;
+ char name[32];
+
+ /* protected by child_list_lock */
+ u64 context;
+ int value;
+
+ struct list_head child_list_head;
+ spinlock_t child_list_lock;
+
+ struct list_head active_list_head;
+
+ struct list_head sync_timeline_list;
+};
+
+static inline struct sync_timeline *fence_parent(struct fence *fence)
+{
+ return container_of(fence->lock, struct sync_timeline,
+ child_list_lock);
+}
+
+/**
+ * struct sync_pt - sync_pt object
+ * @base: base fence object
+ * @child_list: sync timeline child's list
+ * @active_list: sync timeline active child's list
+ */
+struct sync_pt {
+ struct fence base;
+ struct list_head child_list;
+ struct list_head active_list;
+};
+
+#ifdef CONFIG_SW_SYNC
+
+extern const struct file_operations sw_sync_debugfs_fops;
+
+void sync_timeline_debug_add(struct sync_timeline *obj);
+void sync_timeline_debug_remove(struct sync_timeline *obj);
+void sync_file_debug_add(struct sync_file *fence);
+void sync_file_debug_remove(struct sync_file *fence);
+void sync_dump(void);
+
+#else
+# define sync_timeline_debug_add(obj)
+# define sync_timeline_debug_remove(obj)
+# define sync_file_debug_add(fence)
+# define sync_file_debug_remove(fence)
+# define sync_dump()
+#endif
+
+#endif /* _LINUX_SYNC_H */
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 9aaa608dfe01..b29a9e817320 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -28,11 +28,11 @@
static const struct file_operations sync_file_fops;
-static struct sync_file *sync_file_alloc(int size)
+static struct sync_file *sync_file_alloc(void)
{
struct sync_file *sync_file;
- sync_file = kzalloc(size, GFP_KERNEL);
+ sync_file = kzalloc(sizeof(*sync_file), GFP_KERNEL);
if (!sync_file)
return NULL;
@@ -45,6 +45,8 @@ static struct sync_file *sync_file_alloc(int size)
init_waitqueue_head(&sync_file->wq);
+ INIT_LIST_HEAD(&sync_file->cb.node);
+
return sync_file;
err:
@@ -54,14 +56,11 @@ err:
static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
{
- struct sync_file_cb *check;
struct sync_file *sync_file;
- check = container_of(cb, struct sync_file_cb, cb);
- sync_file = check->sync_file;
+ sync_file = container_of(cb, struct sync_file, cb);
- if (atomic_dec_and_test(&sync_file->status))
- wake_up_all(&sync_file->wq);
+ wake_up_all(&sync_file->wq);
}
/**
@@ -76,23 +75,17 @@ struct sync_file *sync_file_create(struct fence *fence)
{
struct sync_file *sync_file;
- sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]));
+ sync_file = sync_file_alloc();
if (!sync_file)
return NULL;
- sync_file->num_fences = 1;
- atomic_set(&sync_file->status, 1);
+ sync_file->fence = fence;
+
snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence), fence->context,
fence->seqno);
- sync_file->cbs[0].fence = fence;
- sync_file->cbs[0].sync_file = sync_file;
- if (fence_add_callback(fence, &sync_file->cbs[0].cb,
- fence_check_cb_func))
- atomic_dec(&sync_file->status);
-
return sync_file;
}
EXPORT_SYMBOL(sync_file_create);
@@ -121,14 +114,73 @@ err:
return NULL;
}
-static void sync_file_add_pt(struct sync_file *sync_file, int *i,
- struct fence *fence)
+/**
+ * sync_file_get_fence - get the fence related to the sync_file fd
+ * @fd: sync_file fd to get the fence from
+ *
+ * Ensures @fd references a valid sync_file and returns a fence that
+ * represents all fences in the sync_file. On error NULL is returned.
+ */
+struct fence *sync_file_get_fence(int fd)
+{
+ struct sync_file *sync_file;
+ struct fence *fence;
+
+ sync_file = sync_file_fdget(fd);
+ if (!sync_file)
+ return NULL;
+
+ fence = fence_get(sync_file->fence);
+ fput(sync_file->file);
+
+ return fence;
+}
+EXPORT_SYMBOL(sync_file_get_fence);
+
+static int sync_file_set_fence(struct sync_file *sync_file,
+ struct fence **fences, int num_fences)
+{
+ struct fence_array *array;
+
+ /*
+ * The references for the fences in the new sync_file are taken
+ * in add_fence() during the merge procedure, so for num_fences == 1
+ * we already own a new reference to the fence. For num_fences > 1
+ * we own the reference from the fence_array creation.
+ */
+ if (num_fences == 1) {
+ sync_file->fence = fences[0];
+ kfree(fences);
+ } else {
+ array = fence_array_create(num_fences, fences,
+ fence_context_alloc(1), 1, false);
+ if (!array)
+ return -ENOMEM;
+
+ sync_file->fence = &array->base;
+ }
+
+ return 0;
+}
+
+static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
+{
+ if (fence_is_array(sync_file->fence)) {
+ struct fence_array *array = to_fence_array(sync_file->fence);
+
+ *num_fences = array->num_fences;
+ return array->fences;
+ }
+
+ *num_fences = 1;
+ return &sync_file->fence;
+}
+
+static void add_fence(struct fence **fences, int *i, struct fence *fence)
{
- sync_file->cbs[*i].fence = fence;
- sync_file->cbs[*i].sync_file = sync_file;
+ fences[*i] = fence;
- if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
- fence_check_cb_func)) {
+ if (!fence_is_signaled(fence)) {
fence_get(fence);
(*i)++;
}
@@ -147,16 +199,24 @@ static void sync_file_add_pt(struct sync_file *sync_file, int *i,
static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
- int num_fences = a->num_fences + b->num_fences;
struct sync_file *sync_file;
- int i, i_a, i_b;
- unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
+ struct fence **fences, **nfences, **a_fences, **b_fences;
+ int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
- sync_file = sync_file_alloc(size);
+ sync_file = sync_file_alloc();
if (!sync_file)
return NULL;
- atomic_set(&sync_file->status, num_fences);
+ a_fences = get_fences(a, &a_num_fences);
+ b_fences = get_fences(b, &b_num_fences);
+ if (a_num_fences > INT_MAX - b_num_fences)
+ return NULL;
+
+ num_fences = a_num_fences + b_num_fences;
+
+ fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ goto err;
/*
* Assume sync_file a and b are both ordered and have no
@@ -165,55 +225,69 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
* If a sync_file can only be created with sync_file_merge
* and sync_file_create, this is a reasonable assumption.
*/
- for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
- struct fence *pt_a = a->cbs[i_a].fence;
- struct fence *pt_b = b->cbs[i_b].fence;
+ for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
+ struct fence *pt_a = a_fences[i_a];
+ struct fence *pt_b = b_fences[i_b];
if (pt_a->context < pt_b->context) {
- sync_file_add_pt(sync_file, &i, pt_a);
+ add_fence(fences, &i, pt_a);
i_a++;
} else if (pt_a->context > pt_b->context) {
- sync_file_add_pt(sync_file, &i, pt_b);
+ add_fence(fences, &i, pt_b);
i_b++;
} else {
if (pt_a->seqno - pt_b->seqno <= INT_MAX)
- sync_file_add_pt(sync_file, &i, pt_a);
+ add_fence(fences, &i, pt_a);
else
- sync_file_add_pt(sync_file, &i, pt_b);
+ add_fence(fences, &i, pt_b);
i_a++;
i_b++;
}
}
- for (; i_a < a->num_fences; i_a++)
- sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
+ for (; i_a < a_num_fences; i_a++)
+ add_fence(fences, &i, a_fences[i_a]);
+
+ for (; i_b < b_num_fences; i_b++)
+ add_fence(fences, &i, b_fences[i_b]);
- for (; i_b < b->num_fences; i_b++)
- sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
+ if (i == 0)
+ fences[i++] = fence_get(a_fences[0]);
- if (num_fences > i)
- atomic_sub(num_fences - i, &sync_file->status);
- sync_file->num_fences = i;
+ if (num_fences > i) {
+ nfences = krealloc(fences, i * sizeof(*fences),
+ GFP_KERNEL);
+ if (!nfences)
+ goto err;
+
+ fences = nfences;
+ }
+
+ if (sync_file_set_fence(sync_file, fences, i) < 0) {
+ kfree(fences);
+ goto err;
+ }
strlcpy(sync_file->name, name, sizeof(sync_file->name));
return sync_file;
+
+err:
+ fput(sync_file->file);
+ return NULL;
+
}
static void sync_file_free(struct kref *kref)
{
struct sync_file *sync_file = container_of(kref, struct sync_file,
kref);
- int i;
-
- for (i = 0; i < sync_file->num_fences; ++i) {
- fence_remove_callback(sync_file->cbs[i].fence,
- &sync_file->cbs[i].cb);
- fence_put(sync_file->cbs[i].fence);
- }
+ if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+ fence_remove_callback(sync_file->fence, &sync_file->cb);
+ fence_put(sync_file->fence);
kfree(sync_file);
}
@@ -228,17 +302,17 @@ static int sync_file_release(struct inode *inode, struct file *file)
static unsigned int sync_file_poll(struct file *file, poll_table *wait)
{
struct sync_file *sync_file = file->private_data;
- int status;
poll_wait(file, &sync_file->wq, wait);
- status = atomic_read(&sync_file->status);
+ if (!poll_does_not_wait(wait) &&
+ !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+ if (fence_add_callback(sync_file->fence, &sync_file->cb,
+ fence_check_cb_func) < 0)
+ wake_up_all(&sync_file->wq);
+ }
- if (!status)
- return POLLIN;
- if (status < 0)
- return POLLERR;
- return 0;
+ return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
}
static long sync_file_ioctl_merge(struct sync_file *sync_file,
@@ -315,8 +389,9 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
{
struct sync_file_info info;
struct sync_fence_info *fence_info = NULL;
+ struct fence **fences;
__u32 size;
- int ret, i;
+ int num_fences, ret, i;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
return -EFAULT;
@@ -324,6 +399,8 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (info.flags || info.pad)
return -EINVAL;
+ fences = get_fences(sync_file, &num_fences);
+
/*
* Passing num_fences = 0 means that userspace doesn't want to
* retrieve any sync_fence_info. If num_fences = 0 we skip filling
@@ -333,16 +410,16 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (!info.num_fences)
goto no_fences;
- if (info.num_fences < sync_file->num_fences)
+ if (info.num_fences < num_fences)
return -EINVAL;
- size = sync_file->num_fences * sizeof(*fence_info);
+ size = num_fences * sizeof(*fence_info);
fence_info = kzalloc(size, GFP_KERNEL);
if (!fence_info)
return -ENOMEM;
- for (i = 0; i < sync_file->num_fences; ++i)
- sync_fill_fence_info(sync_file->cbs[i].fence, &fence_info[i]);
+ for (i = 0; i < num_fences; i++)
+ sync_fill_fence_info(fences[i], &fence_info[i]);
if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
size)) {
@@ -352,11 +429,8 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
no_fences:
strlcpy(info.name, sync_file->name, sizeof(info.name));
- info.status = atomic_read(&sync_file->status);
- if (info.status >= 0)
- info.status = !info.status;
-
- info.num_fences = sync_file->num_fences;
+ info.status = fence_is_signaled(sync_file->fence);
+ info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
ret = -EFAULT;
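
After this rework a sync_file wraps a single struct fence (possibly a fence_array), and the new sync_file_get_fence() lets drivers import one. A driver-side sketch of exporting and importing fences through sync_file fds; export_fence_to_fd() and wait_on_fd() are hypothetical helpers:

#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/fence.h>
#include <linux/file.h>
#include <linux/sync_file.h>

/* Hypothetical export path: wrap a driver fence in a sync_file and return an fd.
 * As in sw_sync_ioctl_create_fence() above, sync_file_create() takes over the
 * caller's fence reference.
 */
static int export_fence_to_fd(struct fence *fence)
{
	struct sync_file *sync_file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fd_install(fd, sync_file->file);
	return fd;
}

/* Hypothetical import path: wait on a fence handed in from user space as an fd. */
static int wait_on_fd(int fd)
{
	struct fence *fence = sync_file_get_fence(fd);
	long ret;

	if (!fence)
		return -EINVAL;

	ret = fence_wait(fence, true);
	fence_put(fence);

	return ret;
}
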
diff --git a/drivers/dma-buf/sync_trace.h b/drivers/dma-buf/sync_trace.h
new file mode 100644
index 000000000000..d13d59ff1b85
--- /dev/null
+++ b/drivers/dma-buf/sync_trace.h
@@ -0,0 +1,32 @@
+#undef TRACE_SYSTEM
+#define TRACE_INCLUDE_PATH ../../drivers/dma-buf
+#define TRACE_SYSTEM sync_trace
+
+#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SYNC_H
+
+#include "sync_debug.h"
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sync_timeline,
+ TP_PROTO(struct sync_timeline *timeline),
+
+ TP_ARGS(timeline),
+
+ TP_STRUCT__entry(
+ __string(name, timeline->name)
+ __field(u32, value)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, timeline->name);
+ __entry->value = timeline->value;
+ ),
+
+ TP_printk("name=%s value=%d", __get_str(name), __entry->value)
+);
+
+#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>