Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--  drivers/dma-buf/Kconfig                                                  |  16
-rw-r--r--  drivers/dma-buf/Makefile                                                 |  10
-rw-r--r--  drivers/dma-buf/dma-buf.c                                                | 215
-rw-r--r--  drivers/dma-buf/dma-fence-array.c                                        |  32
-rw-r--r--  drivers/dma-buf/dma-fence-chain.c                                        |  24
-rw-r--r--  drivers/dma-buf/dma-fence.c                                              | 133
-rw-r--r--  drivers/dma-buf/dma-heap.c                                               | 298
-rw-r--r--  drivers/dma-buf/dma-resv.c (renamed from drivers/dma-buf/reservation.c)  | 283
-rw-r--r--  drivers/dma-buf/heaps/Kconfig                                            |  14
-rw-r--r--  drivers/dma-buf/heaps/Makefile                                           |   4
-rw-r--r--  drivers/dma-buf/heaps/cma_heap.c                                         | 177
-rw-r--r--  drivers/dma-buf/heaps/heap-helpers.c                                     | 271
-rw-r--r--  drivers/dma-buf/heaps/heap-helpers.h                                     |  53
-rw-r--r--  drivers/dma-buf/heaps/system_heap.c                                      | 123
-rw-r--r--  drivers/dma-buf/selftest.c                                               | 167
-rw-r--r--  drivers/dma-buf/selftest.h                                               |  30
-rw-r--r--  drivers/dma-buf/selftests.h                                              |  13
-rw-r--r--  drivers/dma-buf/st-dma-fence.c                                           | 574
-rw-r--r--  drivers/dma-buf/sw_sync.c                                                |  18
-rw-r--r--  drivers/dma-buf/sync_file.c                                              |   6
-rw-r--r--  drivers/dma-buf/udmabuf.c                                                |  84
21 files changed, 2230 insertions(+), 315 deletions(-)
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index b6a9c2f1bc41..0613bb7770f5 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -39,4 +39,20 @@ config UDMABUF
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
+config DMABUF_SELFTESTS
+ tristate "Selftests for the dma-buf interfaces"
+ default n
+ depends on DMA_SHARED_BUFFER
+
+menuconfig DMABUF_HEAPS
+ bool "DMA-BUF Userland Memory Heaps"
+ select DMA_SHARED_BUFFER
+ help
+ Choose this option to enable the DMA-BUF userland memory heaps.
+ This option creates per-heap chardevs in /dev/dma_heap/ which
+ allow userspace to allocate dma-bufs that can be shared
+ between drivers.
+
+source "drivers/dma-buf/heaps/Kconfig"
+
endmenu
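
The DMABUF_HEAPS help text above is the whole user-facing contract: every registered heap shows up as a character device under /dev/dma_heap/ and hands out dma-buf file descriptors through an ioctl. A minimal userspace sketch of that flow follows; the "system" node name is an assumption that depends on which heap drivers are enabled, and error handling is kept to the bare minimum.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dma-heap.h>

/* Allocate a dma-buf of at least "len" bytes from a heap; returns the
 * dma-buf fd on success or -1 on failure. */
int alloc_from_heap(size_t len)
{
	struct dma_heap_allocation_data data;
	int heap_fd, ret;

	heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
	if (heap_fd < 0)
		return -1;

	memset(&data, 0, sizeof(data));
	data.len = len;				/* rounded up to PAGE_SIZE by the kernel */
	data.fd_flags = O_RDWR | O_CLOEXEC;	/* flags for the returned dma-buf fd */
	data.heap_flags = 0;

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	close(heap_fd);

	return ret < 0 ? -1 : (int)data.fd;	/* fd can be shared with any importer */
}

The returned fd behaves like any other dma-buf: it can be mmap()ed, passed over a unix socket, or handed to a driver that imports dma-bufs.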
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index e8c7310cb800..9c190026bfab 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
- reservation.o seqno-fence.o
+ dma-resv.o seqno-fence.o
+obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
+obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
obj-$(CONFIG_UDMABUF) += udmabuf.o
+
+dmabuf_selftests-y := \
+ selftest.o \
+ st-dma-fence.o
+
+obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index f45bfb29ef96..d4097856c86b 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
@@ -45,10 +45,10 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
size_t ret = 0;
dmabuf = dentry->d_fsdata;
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (dmabuf->name)
ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
dentry->d_name.name, ret > 0 ? name : "");
@@ -104,8 +104,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
list_del(&dmabuf->list_node);
mutex_unlock(&db_list.lock);
- if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
- reservation_object_fini(dmabuf->resv);
+ if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
+ dma_resv_fini(dmabuf->resv);
module_put(dmabuf->owner);
kfree(dmabuf);
@@ -165,7 +165,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
* To support cross-device and cross-driver synchronization of buffer access
* implicit fences (represented internally in the kernel with &struct fence) can
* be attached to a &dma_buf. The glue for that and a few related things are
- * provided in the &reservation_object structure.
+ * provided in the &dma_resv structure.
*
* Userspace can query the state of these implicitly tracked fences using poll()
* and related system calls:
@@ -195,8 +195,8 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
struct dma_buf *dmabuf;
- struct reservation_object *resv;
- struct reservation_object_list *fobj;
+ struct dma_resv *resv;
+ struct dma_resv_list *fobj;
struct dma_fence *fence_excl;
__poll_t events;
unsigned shared_count, seq;
@@ -334,7 +334,7 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
if (IS_ERR(name))
return PTR_ERR(name);
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (!list_empty(&dmabuf->attachments)) {
ret = -EBUSY;
kfree(name);
@@ -344,7 +344,7 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
dmabuf->name = name;
out_unlock:
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
return ret;
}
@@ -403,10 +403,10 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
/* Don't count the temporary reference taken inside procfs seq_show */
seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (dmabuf->name)
seq_printf(m, "name:\t%s\n", dmabuf->name);
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
}
static const struct file_operations dma_buf_fops = {
@@ -415,9 +415,7 @@ static const struct file_operations dma_buf_fops = {
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
.unlocked_ioctl = dma_buf_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = dma_buf_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.show_fdinfo = dma_buf_show_fdinfo,
};
@@ -506,13 +504,13 @@ err_alloc_file:
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
struct dma_buf *dmabuf;
- struct reservation_object *resv = exp_info->resv;
+ struct dma_resv *resv = exp_info->resv;
struct file *file;
size_t alloc_size = sizeof(struct dma_buf);
int ret;
if (!exp_info->resv)
- alloc_size += sizeof(struct reservation_object);
+ alloc_size += sizeof(struct dma_resv);
else
/* prevent &dma_buf[1] == dma_buf->resv */
alloc_size += 1;
@@ -525,6 +523,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
return ERR_PTR(-EINVAL);
}
+ if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
+ exp_info->ops->dynamic_mapping))
+ return ERR_PTR(-EINVAL);
+
if (!try_module_get(exp_info->owner))
return ERR_PTR(-ENOENT);
@@ -544,8 +546,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
if (!resv) {
- resv = (struct reservation_object *)&dmabuf[1];
- reservation_object_init(resv);
+ resv = (struct dma_resv *)&dmabuf[1];
+ dma_resv_init(resv);
}
dmabuf->resv = resv;
@@ -645,10 +647,11 @@ void dma_buf_put(struct dma_buf *dmabuf)
EXPORT_SYMBOL_GPL(dma_buf_put);
/**
- * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
+ * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
* calls attach() of dma_buf_ops to allow device-specific attach functionality
- * @dmabuf: [in] buffer to attach device to.
- * @dev: [in] device to be attached.
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ * @dynamic_mapping: [in] calling convention for map/unmap
*
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
@@ -662,8 +665,9 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* accessible to @dev, and cannot be moved to a more suitable place. This is
* indicated with the error code -EBUSY.
*/
-struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
- struct device *dev)
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+ bool dynamic_mapping)
{
struct dma_buf_attachment *attach;
int ret;
@@ -677,25 +681,69 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
attach->dev = dev;
attach->dmabuf = dmabuf;
-
- mutex_lock(&dmabuf->lock);
+ attach->dynamic_mapping = dynamic_mapping;
if (dmabuf->ops->attach) {
ret = dmabuf->ops->attach(dmabuf, attach);
if (ret)
goto err_attach;
}
+ dma_resv_lock(dmabuf->resv, NULL);
list_add(&attach->node, &dmabuf->attachments);
+ dma_resv_unlock(dmabuf->resv);
- mutex_unlock(&dmabuf->lock);
+ /* When either the importer or the exporter can't handle dynamic
+ * mappings we cache the mapping here to avoid issues with the
+ * reservation object lock.
+ */
+ if (dma_buf_attachment_is_dynamic(attach) !=
+ dma_buf_is_dynamic(dmabuf)) {
+ struct sg_table *sgt;
+
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+
+ sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
+ if (!sgt)
+ sgt = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_unlock;
+ }
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+ attach->sgt = sgt;
+ attach->dir = DMA_BIDIRECTIONAL;
+ }
return attach;
err_attach:
kfree(attach);
- mutex_unlock(&dmabuf->lock);
+ return ERR_PTR(ret);
+
+err_unlock:
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+
+ dma_buf_detach(dmabuf, attach);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
+
+/**
+ * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ *
+ * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
+ * mapping.
+ */
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev)
+{
+ return dma_buf_dynamic_attach(dmabuf, dev, false);
+}
EXPORT_SYMBOL_GPL(dma_buf_attach);
/**
@@ -711,15 +759,22 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
if (WARN_ON(!dmabuf || !attach))
return;
- if (attach->sgt)
+ if (attach->sgt) {
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+
dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
- mutex_lock(&dmabuf->lock);
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+ }
+
+ dma_resv_lock(dmabuf->resv, NULL);
list_del(&attach->node);
+ dma_resv_unlock(dmabuf->resv);
if (dmabuf->ops->detach)
dmabuf->ops->detach(dmabuf, attach);
- mutex_unlock(&dmabuf->lock);
kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
@@ -749,6 +804,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf))
return ERR_PTR(-EINVAL);
+ if (dma_buf_attachment_is_dynamic(attach))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
if (attach->sgt) {
/*
* Two mappings with different directions for the same
@@ -761,6 +819,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
return attach->sgt;
}
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
@@ -793,9 +854,15 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
+ if (dma_buf_attachment_is_dynamic(attach))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
if (attach->sgt == sg_table)
return;
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
@@ -811,29 +878,9 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
* with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
* access.
*
- * To support dma_buf objects residing in highmem cpu access is page-based
- * using an api similar to kmap. Accessing a dma_buf is done in aligned chunks
- * of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
- * returns a pointer in kernel virtual address space. Afterwards the chunk
- * needs to be unmapped again. There is no limit on how often a given chunk
- * can be mapped and unmapped, i.e. the importer does not need to call
- * begin_cpu_access again before mapping the same chunk again.
- *
- * Interfaces::
- * void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
- * void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
- *
- * Implementing the functions is optional for exporters and for importers all
- * the restrictions of using kmap apply.
- *
- * dma_buf kmap calls outside of the range specified in begin_cpu_access are
- * undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
- * the partial chunks at the beginning and end but may return stale or bogus
- * data outside of the range (in these partial chunks).
- *
- * For some cases the overhead of kmap can be too high, a vmap interface
- * is introduced. This interface should be used very carefully, as vmalloc
- * space is a limited resources on many architectures.
+ * Since most kernel-internal dma-buf accesses need the entire buffer, a
+ * vmap interface is introduced. Note that on very old 32-bit architectures
+ * vmalloc space might be limited and result in vmap calls failing.
*
* Interfaces::
* void \*dma_buf_vmap(struct dma_buf \*dmabuf)
@@ -909,11 +956,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
{
bool write = (direction == DMA_BIDIRECTIONAL ||
direction == DMA_TO_DEVICE);
- struct reservation_object *resv = dmabuf->resv;
+ struct dma_resv *resv = dmabuf->resv;
long ret;
/* Wait on any implicit rendering fences */
- ret = reservation_object_wait_timeout_rcu(resv, write, true,
+ ret = dma_resv_wait_timeout_rcu(resv, write, true,
MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
@@ -983,43 +1030,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
-/**
- * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
- * same restrictions as for kmap and friends apply.
- * @dmabuf: [in] buffer to map page from.
- * @page_num: [in] page in PAGE_SIZE units to map.
- *
- * This call must always succeed, any necessary preparations that might fail
- * need to be done in begin_cpu_access.
- */
-void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
-{
- WARN_ON(!dmabuf);
-
- if (!dmabuf->ops->map)
- return NULL;
- return dmabuf->ops->map(dmabuf, page_num);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kmap);
-
-/**
- * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
- * @dmabuf: [in] buffer to unmap page from.
- * @page_num: [in] page in PAGE_SIZE units to unmap.
- * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap.
- *
- * This call must always succeed.
- */
-void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
- void *vaddr)
-{
- WARN_ON(!dmabuf);
-
- if (dmabuf->ops->unmap)
- dmabuf->ops->unmap(dmabuf, page_num, vaddr);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kunmap);
-
/**
* dma_buf_mmap - Setup up a userspace mmap with the given vma
@@ -1154,8 +1164,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
int ret;
struct dma_buf *buf_obj;
struct dma_buf_attachment *attach_obj;
- struct reservation_object *robj;
- struct reservation_object_list *fobj;
+ struct dma_resv *robj;
+ struct dma_resv_list *fobj;
struct dma_fence *fence;
unsigned seq;
int count = 0, attach_count, shared_count, i;
@@ -1171,13 +1181,10 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
"size", "flags", "mode", "count", "ino");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
- ret = mutex_lock_interruptible(&buf_obj->lock);
- if (ret) {
- seq_puts(s,
- "\tERROR locking buffer object: skipping\n");
- continue;
- }
+ ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
+ if (ret)
+ goto error_unlock;
seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
buf_obj->size,
@@ -1223,19 +1230,23 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
attach_count++;
}
+ dma_resv_unlock(buf_obj->resv);
seq_printf(s, "Total %d devices attached\n\n",
attach_count);
count++;
size += buf_obj->size;
- mutex_unlock(&buf_obj->lock);
}
seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
mutex_unlock(&db_list.lock);
return 0;
+
+error_unlock:
+ mutex_unlock(&db_list.lock);
+ return ret;
}
DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
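
The reworked attach path above introduces a dynamic calling convention: an attachment created with dynamic_mapping set must map and unmap its sg_table with the reservation lock held, while old importers keep the static behaviour through the dma_buf_attach() wrapper. A hedged importer-side sketch, assuming dmabuf and dev are provided by the importing driver and that holding the lock around the map is all this importer needs:

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/err.h>

static struct sg_table *import_dynamic(struct dma_buf *dmabuf,
				       struct device *dev,
				       struct dma_buf_attachment **out_attach)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	/* Tell the exporter this importer follows the dynamic rules. */
	attach = dma_buf_dynamic_attach(dmabuf, dev, true);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* Dynamic attachments must hold the reservation lock while mapping. */
	dma_resv_lock(dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(dmabuf->resv);

	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		return sgt;
	}

	*out_attach = attach;
	return sgt;
}

A static importer simply calls dma_buf_attach() instead and keeps mapping without the lock; the mismatched static/dynamic case is what the cached-sgt code in dma_buf_dynamic_attach() above covers.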
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 12c6f64c0bc2..d3fbd950be94 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -13,6 +13,8 @@
#include <linux/slab.h>
#include <linux/dma-fence-array.h>
+#define PENDING_ERROR 1
+
static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
return "dma_fence_array";
@@ -23,10 +25,29 @@ static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
return "unbound";
}
+static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
+ int error)
+{
+ /*
+ * Propagate the first error reported by any of our fences, but only
+ * before we ourselves are signaled.
+ */
+ if (error)
+ cmpxchg(&array->base.error, PENDING_ERROR, error);
+}
+
+static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
+{
+ /* Clear the error flag if not actually set. */
+ cmpxchg(&array->base.error, PENDING_ERROR, 0);
+}
+
static void irq_dma_fence_array_work(struct irq_work *wrk)
{
struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
+ dma_fence_array_clear_pending_error(array);
+
dma_fence_signal(&array->base);
dma_fence_put(&array->base);
}
@@ -38,6 +59,8 @@ static void dma_fence_array_cb_func(struct dma_fence *f,
container_of(cb, struct dma_fence_array_cb, cb);
struct dma_fence_array *array = array_cb->array;
+ dma_fence_array_set_pending_error(array, f->error);
+
if (atomic_dec_and_test(&array->num_pending))
irq_work_queue(&array->work);
else
@@ -63,9 +86,14 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
dma_fence_get(&array->base);
if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
dma_fence_array_cb_func)) {
+ int error = array->fences[i]->error;
+
+ dma_fence_array_set_pending_error(array, error);
dma_fence_put(&array->base);
- if (atomic_dec_and_test(&array->num_pending))
+ if (atomic_dec_and_test(&array->num_pending)) {
+ dma_fence_array_clear_pending_error(array);
return false;
+ }
}
}
@@ -142,6 +170,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
array->fences = fences;
+ array->base.error = PENDING_ERROR;
+
return array;
}
EXPORT_SYMBOL(dma_fence_array_create);
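
The PENDING_ERROR handling above makes a fence array inherit the first error that any of its component fences reports before the array itself signals. A hedged sketch of how that looks from the API side; f0 and f1 are assumed to be fences the caller already holds references to (on success the array takes those references over), and the -EIO failure is purely illustrative:

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/slab.h>

static struct dma_fence *bundle_fences(struct dma_fence *f0,
				       struct dma_fence *f1)
{
	struct dma_fence **fences;
	struct dma_fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;
	fences[0] = f0;
	fences[1] = f1;

	/* signal_on_any = false: the array signals once both fences do. */
	array = dma_fence_array_create(2, fences, dma_fence_context_alloc(1),
				       1, false);
	if (!array)
		kfree(fences);
	return array ? &array->base : NULL;
}

static void producer_failed(struct dma_fence *job_fence)
{
	/* Record the error before signalling; the array callback samples it. */
	dma_fence_set_error(job_fence, -EIO);
	dma_fence_signal(job_fence);
}

If producer_failed() hits f0 while f1 later signals cleanly, the array's base.error ends up as -EIO; if every fence signals without error, the PENDING_ERROR placeholder is cleared again before the array signals.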
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index b5089f64be2a..44a741677d25 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -178,8 +178,30 @@ static bool dma_fence_chain_signaled(struct dma_fence *fence)
static void dma_fence_chain_release(struct dma_fence *fence)
{
struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+ struct dma_fence *prev;
+
+ /* Manually unlink the chain as much as possible to avoid recursion
+ * and potential stack overflow.
+ */
+ while ((prev = rcu_dereference_protected(chain->prev, true))) {
+ struct dma_fence_chain *prev_chain;
+
+ if (kref_read(&prev->refcount) > 1)
+ break;
+
+ prev_chain = to_dma_fence_chain(prev);
+ if (!prev_chain)
+ break;
+
+ /* No need for atomic operations since we hold the last
+ * reference to prev_chain.
+ */
+ chain->prev = prev_chain->prev;
+ RCU_INIT_POINTER(prev_chain->prev, NULL);
+ dma_fence_put(prev);
+ }
+ dma_fence_put(prev);
- dma_fence_put(rcu_dereference_protected(chain->prev, true));
dma_fence_put(chain->fence);
dma_fence_free(fence);
}
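
The manual unlink above matters because a fence chain typically grows by one node per submission on a timeline, so releasing the head recursively could recurse once per node and overflow the kernel stack. A hypothetical sketch of how such a chain grows; timeline_advance() is illustrative and not part of this patch:

#include <linux/dma-fence-chain.h>
#include <linux/slab.h>

/* Append one submission to a timeline; the chain node takes over the
 * references to both "prev" (the old timeline head) and "job_fence". */
static struct dma_fence *timeline_advance(struct dma_fence *prev,
					  struct dma_fence *job_fence,
					  u64 seqno)
{
	struct dma_fence_chain *chain;

	chain = kmalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;

	dma_fence_chain_init(chain, prev, job_fence, seqno);
	return &chain->base;
}

After thousands of submissions the head is a linked list of that length, which is exactly what dma_fence_chain_release() now walks iteratively.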
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 59ac96ec7ba8..052a41e2451c 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -60,7 +60,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
*
* - Then there's also implicit fencing, where the synchronization points are
* implicitly passed around as part of shared &dma_buf instances. Such
- * implicit fences are stored in &struct reservation_object through the
+ * implicit fences are stored in &struct dma_resv through the
* &dma_buf.resv pointer.
*/
@@ -129,31 +129,27 @@ EXPORT_SYMBOL(dma_fence_context_alloc);
int dma_fence_signal_locked(struct dma_fence *fence)
{
struct dma_fence_cb *cur, *tmp;
- int ret = 0;
+ struct list_head cb_list;
lockdep_assert_held(fence->lock);
- if (WARN_ON(!fence))
+ if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fence->flags)))
return -EINVAL;
- if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- ret = -EINVAL;
+ /* Stash the cb_list before replacing it with the timestamp */
+ list_replace(&fence->cb_list, &cb_list);
- /*
- * we might have raced with the unlocked dma_fence_signal,
- * still run through all callbacks
- */
- } else {
- fence->timestamp = ktime_get();
- set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
- trace_dma_fence_signaled(fence);
- }
+ fence->timestamp = ktime_get();
+ set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+ trace_dma_fence_signaled(fence);
- list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
- list_del_init(&cur->node);
+ list_for_each_entry_safe(cur, tmp, &cb_list, node) {
+ INIT_LIST_HEAD(&cur->node);
cur->func(fence, cur);
}
- return ret;
+
+ return 0;
}
EXPORT_SYMBOL(dma_fence_signal_locked);
@@ -173,28 +169,16 @@ EXPORT_SYMBOL(dma_fence_signal_locked);
int dma_fence_signal(struct dma_fence *fence)
{
unsigned long flags;
+ int ret;
if (!fence)
return -EINVAL;
- if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return -EINVAL;
-
- fence->timestamp = ktime_get();
- set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
- trace_dma_fence_signaled(fence);
-
- if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
- struct dma_fence_cb *cur, *tmp;
+ spin_lock_irqsave(fence->lock, flags);
+ ret = dma_fence_signal_locked(fence);
+ spin_unlock_irqrestore(fence->lock, flags);
- spin_lock_irqsave(fence->lock, flags);
- list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
- list_del_init(&cur->node);
- cur->func(fence, cur);
- }
- spin_unlock_irqrestore(fence->lock, flags);
- }
- return 0;
+ return ret;
}
EXPORT_SYMBOL(dma_fence_signal);
@@ -248,7 +232,8 @@ void dma_fence_release(struct kref *kref)
trace_dma_fence_destroy(fence);
- if (WARN(!list_empty(&fence->cb_list),
+ if (WARN(!list_empty(&fence->cb_list) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
"Fence %s:%s:%llx:%llx released with pending signals!\n",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
@@ -288,6 +273,30 @@ void dma_fence_free(struct dma_fence *fence)
}
EXPORT_SYMBOL(dma_fence_free);
+static bool __dma_fence_enable_signaling(struct dma_fence *fence)
+{
+ bool was_set;
+
+ lockdep_assert_held(fence->lock);
+
+ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return false;
+
+ if (!was_set && fence->ops->enable_signaling) {
+ trace_dma_fence_enable_signal(fence);
+
+ if (!fence->ops->enable_signaling(fence)) {
+ dma_fence_signal_locked(fence);
+ return false;
+ }
+ }
+
+ return true;
+}
+
/**
* dma_fence_enable_sw_signaling - enable signaling on fence
* @fence: the fence to enable
@@ -300,19 +309,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
- if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
- fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- spin_lock_irqsave(fence->lock, flags);
-
- if (!fence->ops->enable_signaling(fence))
- dma_fence_signal_locked(fence);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return;
- spin_unlock_irqrestore(fence->lock, flags);
- }
+ spin_lock_irqsave(fence->lock, flags);
+ __dma_fence_enable_signaling(fence);
+ spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
@@ -346,7 +348,6 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
{
unsigned long flags;
int ret = 0;
- bool was_set;
if (WARN_ON(!fence || !func))
return -EINVAL;
@@ -358,25 +359,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
spin_lock_irqsave(fence->lock, flags);
- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- ret = -ENOENT;
- else if (!was_set && fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- if (!fence->ops->enable_signaling(fence)) {
- dma_fence_signal_locked(fence);
- ret = -ENOENT;
- }
- }
-
- if (!ret) {
+ if (__dma_fence_enable_signaling(fence)) {
cb->func = func;
list_add_tail(&cb->node, &fence->cb_list);
- } else
+ } else {
INIT_LIST_HEAD(&cb->node);
+ ret = -ENOENT;
+ }
+
spin_unlock_irqrestore(fence->lock, flags);
return ret;
@@ -476,7 +466,6 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
struct default_wait_cb cb;
unsigned long flags;
signed long ret = timeout ? timeout : 1;
- bool was_set;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return ret;
@@ -488,21 +477,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
goto out;
}
- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (!__dma_fence_enable_signaling(fence))
goto out;
- if (!was_set && fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- if (!fence->ops->enable_signaling(fence)) {
- dma_fence_signal_locked(fence);
- goto out;
- }
- }
-
if (!timeout) {
ret = 0;
goto out;
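
With the enable-signaling logic folded into __dma_fence_enable_signaling(), dma_fence_add_callback() keeps its documented behaviour: it returns -ENOENT, and never queues the callback, when the fence is already signalled or signalling could not be enabled. A hedged sketch of the usual caller pattern; the names my_cb() and arm_or_complete() are illustrative:

#include <linux/dma-fence.h>

static void my_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	/* Runs from whatever context signals the fence (often irq). */
}

static void arm_or_complete(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	int ret = dma_fence_add_callback(fence, cb, my_cb);

	if (ret == -ENOENT) {
		/*
		 * The fence is already signalled (or was signalled inline
		 * because enable_signaling() failed), so the callback will
		 * never fire; complete the work directly instead.
		 */
		my_cb(fence, cb);
	}
}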
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
new file mode 100644
index 000000000000..afd22c9dbdcf
--- /dev/null
+++ b/drivers/dma-buf/dma-heap.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Framework for userspace DMA-BUF allocations
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/xarray.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/dma-heap.h>
+#include <uapi/linux/dma-heap.h>
+
+#define DEVNAME "dma_heap"
+
+#define NUM_HEAP_MINORS 128
+
+/**
+ * struct dma_heap - represents a dmabuf heap in the system
+ * @name: used for debugging/device-node name
+ * @ops: ops struct for this heap
+ * @heap_devt heap device node
+ * @list list head connecting to list of heaps
+ * @heap_cdev heap char device
+ *
+ * Represents a heap of memory from which buffers can be made.
+ */
+struct dma_heap {
+ const char *name;
+ const struct dma_heap_ops *ops;
+ void *priv;
+ dev_t heap_devt;
+ struct list_head list;
+ struct cdev heap_cdev;
+};
+
+static LIST_HEAD(heap_list);
+static DEFINE_MUTEX(heap_list_lock);
+static dev_t dma_heap_devt;
+static struct class *dma_heap_class;
+static DEFINE_XARRAY_ALLOC(dma_heap_minors);
+
+static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
+ unsigned int fd_flags,
+ unsigned int heap_flags)
+{
+ /*
+ * Allocations from all heaps have to begin
+ * and end on page boundaries.
+ */
+ len = PAGE_ALIGN(len);
+ if (!len)
+ return -EINVAL;
+
+ return heap->ops->allocate(heap, len, fd_flags, heap_flags);
+}
+
+static int dma_heap_open(struct inode *inode, struct file *file)
+{
+ struct dma_heap *heap;
+
+ heap = xa_load(&dma_heap_minors, iminor(inode));
+ if (!heap) {
+ pr_err("dma_heap: minor %d unknown.\n", iminor(inode));
+ return -ENODEV;
+ }
+
+ /* instance data as context */
+ file->private_data = heap;
+ nonseekable_open(inode, file);
+
+ return 0;
+}
+
+static long dma_heap_ioctl_allocate(struct file *file, void *data)
+{
+ struct dma_heap_allocation_data *heap_allocation = data;
+ struct dma_heap *heap = file->private_data;
+ int fd;
+
+ if (heap_allocation->fd)
+ return -EINVAL;
+
+ if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
+ return -EINVAL;
+
+ if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
+ return -EINVAL;
+
+ fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
+ heap_allocation->fd_flags,
+ heap_allocation->heap_flags);
+ if (fd < 0)
+ return fd;
+
+ heap_allocation->fd = fd;
+
+ return 0;
+}
+
+static unsigned int dma_heap_ioctl_cmds[] = {
+ DMA_HEAP_IOCTL_ALLOC,
+};
+
+static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
+ unsigned long arg)
+{
+ char stack_kdata[128];
+ char *kdata = stack_kdata;
+ unsigned int kcmd;
+ unsigned int in_size, out_size, drv_size, ksize;
+ int nr = _IOC_NR(ucmd);
+ int ret = 0;
+
+ if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
+ return -EINVAL;
+
+ /* Get the kernel ioctl cmd that matches */
+ kcmd = dma_heap_ioctl_cmds[nr];
+
+ /* Figure out the delta between user cmd size and kernel cmd size */
+ drv_size = _IOC_SIZE(kcmd);
+ out_size = _IOC_SIZE(ucmd);
+ in_size = out_size;
+ if ((ucmd & kcmd & IOC_IN) == 0)
+ in_size = 0;
+ if ((ucmd & kcmd & IOC_OUT) == 0)
+ out_size = 0;
+ ksize = max(max(in_size, out_size), drv_size);
+
+ /* If necessary, allocate buffer for ioctl argument */
+ if (ksize > sizeof(stack_kdata)) {
+ kdata = kmalloc(ksize, GFP_KERNEL);
+ if (!kdata)
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ /* zero out any difference between the kernel/user structure size */
+ if (ksize > in_size)
+ memset(kdata + in_size, 0, ksize - in_size);
+
+ switch (kcmd) {
+ case DMA_HEAP_IOCTL_ALLOC:
+ ret = dma_heap_ioctl_allocate(file, kdata);
+ break;
+ default:
+ ret = -ENOTTY;
+ goto err;
+ }
+
+ if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
+ ret = -EFAULT;
+err:
+ if (kdata != stack_kdata)
+ kfree(kdata);
+ return ret;
+}
+
+static const struct file_operations dma_heap_fops = {
+ .owner = THIS_MODULE,
+ .open = dma_heap_open,
+ .unlocked_ioctl = dma_heap_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = dma_heap_ioctl,
+#endif
+};
+
+/**
+ * dma_heap_get_drvdata() - get per-subdriver data for the heap
+ * @heap: DMA-Heap to retrieve private data for
+ *
+ * Returns:
+ * The per-subdriver data for the heap.
+ */
+void *dma_heap_get_drvdata(struct dma_heap *heap)
+{
+ return heap->priv;
+}
+
+struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
+{
+ struct dma_heap *heap, *h, *err_ret;
+ struct device *dev_ret;
+ unsigned int minor;
+ int ret;
+
+ if (!exp_info->name || !strcmp(exp_info->name, "")) {
+ pr_err("dma_heap: Cannot add heap without a name\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!exp_info->ops || !exp_info->ops->allocate) {
+ pr_err("dma_heap: Cannot add heap with invalid ops struct\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* check the name is unique */
+ mutex_lock(&heap_list_lock);
+ list_for_each_entry(h, &heap_list, list) {
+ if (!strcmp(h->name, exp_info->name)) {
+ mutex_unlock(&heap_list_lock);
+ pr_err("dma_heap: Already registered heap named %s\n",
+ exp_info->name);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+ mutex_unlock(&heap_list_lock);
+
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+
+ heap->name = exp_info->name;
+ heap->ops = exp_info->ops;
+ heap->priv = exp_info->priv;
+
+ /* Find unused minor number */
+ ret = xa_alloc(&dma_heap_minors, &minor, heap,
+ XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("dma_heap: Unable to get minor number for heap\n");
+ err_ret = ERR_PTR(ret);
+ goto err0;
+ }
+
+ /* Create device */
+ heap->heap_devt = MKDEV(MAJOR(dma_heap_devt), minor);
+
+ cdev_init(&heap->heap_cdev, &dma_heap_fops);
+ ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1);
+ if (ret < 0) {
+ pr_err("dma_heap: Unable to add char device\n");
+ err_ret = ERR_PTR(ret);
+ goto err1;
+ }
+
+ dev_ret = device_create(dma_heap_class,
+ NULL,
+ heap->heap_devt,
+ NULL,
+ heap->name);
+ if (IS_ERR(dev_ret)) {
+ pr_err("dma_heap: Unable to create device\n");
+ err_ret = ERR_CAST(dev_ret);
+ goto err2;
+ }
+ /* Add heap to the list */
+ mutex_lock(&heap_list_lock);
+ list_add(&heap->list, &heap_list);
+ mutex_unlock(&heap_list_lock);
+
+ return heap;
+
+err2:
+ cdev_del(&heap->heap_cdev);
+err1:
+ xa_erase(&dma_heap_minors, minor);
+err0:
+ kfree(heap);
+ return err_ret;
+}
+
+static char *dma_heap_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
+}
+
+static int dma_heap_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
+ if (ret)
+ return ret;
+
+ dma_heap_class = class_create(THIS_MODULE, DEVNAME);
+ if (IS_ERR(dma_heap_class)) {
+ unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
+ return PTR_ERR(dma_heap_class);
+ }
+ dma_heap_class->devnode = dma_heap_devnode;
+
+ return 0;
+}
+subsys_initcall(dma_heap_init);
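
dma-heap.c above is only the chardev and ioctl plumbing; the memory itself comes from heap providers that register with dma_heap_add(). A hedged skeleton of such a provider follows; the my_heap names are hypothetical and the allocate callback is stubbed out, since a real heap has to build a dma-buf and return its fd the way the CMA and system heaps further down do via the heap helpers.

#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/module.h>

static int my_heap_allocate(struct dma_heap *heap, unsigned long len,
			    unsigned long fd_flags, unsigned long heap_flags)
{
	/*
	 * A real heap would allocate backing pages here, wrap them in a
	 * dma-buf via dma_buf_export() and return dma_buf_fd().
	 */
	return -ENOMEM;
}

static const struct dma_heap_ops my_heap_ops = {
	.allocate = my_heap_allocate,
};

static int __init my_heap_init(void)
{
	struct dma_heap_export_info exp_info = {
		.name = "my_heap",	/* becomes /dev/dma_heap/my_heap */
		.ops = &my_heap_ops,
		.priv = NULL,		/* readable later via dma_heap_get_drvdata() */
	};
	struct dma_heap *heap = dma_heap_add(&exp_info);

	return PTR_ERR_OR_ZERO(heap);
}
module_init(my_heap_init);
MODULE_LICENSE("GPL v2");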
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/dma-resv.c
index 4447e13d1e89..4264e64788c4 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -32,8 +32,9 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/export.h>
+#include <linux/sched/mm.h>
/**
* DOC: Reservation Object Overview
@@ -56,26 +57,134 @@ const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
/**
- * reservation_object_reserve_shared - Reserve space to add shared fences to
- * a reservation_object.
+ * dma_resv_list_alloc - allocate fence list
+ * @shared_max: number of fences we need space for
+ *
+ * Allocate a new dma_resv_list and make sure to correctly initialize
+ * shared_max.
+ */
+static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
+{
+ struct dma_resv_list *list;
+
+ list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
+ if (!list)
+ return NULL;
+
+ list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
+ sizeof(*list->shared);
+
+ return list;
+}
+
+/**
+ * dma_resv_list_free - free fence list
+ * @list: list to free
+ *
+ * Free a dma_resv_list and make sure to drop all references.
+ */
+static void dma_resv_list_free(struct dma_resv_list *list)
+{
+ unsigned int i;
+
+ if (!list)
+ return;
+
+ for (i = 0; i < list->shared_count; ++i)
+ dma_fence_put(rcu_dereference_protected(list->shared[i], true));
+
+ kfree_rcu(list, rcu);
+}
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+static int __init dma_resv_lockdep(void)
+{
+ struct mm_struct *mm = mm_alloc();
+ struct ww_acquire_ctx ctx;
+ struct dma_resv obj;
+ int ret;
+
+ if (!mm)
+ return -ENOMEM;
+
+ dma_resv_init(&obj);
+
+ down_read(&mm->mmap_sem);
+ ww_acquire_init(&ctx, &reservation_ww_class);
+ ret = dma_resv_lock(&obj, &ctx);
+ if (ret == -EDEADLK)
+ dma_resv_lock_slow(&obj, &ctx);
+ fs_reclaim_acquire(GFP_KERNEL);
+ fs_reclaim_release(GFP_KERNEL);
+ ww_mutex_unlock(&obj.lock);
+ ww_acquire_fini(&ctx);
+ up_read(&mm->mmap_sem);
+
+ mmput(mm);
+
+ return 0;
+}
+subsys_initcall(dma_resv_lockdep);
+#endif
+
+/**
+ * dma_resv_init - initialize a reservation object
+ * @obj: the reservation object
+ */
+void dma_resv_init(struct dma_resv *obj)
+{
+ ww_mutex_init(&obj->lock, &reservation_ww_class);
+
+ __seqcount_init(&obj->seq, reservation_seqcount_string,
+ &reservation_seqcount_class);
+ RCU_INIT_POINTER(obj->fence, NULL);
+ RCU_INIT_POINTER(obj->fence_excl, NULL);
+}
+EXPORT_SYMBOL(dma_resv_init);
+
+/**
+ * dma_resv_fini - destroys a reservation object
+ * @obj: the reservation object
+ */
+void dma_resv_fini(struct dma_resv *obj)
+{
+ struct dma_resv_list *fobj;
+ struct dma_fence *excl;
+
+ /*
+ * This object should be dead and all references must have
+ * been released to it, so no need to be protected with rcu.
+ */
+ excl = rcu_dereference_protected(obj->fence_excl, 1);
+ if (excl)
+ dma_fence_put(excl);
+
+ fobj = rcu_dereference_protected(obj->fence, 1);
+ dma_resv_list_free(fobj);
+ ww_mutex_destroy(&obj->lock);
+}
+EXPORT_SYMBOL(dma_resv_fini);
+
+/**
+ * dma_resv_reserve_shared - Reserve space to add shared fences to
+ * a dma_resv.
* @obj: reservation object
* @num_fences: number of fences we want to add
*
- * Should be called before reservation_object_add_shared_fence(). Must
+ * Should be called before dma_resv_add_shared_fence(). Must
* be called with obj->lock held.
*
* RETURNS
* Zero for success, or -errno
*/
-int reservation_object_reserve_shared(struct reservation_object *obj,
- unsigned int num_fences)
+int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
- struct reservation_object_list *old, *new;
+ struct dma_resv_list *old, *new;
unsigned int i, j, k, max;
- reservation_object_assert_held(obj);
+ dma_resv_assert_held(obj);
- old = reservation_object_get_list(obj);
+ old = dma_resv_get_list(obj);
if (old && old->shared_max) {
if ((old->shared_count + num_fences) <= old->shared_max)
@@ -87,7 +196,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
max = 4;
}
- new = kmalloc(offsetof(typeof(*new), shared[max]), GFP_KERNEL);
+ new = dma_resv_list_alloc(max);
if (!new)
return -ENOMEM;
@@ -101,79 +210,76 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
struct dma_fence *fence;
fence = rcu_dereference_protected(old->shared[i],
- reservation_object_held(obj));
+ dma_resv_held(obj));
if (dma_fence_is_signaled(fence))
RCU_INIT_POINTER(new->shared[--k], fence);
else
RCU_INIT_POINTER(new->shared[j++], fence);
}
new->shared_count = j;
- new->shared_max = max;
- preempt_disable();
- write_seqcount_begin(&obj->seq);
/*
- * RCU_INIT_POINTER can be used here,
- * seqcount provides the necessary barriers
+ * We are not changing the effective set of fences here so can
+ * merely update the pointer to the new array; both existing
+ * readers and new readers will see exactly the same set of
+ * active (unsignaled) shared fences. Individual fences and the
+ * old array are protected by RCU and so will not vanish under
+ * the gaze of the rcu_read_lock() readers.
*/
- RCU_INIT_POINTER(obj->fence, new);
- write_seqcount_end(&obj->seq);
- preempt_enable();
+ rcu_assign_pointer(obj->fence, new);
if (!old)
return 0;
/* Drop the references to the signaled fences */
- for (i = k; i < new->shared_max; ++i) {
+ for (i = k; i < max; ++i) {
struct dma_fence *fence;
fence = rcu_dereference_protected(new->shared[i],
- reservation_object_held(obj));
+ dma_resv_held(obj));
dma_fence_put(fence);
}
kfree_rcu(old, rcu);
return 0;
}
-EXPORT_SYMBOL(reservation_object_reserve_shared);
+EXPORT_SYMBOL(dma_resv_reserve_shared);
/**
- * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * dma_resv_add_shared_fence - Add a fence to a shared slot
* @obj: the reservation object
* @fence: the shared fence to add
*
* Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared() has been called.
+ * dma_resv_reserve_shared() has been called.
*/
-void reservation_object_add_shared_fence(struct reservation_object *obj,
- struct dma_fence *fence)
+void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
- struct reservation_object_list *fobj;
+ struct dma_resv_list *fobj;
+ struct dma_fence *old;
unsigned int i, count;
dma_fence_get(fence);
- reservation_object_assert_held(obj);
+ dma_resv_assert_held(obj);
- fobj = reservation_object_get_list(obj);
+ fobj = dma_resv_get_list(obj);
count = fobj->shared_count;
preempt_disable();
write_seqcount_begin(&obj->seq);
for (i = 0; i < count; ++i) {
- struct dma_fence *old_fence;
- old_fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(obj));
- if (old_fence->context == fence->context ||
- dma_fence_is_signaled(old_fence)) {
- dma_fence_put(old_fence);
+ old = rcu_dereference_protected(fobj->shared[i],
+ dma_resv_held(obj));
+ if (old->context == fence->context ||
+ dma_fence_is_signaled(old))
goto replace;
- }
}
BUG_ON(fobj->shared_count >= fobj->shared_max);
+ old = NULL;
count++;
replace:
@@ -183,26 +289,26 @@ replace:
write_seqcount_end(&obj->seq);
preempt_enable();
+ dma_fence_put(old);
}
-EXPORT_SYMBOL(reservation_object_add_shared_fence);
+EXPORT_SYMBOL(dma_resv_add_shared_fence);
/**
- * reservation_object_add_excl_fence - Add an exclusive fence.
+ * dma_resv_add_excl_fence - Add an exclusive fence.
* @obj: the reservation object
* @fence: the shared fence to add
*
* Add a fence to the exclusive slot. The obj->lock must be held.
*/
-void reservation_object_add_excl_fence(struct reservation_object *obj,
- struct dma_fence *fence)
+void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
- struct dma_fence *old_fence = reservation_object_get_excl(obj);
- struct reservation_object_list *old;
+ struct dma_fence *old_fence = dma_resv_get_excl(obj);
+ struct dma_resv_list *old;
u32 i = 0;
- reservation_object_assert_held(obj);
+ dma_resv_assert_held(obj);
- old = reservation_object_get_list(obj);
+ old = dma_resv_get_list(obj);
if (old)
i = old->shared_count;
@@ -221,28 +327,26 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
/* inplace update, no shared fences */
while (i--)
dma_fence_put(rcu_dereference_protected(old->shared[i],
- reservation_object_held(obj)));
+ dma_resv_held(obj)));
dma_fence_put(old_fence);
}
-EXPORT_SYMBOL(reservation_object_add_excl_fence);
+EXPORT_SYMBOL(dma_resv_add_excl_fence);
/**
-* reservation_object_copy_fences - Copy all fences from src to dst.
+* dma_resv_copy_fences - Copy all fences from src to dst.
* @dst: the destination reservation object
* @src: the source reservation object
*
* Copy all fences from src to dst. dst-lock must be held.
*/
-int reservation_object_copy_fences(struct reservation_object *dst,
- struct reservation_object *src)
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
- struct reservation_object_list *src_list, *dst_list;
+ struct dma_resv_list *src_list, *dst_list;
struct dma_fence *old, *new;
- size_t size;
unsigned i;
- reservation_object_assert_held(dst);
+ dma_resv_assert_held(dst);
rcu_read_lock();
src_list = rcu_dereference(src->fence);
@@ -251,10 +355,9 @@ retry:
if (src_list) {
unsigned shared_count = src_list->shared_count;
- size = offsetof(typeof(*src_list), shared[shared_count]);
rcu_read_unlock();
- dst_list = kmalloc(size, GFP_KERNEL);
+ dst_list = dma_resv_list_alloc(shared_count);
if (!dst_list)
return -ENOMEM;
@@ -266,7 +369,6 @@ retry:
}
dst_list->shared_count = 0;
- dst_list->shared_max = shared_count;
for (i = 0; i < src_list->shared_count; ++i) {
struct dma_fence *fence;
@@ -276,7 +378,7 @@ retry:
continue;
if (!dma_fence_get_rcu(fence)) {
- kfree(dst_list);
+ dma_resv_list_free(dst_list);
src_list = rcu_dereference(src->fence);
goto retry;
}
@@ -295,8 +397,8 @@ retry:
new = dma_fence_get_rcu_safe(&src->fence_excl);
rcu_read_unlock();
- src_list = reservation_object_get_list(dst);
- old = reservation_object_get_excl(dst);
+ src_list = dma_resv_get_list(dst);
+ old = dma_resv_get_excl(dst);
preempt_disable();
write_seqcount_begin(&dst->seq);
@@ -306,16 +408,15 @@ retry:
write_seqcount_end(&dst->seq);
preempt_enable();
- if (src_list)
- kfree_rcu(src_list, rcu);
+ dma_resv_list_free(src_list);
dma_fence_put(old);
return 0;
}
-EXPORT_SYMBOL(reservation_object_copy_fences);
+EXPORT_SYMBOL(dma_resv_copy_fences);
/**
- * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * dma_resv_get_fences_rcu - Get an object's shared and exclusive
* fences without update side lock held
* @obj: the reservation object
* @pfence_excl: the returned exclusive fence (or NULL)
@@ -327,10 +428,10 @@ EXPORT_SYMBOL(reservation_object_copy_fences);
* exclusive fence is not specified the fence is put into the array of the
* shared fences as well. Returns either zero or -ENOMEM.
*/
-int reservation_object_get_fences_rcu(struct reservation_object *obj,
- struct dma_fence **pfence_excl,
- unsigned *pshared_count,
- struct dma_fence ***pshared)
+int dma_resv_get_fences_rcu(struct dma_resv *obj,
+ struct dma_fence **pfence_excl,
+ unsigned *pshared_count,
+ struct dma_fence ***pshared)
{
struct dma_fence **shared = NULL;
struct dma_fence *fence_excl;
@@ -338,7 +439,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
int ret = 1;
do {
- struct reservation_object_list *fobj;
+ struct dma_resv_list *fobj;
unsigned int i, seq;
size_t sz = 0;
@@ -385,13 +486,6 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
if (!dma_fence_get_rcu(shared[i]))
break;
}
-
- if (!pfence_excl && fence_excl) {
- shared[i] = fence_excl;
- fence_excl = NULL;
- ++i;
- ++shared_count;
- }
}
if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
@@ -406,6 +500,11 @@ unlock:
rcu_read_unlock();
} while (ret);
+ if (pfence_excl)
+ *pfence_excl = fence_excl;
+ else if (fence_excl)
+ shared[shared_count++] = fence_excl;
+
if (!shared_count) {
kfree(shared);
shared = NULL;
@@ -413,15 +512,12 @@ unlock:
*pshared_count = shared_count;
*pshared = shared;
- if (pfence_excl)
- *pfence_excl = fence_excl;
-
return ret;
}
-EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
/**
- * reservation_object_wait_timeout_rcu - Wait on reservation's objects
+ * dma_resv_wait_timeout_rcu - Wait on reservation's objects
* shared and/or exclusive fences.
* @obj: the reservation object
* @wait_all: if true, wait on all fences, else wait on just exclusive fence
@@ -432,9 +528,9 @@ EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
* greater than zero on success.
*/
-long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
- bool wait_all, bool intr,
- unsigned long timeout)
+long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
+ bool wait_all, bool intr,
+ unsigned long timeout)
{
struct dma_fence *fence;
unsigned seq, shared_count;
@@ -462,8 +558,7 @@ retry:
}
if (wait_all) {
- struct reservation_object_list *fobj =
- rcu_dereference(obj->fence);
+ struct dma_resv_list *fobj = rcu_dereference(obj->fence);
if (fobj)
shared_count = fobj->shared_count;
@@ -506,11 +601,10 @@ unlock_retry:
rcu_read_unlock();
goto retry;
}
-EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
-static inline int
-reservation_object_test_signaled_single(struct dma_fence *passed_fence)
+static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
struct dma_fence *fence, *lfence = passed_fence;
int ret = 1;
@@ -527,7 +621,7 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
}
/**
- * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * dma_resv_test_signaled_rcu - Test if a reservation object's
* fences have been signaled.
* @obj: the reservation object
* @test_all: if true, test all fences, otherwise only test the exclusive
@@ -536,8 +630,7 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
* RETURNS
* true if all fences signaled, else false
*/
-bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
- bool test_all)
+bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
unsigned seq, shared_count;
int ret;
@@ -551,8 +644,7 @@ retry:
if (test_all) {
unsigned i;
- struct reservation_object_list *fobj =
- rcu_dereference(obj->fence);
+ struct dma_resv_list *fobj = rcu_dereference(obj->fence);
if (fobj)
shared_count = fobj->shared_count;
@@ -560,7 +652,7 @@ retry:
for (i = 0; i < shared_count; ++i) {
struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
- ret = reservation_object_test_signaled_single(fence);
+ ret = dma_resv_test_signaled_single(fence);
if (ret < 0)
goto retry;
else if (!ret)
@@ -575,8 +667,7 @@ retry:
struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl) {
- ret = reservation_object_test_signaled_single(
- fence_excl);
+ ret = dma_resv_test_signaled_single(fence_excl);
if (ret < 0)
goto retry;
@@ -588,4 +679,4 @@ retry:
rcu_read_unlock();
return ret;
}
-EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
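
The rename from reservation_object to dma_resv leaves the calling convention intact: writers take the ww-mutex, reserve a shared slot before adding a fence, and readers wait or test under RCU. A hedged sketch of both sides, assuming obj and fence are supplied by the calling driver:

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>
#include <linux/sched.h>

static int publish_read_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	int ret;

	ret = dma_resv_lock(obj, NULL);
	if (ret)
		return ret;

	/* Reserve the slot first; adding the fence itself cannot fail. */
	ret = dma_resv_reserve_shared(obj, 1);
	if (!ret)
		dma_resv_add_shared_fence(obj, fence);

	dma_resv_unlock(obj);
	return ret;
}

static long wait_for_idle(struct dma_resv *obj)
{
	/* Wait on all shared and exclusive fences, interruptibly. */
	return dma_resv_wait_timeout_rcu(obj, true, true,
					 MAX_SCHEDULE_TIMEOUT);
}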
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
new file mode 100644
index 000000000000..a5eef06c4226
--- /dev/null
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -0,0 +1,14 @@
+config DMABUF_HEAPS_SYSTEM
+ bool "DMA-BUF System Heap"
+ depends on DMABUF_HEAPS
+ help
+ Choose this option to enable the system dmabuf heap. The system heap
+ is backed by pages from the buddy allocator. If in doubt, say Y.
+
+config DMABUF_HEAPS_CMA
+ bool "DMA-BUF CMA Heap"
+ depends on DMABUF_HEAPS && DMA_CMA
+ help
+ Choose this option to enable dma-buf CMA heap. This heap is backed
+ by the Contiguous Memory Allocator (CMA). If your system has these
+ regions, you should say Y here.
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
new file mode 100644
index 000000000000..6e54cdec3da0
--- /dev/null
+++ b/drivers/dma-buf/heaps/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += heap-helpers.o
+obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
+obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
new file mode 100644
index 000000000000..626cf7fd033a
--- /dev/null
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF CMA heap exporter
+ *
+ * Copyright (C) 2012, 2019 Linaro Ltd.
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ */
+
+#include <linux/cma.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/dma-contiguous.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/sched/signal.h>
+
+#include "heap-helpers.h"
+
+struct cma_heap {
+ struct dma_heap *heap;
+ struct cma *cma;
+};
+
+static void cma_heap_free(struct heap_helper_buffer *buffer)
+{
+ struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
+ unsigned long nr_pages = buffer->pagecount;
+ struct page *cma_pages = buffer->priv_virt;
+
+ /* free page list */
+ kfree(buffer->pages);
+ /* release memory */
+ cma_release(cma_heap->cma, cma_pages, nr_pages);
+ kfree(buffer);
+}
+
+/* dmabuf heap CMA operations functions */
+static int cma_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
+{
+ struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
+ struct heap_helper_buffer *helper_buffer;
+ struct page *cma_pages;
+ size_t size = PAGE_ALIGN(len);
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned long align = get_order(size);
+ struct dma_buf *dmabuf;
+ int ret = -ENOMEM;
+ pgoff_t pg;
+
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+ helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
+ if (!helper_buffer)
+ return -ENOMEM;
+
+ init_heap_helper_buffer(helper_buffer, cma_heap_free);
+ helper_buffer->heap = heap;
+ helper_buffer->size = len;
+
+ cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
+ if (!cma_pages)
+ goto free_buf;
+
+ if (PageHighMem(cma_pages)) {
+ unsigned long nr_clear_pages = nr_pages;
+ struct page *page = cma_pages;
+
+ while (nr_clear_pages > 0) {
+ void *vaddr = kmap_atomic(page);
+
+ memset(vaddr, 0, PAGE_SIZE);
+ kunmap_atomic(vaddr);
+ /*
+ * Avoid wasting time zeroing memory if the process
+ * has been killed by SIGKILL
+ */
+ if (fatal_signal_pending(current))
+ goto free_cma;
+
+ page++;
+ nr_clear_pages--;
+ }
+ } else {
+ memset(page_address(cma_pages), 0, size);
+ }
+
+ helper_buffer->pagecount = nr_pages;
+ helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
+ sizeof(*helper_buffer->pages),
+ GFP_KERNEL);
+ if (!helper_buffer->pages) {
+ ret = -ENOMEM;
+ goto free_cma;
+ }
+
+ for (pg = 0; pg < helper_buffer->pagecount; pg++)
+ helper_buffer->pages[pg] = &cma_pages[pg];
+
+ /* create the dmabuf */
+ dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ goto free_pages;
+ }
+
+ helper_buffer->dmabuf = dmabuf;
+ helper_buffer->priv_virt = cma_pages;
+
+ ret = dma_buf_fd(dmabuf, fd_flags);
+ if (ret < 0) {
+ dma_buf_put(dmabuf);
+ /* just return, as put will call release and that will free */
+ return ret;
+ }
+
+ return ret;
+
+free_pages:
+ kfree(helper_buffer->pages);
+free_cma:
+ cma_release(cma_heap->cma, cma_pages, nr_pages);
+free_buf:
+ kfree(helper_buffer);
+ return ret;
+}
+
+static const struct dma_heap_ops cma_heap_ops = {
+ .allocate = cma_heap_allocate,
+};
+
+static int __add_cma_heap(struct cma *cma, void *data)
+{
+ struct cma_heap *cma_heap;
+ struct dma_heap_export_info exp_info;
+
+ cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
+ if (!cma_heap)
+ return -ENOMEM;
+ cma_heap->cma = cma;
+
+ exp_info.name = cma_get_name(cma);
+ exp_info.ops = &cma_heap_ops;
+ exp_info.priv = cma_heap;
+
+ cma_heap->heap = dma_heap_add(&exp_info);
+ if (IS_ERR(cma_heap->heap)) {
+ int ret = PTR_ERR(cma_heap->heap);
+
+ kfree(cma_heap);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int add_default_cma_heap(void)
+{
+ struct cma *default_cma = dev_get_cma_area(NULL);
+ int ret = 0;
+
+ if (default_cma)
+ ret = __add_cma_heap(default_cma, NULL);
+
+ return ret;
+}
+module_init(add_default_cma_heap);
+MODULE_DESCRIPTION("DMA-BUF CMA Heap");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c
new file mode 100644
index 000000000000..9f964ca3f59c
--- /dev/null
+++ b/drivers/dma-buf/heaps/heap-helpers.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <uapi/linux/dma-heap.h>
+
+#include "heap-helpers.h"
+
+void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
+ void (*free)(struct heap_helper_buffer *))
+{
+ buffer->priv_virt = NULL;
+ mutex_init(&buffer->lock);
+ buffer->vmap_cnt = 0;
+ buffer->vaddr = NULL;
+ buffer->pagecount = 0;
+ buffer->pages = NULL;
+ INIT_LIST_HEAD(&buffer->attachments);
+ buffer->free = free;
+}
+
+struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
+ int fd_flags)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &heap_helper_ops;
+ exp_info.size = buffer->size;
+ exp_info.flags = fd_flags;
+ exp_info.priv = buffer;
+
+ return dma_buf_export(&exp_info);
+}
+
+static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
+{
+ void *vaddr;
+
+ vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
+ if (!vaddr)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer)
+{
+ if (buffer->vmap_cnt > 0) {
+ WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
+ vunmap(buffer->vaddr);
+ }
+
+ buffer->free(buffer);
+}
+
+static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer)
+{
+ void *vaddr;
+
+ if (buffer->vmap_cnt) {
+ buffer->vmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = dma_heap_map_kernel(buffer);
+ if (IS_ERR(vaddr))
+ return vaddr;
+ buffer->vaddr = vaddr;
+ buffer->vmap_cnt++;
+ return vaddr;
+}
+
+static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer)
+{
+ if (!--buffer->vmap_cnt) {
+ vunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ }
+}
+
+struct dma_heaps_attachment {
+ struct device *dev;
+ struct sg_table table;
+ struct list_head list;
+};
+
+static int dma_heap_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct dma_heaps_attachment *a;
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+ int ret;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
+ buffer->pagecount, 0,
+ buffer->pagecount << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(a);
+ return ret;
+ }
+
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->list);
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void dma_heap_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct dma_heaps_attachment *a = attachment->priv;
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
+
+ sg_free_table(&a->table);
+ kfree(a);
+}
+
+static
+struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_heaps_attachment *a = attachment->priv;
+ struct sg_table *table;
+
+ table = &a->table;
+
+ if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
+ direction))
+ table = ERR_PTR(-ENOMEM);
+ return table;
+}
+
+static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
+}
+
+static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct heap_helper_buffer *buffer = vma->vm_private_data;
+
+	if (vmf->pgoff >= buffer->pagecount)
+ return VM_FAULT_SIGBUS;
+
+ vmf->page = buffer->pages[vmf->pgoff];
+ get_page(vmf->page);
+
+ return 0;
+}
+
+static const struct vm_operations_struct dma_heap_vm_ops = {
+ .fault = dma_heap_vm_fault,
+};
+
+static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+ return -EINVAL;
+
+ vma->vm_ops = &dma_heap_vm_ops;
+ vma->vm_private_data = buffer;
+
+ return 0;
+}
+
+static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+
+ dma_heap_buffer_destroy(buffer);
+}
+
+static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+ struct dma_heaps_attachment *a;
+ int ret = 0;
+
+ mutex_lock(&buffer->lock);
+
+ if (buffer->vmap_cnt)
+ invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);
+
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents,
+ direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return ret;
+}
+
+static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+ struct dma_heaps_attachment *a;
+
+ mutex_lock(&buffer->lock);
+
+ if (buffer->vmap_cnt)
+ flush_kernel_vmap_range(buffer->vaddr, buffer->size);
+
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents,
+ direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+
+ mutex_lock(&buffer->lock);
+ vaddr = dma_heap_buffer_vmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+
+ return vaddr;
+}
+
+static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+ struct heap_helper_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ dma_heap_buffer_vmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+}
+
+const struct dma_buf_ops heap_helper_ops = {
+ .map_dma_buf = dma_heap_map_dma_buf,
+ .unmap_dma_buf = dma_heap_unmap_dma_buf,
+ .mmap = dma_heap_mmap,
+ .release = dma_heap_dma_buf_release,
+ .attach = dma_heap_attach,
+ .detach = dma_heap_detach,
+ .begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
+ .end_cpu_access = dma_heap_dma_buf_end_cpu_access,
+ .vmap = dma_heap_dma_buf_vmap,
+ .vunmap = dma_heap_dma_buf_vunmap,
+};
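
These ops are only reached through the generic dma-buf importer API. A rough in-kernel sketch of the path that ends up in dma_heap_attach() and dma_heap_map_dma_buf(); dev and fd are assumed to come from the importing driver and this is not part of the patch:

/* Hypothetical importer sketch: map a heap-backed dma-buf for DMA. */
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>

static int import_heap_buffer(struct device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret = 0;

	dmabuf = dma_buf_get(fd);			/* takes a file reference */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);		/* -> dma_heap_attach() */
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto put_buf;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); /* -> dma_heap_map_dma_buf() */
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto detach;
	}

	/* ... program the device with sgt, then tear down in reverse ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
detach:
	dma_buf_detach(dmabuf, attach);
put_buf:
	dma_buf_put(dmabuf);
	return ret;
}
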
diff --git a/drivers/dma-buf/heaps/heap-helpers.h b/drivers/dma-buf/heaps/heap-helpers.h
new file mode 100644
index 000000000000..805d2df88024
--- /dev/null
+++ b/drivers/dma-buf/heaps/heap-helpers.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DMABUF Heaps helper code
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#ifndef _HEAP_HELPERS_H
+#define _HEAP_HELPERS_H
+
+#include <linux/dma-heap.h>
+#include <linux/list.h>
+
+/**
+ * struct heap_helper_buffer - helper buffer metadata
+ * @heap: back pointer to the heap the buffer came from
+ * @dmabuf: backing dma-buf for this buffer
+ * @size: size of the buffer
+ * @priv_virt:	pointer to heap-specific private value
+ * @lock:	mutex to protect the data in this structure
+ * @vmap_cnt:	count of vmap references on the buffer
+ * @vaddr:	vmap'ed virtual address
+ * @pagecount:	number of pages in the buffer
+ * @pages:	list of page pointers
+ * @attachments: list of device attachments
+ *
+ * @free:	heap callback to free the buffer
+ */
+struct heap_helper_buffer {
+ struct dma_heap *heap;
+ struct dma_buf *dmabuf;
+ size_t size;
+
+ void *priv_virt;
+ struct mutex lock;
+ int vmap_cnt;
+ void *vaddr;
+ pgoff_t pagecount;
+ struct page **pages;
+ struct list_head attachments;
+
+ void (*free)(struct heap_helper_buffer *buffer);
+};
+
+void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
+ void (*free)(struct heap_helper_buffer *));
+
+struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
+ int fd_flags);
+
+extern const struct dma_buf_ops heap_helper_ops;
+#endif /* _HEAP_HELPERS_H */
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
new file mode 100644
index 000000000000..0bf688e3c023
--- /dev/null
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF System heap exporter
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-heap.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <asm/page.h>
+
+#include "heap-helpers.h"
+
+struct dma_heap *sys_heap;
+
+static void system_heap_free(struct heap_helper_buffer *buffer)
+{
+ pgoff_t pg;
+
+ for (pg = 0; pg < buffer->pagecount; pg++)
+ __free_page(buffer->pages[pg]);
+ kfree(buffer->pages);
+ kfree(buffer);
+}
+
+static int system_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
+{
+ struct heap_helper_buffer *helper_buffer;
+ struct dma_buf *dmabuf;
+ int ret = -ENOMEM;
+ pgoff_t pg;
+
+ helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
+ if (!helper_buffer)
+ return -ENOMEM;
+
+ init_heap_helper_buffer(helper_buffer, system_heap_free);
+ helper_buffer->heap = heap;
+ helper_buffer->size = len;
+
+ helper_buffer->pagecount = len / PAGE_SIZE;
+ helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
+ sizeof(*helper_buffer->pages),
+ GFP_KERNEL);
+ if (!helper_buffer->pages) {
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ for (pg = 0; pg < helper_buffer->pagecount; pg++) {
+ /*
+ * Avoid trying to allocate memory if the process
+		 * has been killed by SIGKILL
+ */
+ if (fatal_signal_pending(current))
+ goto err1;
+
+ helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!helper_buffer->pages[pg])
+ goto err1;
+ }
+
+ /* create the dmabuf */
+ dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ goto err1;
+ }
+
+ helper_buffer->dmabuf = dmabuf;
+
+ ret = dma_buf_fd(dmabuf, fd_flags);
+ if (ret < 0) {
+ dma_buf_put(dmabuf);
+ /* just return, as put will call release and that will free */
+ return ret;
+ }
+
+ return ret;
+
+err1:
+ while (pg > 0)
+ __free_page(helper_buffer->pages[--pg]);
+ kfree(helper_buffer->pages);
+err0:
+ kfree(helper_buffer);
+
+ return ret;
+}
+
+static const struct dma_heap_ops system_heap_ops = {
+ .allocate = system_heap_allocate,
+};
+
+static int system_heap_create(void)
+{
+ struct dma_heap_export_info exp_info;
+ int ret = 0;
+
+ exp_info.name = "system";
+ exp_info.ops = &system_heap_ops;
+ exp_info.priv = NULL;
+
+ sys_heap = dma_heap_add(&exp_info);
+ if (IS_ERR(sys_heap))
+ ret = PTR_ERR(sys_heap);
+
+ return ret;
+}
+module_init(system_heap_create);
+MODULE_LICENSE("GPL v2");
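
Because the dma-buf core routes DMA_BUF_IOCTL_SYNC into the begin_cpu_access/end_cpu_access hooks provided by the heap helpers above, CPU access to an mmap()ed heap buffer should be bracketed by sync calls. A hedged userspace sketch, where buf_fd is a dma-buf fd such as one returned by DMA_HEAP_IOCTL_ALLOC:

/* Hypothetical userspace sketch: CPU writes bracketed by the dma-buf sync
 * ioctl so the heap's begin/end_cpu_access hooks run. */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

static int fill_buffer(int buf_fd, size_t len)
{
	struct dma_buf_sync sync = { 0 };
	void *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, buf_fd, 0);
	if (p == MAP_FAILED)
		return -1;

	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);	/* -> begin_cpu_access() */

	memset(p, 0, len);				/* plain CPU access to the buffer */

	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);	/* -> end_cpu_access() */

	return munmap(p, len);
}
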
diff --git a/drivers/dma-buf/selftest.c b/drivers/dma-buf/selftest.c
new file mode 100644
index 000000000000..c60b6944b4bd
--- /dev/null
+++ b/drivers/dma-buf/selftest.c
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+
+#include "selftest.h"
+
+enum {
+#define selftest(n, func) __idx_##n,
+#include "selftests.h"
+#undef selftest
+};
+
+#define selftest(n, f) [__idx_##n] = { .name = #n, .func = f },
+static struct selftest {
+ bool enabled;
+ const char *name;
+ int (*func)(void);
+} selftests[] = {
+#include "selftests.h"
+};
+#undef selftest
+
+/* Embed the line number into the parameter name so that we can order tests */
+#define param(n) __PASTE(igt__, __PASTE(__PASTE(__LINE__, __), n))
+#define selftest_0(n, func, id) \
+module_param_named(id, selftests[__idx_##n].enabled, bool, 0400);
+#define selftest(n, func) selftest_0(n, func, param(n))
+#include "selftests.h"
+#undef selftest
+
+int __sanitycheck__(void)
+{
+ pr_debug("Hello World!\n");
+ return 0;
+}
+
+static char *__st_filter;
+
+static bool apply_subtest_filter(const char *caller, const char *name)
+{
+ char *filter, *sep, *tok;
+ bool result = true;
+
+ filter = kstrdup(__st_filter, GFP_KERNEL);
+ for (sep = filter; (tok = strsep(&sep, ","));) {
+ bool allow = true;
+ char *sl;
+
+ if (*tok == '!') {
+ allow = false;
+ tok++;
+ }
+
+ if (*tok == '\0')
+ continue;
+
+ sl = strchr(tok, '/');
+ if (sl) {
+ *sl++ = '\0';
+ if (strcmp(tok, caller)) {
+ if (allow)
+ result = false;
+ continue;
+ }
+ tok = sl;
+ }
+
+ if (strcmp(tok, name)) {
+ if (allow)
+ result = false;
+ continue;
+ }
+
+ result = allow;
+ break;
+ }
+ kfree(filter);
+
+ return result;
+}
+
+int
+__subtests(const char *caller, const struct subtest *st, int count, void *data)
+{
+ int err;
+
+ for (; count--; st++) {
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+
+ if (!apply_subtest_filter(caller, st->name))
+ continue;
+
+ pr_info("dma-buf: Running %s/%s\n", caller, st->name);
+
+ err = st->func(data);
+ if (err && err != -EINTR) {
+ pr_err("dma-buf/%s: %s failed with error %d\n",
+ caller, st->name, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void set_default_test_all(struct selftest *st, unsigned long count)
+{
+ unsigned long i;
+
+ for (i = 0; i < count; i++)
+ if (st[i].enabled)
+ return;
+
+ for (i = 0; i < count; i++)
+ st[i].enabled = true;
+}
+
+static int run_selftests(struct selftest *st, unsigned long count)
+{
+ int err = 0;
+
+ set_default_test_all(st, count);
+
+ /* Tests are listed in natural order in selftests.h */
+ for (; count--; st++) {
+ if (!st->enabled)
+ continue;
+
+ pr_info("dma-buf: Running %s\n", st->name);
+ err = st->func();
+ if (err)
+ break;
+ }
+
+ if (WARN(err > 0 || err == -ENOTTY,
+ "%s returned %d, conflicting with selftest's magic values!\n",
+ st->name, err))
+ err = -1;
+
+ return err;
+}
+
+static int __init st_init(void)
+{
+ return run_selftests(selftests, ARRAY_SIZE(selftests));
+}
+
+static void __exit st_exit(void)
+{
+}
+
+module_param_named(st_filter, __st_filter, charp, 0400);
+module_init(st_init);
+module_exit(st_exit);
+
+MODULE_DESCRIPTION("Self-test harness for dma-buf");
+MODULE_LICENSE("GPL and additional rights");
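
Hooking a new test into this harness takes one selftest() line in selftests.h plus an int (*)(void) entry point that runs its subtests. A hypothetical sketch; my_feature and subtest_smoke are made-up names and not part of this patch:

/* Hypothetical sketch only: selftests.h would gain
 *	selftest(my_feature, my_feature)
 * and a new st-my-feature.c would provide the entry point. */
#include <linux/kernel.h>

#include "selftest.h"

static int subtest_smoke(void *data)
{
	return 0;	/* 0 = pass, any other value fails the run */
}

int my_feature(void)
{
	static const struct subtest tests[] = {
		SUBTEST(subtest_smoke),
	};

	return subtests(tests, NULL);
}

Individual tests can then be enabled through the generated per-test module parameters, or narrowed at load time with the st_filter parameter using "caller/name" tokens (prefix a token with '!' to exclude it).
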
diff --git a/drivers/dma-buf/selftest.h b/drivers/dma-buf/selftest.h
new file mode 100644
index 000000000000..45793aff6142
--- /dev/null
+++ b/drivers/dma-buf/selftest.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __SELFTEST_H__
+#define __SELFTEST_H__
+
+#include <linux/compiler.h>
+
+#define selftest(name, func) int func(void);
+#include "selftests.h"
+#undef selftest
+
+struct subtest {
+ int (*func)(void *data);
+ const char *name;
+};
+
+int __subtests(const char *caller,
+ const struct subtest *st,
+ int count,
+ void *data);
+#define subtests(T, data) \
+ __subtests(__func__, T, ARRAY_SIZE(T), data)
+
+#define SUBTEST(x) { x, #x }
+
+#endif /* __SELFTEST_H__ */
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
new file mode 100644
index 000000000000..5320386f02e5
--- /dev/null
+++ b/drivers/dma-buf/selftests.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used both as an enum index and, prefixed with its line number,
+ * as a module parameter enabling the test. It must be a unique, legal C identifier.
+ *
+ * The function should be of type int function(void). It may be conditionally
+ * compiled using #if IS_ENABLED(CONFIG_DMABUF_SELFTESTS).
+ *
+ * Tests are executed in order by igt/dmabuf_selftest.
+ */
+selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
+selftest(dma_fence, dma_fence)
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
new file mode 100644
index 000000000000..e593064341c8
--- /dev/null
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -0,0 +1,574 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-fence.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "selftest.h"
+
+static struct kmem_cache *slab_fences;
+
+static struct mock_fence {
+ struct dma_fence base;
+ struct spinlock lock;
+} *to_mock_fence(struct dma_fence *f) {
+ return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+ return "mock";
+}
+
+static void mock_fence_release(struct dma_fence *f)
+{
+ kmem_cache_free(slab_fences, to_mock_fence(f));
+}
+
+struct wait_cb {
+ struct dma_fence_cb cb;
+ struct task_struct *task;
+};
+
+static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ wake_up_process(container_of(cb, struct wait_cb, cb)->task);
+}
+
+static long mock_wait(struct dma_fence *f, bool intr, long timeout)
+{
+ const int state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+ struct wait_cb cb = { .task = current };
+
+ if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
+ return timeout;
+
+ while (timeout) {
+ set_current_state(state);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
+ break;
+
+ if (signal_pending_state(state, current))
+ break;
+
+ timeout = schedule_timeout(timeout);
+ }
+ __set_current_state(TASK_RUNNING);
+
+ if (!dma_fence_remove_callback(f, &cb.cb))
+ return timeout;
+
+ if (signal_pending_state(state, current))
+ return -ERESTARTSYS;
+
+ return -ETIME;
+}
+
+static const struct dma_fence_ops mock_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+ .wait = mock_wait,
+ .release = mock_fence_release,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+ struct mock_fence *f;
+
+ f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ spin_lock_init(&f->lock);
+ dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+ return &f->base;
+}
+
+static int sanitycheck(void *arg)
+{
+ struct dma_fence *f;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_signal(f);
+ dma_fence_put(f);
+
+ return 0;
+}
+
+static int test_signaling(void *arg)
+{
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_is_signaled(f)) {
+ pr_err("Fence unexpectedly signaled on creation\n");
+ goto err_free;
+ }
+
+ if (dma_fence_signal(f)) {
+ pr_err("Fence reported being already signaled\n");
+ goto err_free;
+ }
+
+ if (!dma_fence_is_signaled(f)) {
+ pr_err("Fence not reporting signaled\n");
+ goto err_free;
+ }
+
+ if (!dma_fence_signal(f)) {
+ pr_err("Fence reported not being already signaled\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+struct simple_cb {
+ struct dma_fence_cb cb;
+ bool seen;
+};
+
+static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
+}
+
+static int test_add_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (!cb.seen) {
+ pr_err("Callback failed!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_late_add_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_signal(f);
+
+ if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+ pr_err("Added callback, but fence was already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (cb.seen) {
+		pr_err("Callback called after failed attachment!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_rm_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ if (!dma_fence_remove_callback(f, &cb.cb)) {
+ pr_err("Failed to remove callback!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (cb.seen) {
+ pr_err("Callback still signaled after removal!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_late_rm_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (!cb.seen) {
+ pr_err("Callback failed!\n");
+ goto err_free;
+ }
+
+ if (dma_fence_remove_callback(f, &cb.cb)) {
+		pr_err("Callback removal succeeded after the callback ran!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_status(void *arg)
+{
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_get_status(f)) {
+ pr_err("Fence unexpectedly has signaled status on creation\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (!dma_fence_get_status(f)) {
+ pr_err("Fence not reporting signaled status\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_error(void *arg)
+{
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_set_error(f, -EIO);
+
+ if (dma_fence_get_status(f)) {
+ pr_err("Fence unexpectedly has error status before signal\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (dma_fence_get_status(f) != -EIO) {
+ pr_err("Fence not reporting error status, got %d\n",
+ dma_fence_get_status(f));
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_wait(void *arg)
+{
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
+ pr_err("Wait reported complete before being signaled\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+
+ if (dma_fence_wait_timeout(f, false, 0) != 0) {
+ pr_err("Wait reported incomplete after being signaled\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_signal(f);
+ dma_fence_put(f);
+ return err;
+}
+
+struct wait_timer {
+ struct timer_list timer;
+ struct dma_fence *f;
+};
+
+static void wait_timer(struct timer_list *timer)
+{
+ struct wait_timer *wt = from_timer(wt, timer, timer);
+
+ dma_fence_signal(wt->f);
+}
+
+static int test_wait_timeout(void *arg)
+{
+ struct wait_timer wt;
+ int err = -EINVAL;
+
+ timer_setup_on_stack(&wt.timer, wait_timer, 0);
+
+ wt.f = mock_fence();
+ if (!wt.f)
+ return -ENOMEM;
+
+ if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) {
+ pr_err("Wait reported complete before being signaled\n");
+ goto err_free;
+ }
+
+ mod_timer(&wt.timer, jiffies + 1);
+
+ if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) {
+ if (timer_pending(&wt.timer)) {
+			pr_notice("Timer did not fire within the jiffy!\n");
+ err = 0; /* not our fault! */
+ } else {
+ pr_err("Wait reported incomplete after timeout\n");
+ }
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ del_timer_sync(&wt.timer);
+ destroy_timer_on_stack(&wt.timer);
+ dma_fence_signal(wt.f);
+ dma_fence_put(wt.f);
+ return err;
+}
+
+static int test_stub(void *arg)
+{
+ struct dma_fence *f[64];
+ int err = -EINVAL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(f); i++) {
+ f[i] = dma_fence_get_stub();
+ if (!dma_fence_is_signaled(f[i])) {
+ pr_err("Obtained unsignaled stub fence!\n");
+ goto err;
+ }
+ }
+
+ err = 0;
+err:
+ while (i--)
+ dma_fence_put(f[i]);
+ return err;
+}
+
+/* Now off to the races! */
+
+struct race_thread {
+ struct dma_fence __rcu **fences;
+ struct task_struct *task;
+ bool before;
+ int id;
+};
+
+static void __wait_for_callbacks(struct dma_fence *f)
+{
+ spin_lock_irq(f->lock);
+ spin_unlock_irq(f->lock);
+}
+
+static int thread_signal_callback(void *arg)
+{
+ const struct race_thread *t = arg;
+ unsigned long pass = 0;
+ unsigned long miss = 0;
+ int err = 0;
+
+ while (!err && !kthread_should_stop()) {
+ struct dma_fence *f1, *f2;
+ struct simple_cb cb;
+
+ f1 = mock_fence();
+ if (!f1) {
+ err = -ENOMEM;
+ break;
+ }
+
+ rcu_assign_pointer(t->fences[t->id], f1);
+ smp_wmb();
+
+ rcu_read_lock();
+ do {
+ f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);
+ } while (!f2 && !kthread_should_stop());
+ rcu_read_unlock();
+
+ if (t->before)
+ dma_fence_signal(f1);
+
+ smp_store_mb(cb.seen, false);
+ if (!f2 || dma_fence_add_callback(f2, &cb.cb, simple_callback))
+ miss++, cb.seen = true;
+
+ if (!t->before)
+ dma_fence_signal(f1);
+
+ if (!cb.seen) {
+ dma_fence_wait(f2, false);
+ __wait_for_callbacks(f2);
+ }
+
+ if (!READ_ONCE(cb.seen)) {
+ pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n",
+ t->id, pass, miss,
+ t->before ? "before" : "after",
+ dma_fence_is_signaled(f2) ? "yes" : "no");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f2);
+
+ rcu_assign_pointer(t->fences[t->id], NULL);
+ smp_wmb();
+
+ dma_fence_put(f1);
+
+ pass++;
+ }
+
+ pr_info("%s[%d] completed %lu passes, %lu misses\n",
+ __func__, t->id, pass, miss);
+ return err;
+}
+
+static int race_signal_callback(void *arg)
+{
+ struct dma_fence __rcu *f[2] = {};
+ int ret = 0;
+ int pass;
+
+ for (pass = 0; !ret && pass <= 1; pass++) {
+ struct race_thread t[2];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(t); i++) {
+ t[i].fences = f;
+ t[i].id = i;
+ t[i].before = pass;
+ t[i].task = kthread_run(thread_signal_callback, &t[i],
+ "dma-fence:%d", i);
+ get_task_struct(t[i].task);
+ }
+
+ msleep(50);
+
+ for (i = 0; i < ARRAY_SIZE(t); i++) {
+ int err;
+
+ err = kthread_stop(t[i].task);
+ if (err && !ret)
+ ret = err;
+
+ put_task_struct(t[i].task);
+ }
+ }
+
+ return ret;
+}
+
+int dma_fence(void)
+{
+ static const struct subtest tests[] = {
+ SUBTEST(sanitycheck),
+ SUBTEST(test_signaling),
+ SUBTEST(test_add_callback),
+ SUBTEST(test_late_add_callback),
+ SUBTEST(test_rm_callback),
+ SUBTEST(test_late_rm_callback),
+ SUBTEST(test_status),
+ SUBTEST(test_error),
+ SUBTEST(test_wait),
+ SUBTEST(test_wait_timeout),
+ SUBTEST(test_stub),
+ SUBTEST(race_signal_callback),
+ };
+ int ret;
+
+ pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence));
+
+ slab_fences = KMEM_CACHE(mock_fence,
+ SLAB_TYPESAFE_BY_RCU |
+ SLAB_HWCACHE_ALIGN);
+ if (!slab_fences)
+ return -ENOMEM;
+
+ ret = subtests(tests, NULL);
+
+ kmem_cache_destroy(slab_fences);
+
+ return ret;
+}
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 051f6c2873c7..348b3a9170fa 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -132,17 +132,14 @@ static void timeline_fence_release(struct dma_fence *fence)
{
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
+ unsigned long flags;
+ spin_lock_irqsave(fence->lock, flags);
if (!list_empty(&pt->link)) {
- unsigned long flags;
-
- spin_lock_irqsave(fence->lock, flags);
- if (!list_empty(&pt->link)) {
- list_del(&pt->link);
- rb_erase(&pt->node, &parent->pt_tree);
- }
- spin_unlock_irqrestore(fence->lock, flags);
+ list_del(&pt->link);
+ rb_erase(&pt->node, &parent->pt_tree);
}
+ spin_unlock_irqrestore(fence->lock, flags);
sync_timeline_put(parent);
dma_fence_free(fence);
@@ -265,7 +262,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
p = &parent->rb_left;
} else {
if (dma_fence_get_rcu(&other->base)) {
- dma_fence_put(&pt->base);
+ sync_timeline_put(obj);
+ kfree(pt);
pt = other;
goto unlock;
}
@@ -410,5 +408,5 @@ const struct file_operations sw_sync_debugfs_fops = {
.open = sw_sync_debugfs_open,
.release = sw_sync_debugfs_release,
.unlocked_ioctl = sw_sync_ioctl,
- .compat_ioctl = sw_sync_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index ee4d1a96d779..5a5a1da01a00 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -221,7 +221,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
a_fences = get_fences(a, &a_num_fences);
b_fences = get_fences(b, &b_num_fences);
if (a_num_fences > INT_MAX - b_num_fences)
- return NULL;
+ goto err;
num_fences = a_num_fences + b_num_fences;
@@ -419,7 +419,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
* info->num_fences.
*/
if (!info.num_fences) {
- info.status = dma_fence_is_signaled(sync_file->fence);
+ info.status = dma_fence_get_status(sync_file->fence);
goto no_fences;
} else {
info.status = 1;
@@ -480,5 +480,5 @@ static const struct file_operations sync_file_fops = {
.release = sync_file_release,
.poll = sync_file_poll,
.unlocked_ioctl = sync_file_ioctl,
- .compat_ioctl = sync_file_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 9635897458a0..acb26c627d27 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -18,6 +18,8 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
struct udmabuf {
pgoff_t pagecount;
struct page **pages;
+ struct sg_table *sg;
+ struct miscdevice *device;
};
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
@@ -46,10 +48,10 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
return 0;
}
-static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
- enum dma_data_direction direction)
+static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
+ enum dma_data_direction direction)
{
- struct udmabuf *ubuf = at->dmabuf->priv;
+ struct udmabuf *ubuf = buf->priv;
struct sg_table *sg;
int ret;
@@ -61,7 +63,7 @@ static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
GFP_KERNEL);
if (ret < 0)
goto err;
- if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction)) {
+ if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) {
ret = -EINVAL;
goto err;
}
@@ -73,54 +75,89 @@ err:
return ERR_PTR(ret);
}
+static void put_sg_table(struct device *dev, struct sg_table *sg,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sg(dev, sg->sgl, sg->nents, direction);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
+ enum dma_data_direction direction)
+{
+ return get_sg_table(at->dev, at->dmabuf, direction);
+}
+
static void unmap_udmabuf(struct dma_buf_attachment *at,
struct sg_table *sg,
enum dma_data_direction direction)
{
- dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
- sg_free_table(sg);
- kfree(sg);
+ return put_sg_table(at->dev, sg, direction);
}
static void release_udmabuf(struct dma_buf *buf)
{
struct udmabuf *ubuf = buf->priv;
+ struct device *dev = ubuf->device->this_device;
pgoff_t pg;
+ if (ubuf->sg)
+ put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
+
for (pg = 0; pg < ubuf->pagecount; pg++)
put_page(ubuf->pages[pg]);
kfree(ubuf->pages);
kfree(ubuf);
}
-static void *kmap_udmabuf(struct dma_buf *buf, unsigned long page_num)
+static int begin_cpu_udmabuf(struct dma_buf *buf,
+ enum dma_data_direction direction)
{
struct udmabuf *ubuf = buf->priv;
- struct page *page = ubuf->pages[page_num];
+ struct device *dev = ubuf->device->this_device;
+
+ if (!ubuf->sg) {
+ ubuf->sg = get_sg_table(dev, buf, direction);
+ if (IS_ERR(ubuf->sg))
+ return PTR_ERR(ubuf->sg);
+ } else {
+ dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
+ direction);
+ }
- return kmap(page);
+ return 0;
}
-static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num,
- void *vaddr)
+static int end_cpu_udmabuf(struct dma_buf *buf,
+ enum dma_data_direction direction)
{
- kunmap(vaddr);
+ struct udmabuf *ubuf = buf->priv;
+ struct device *dev = ubuf->device->this_device;
+
+ if (!ubuf->sg)
+ return -EINVAL;
+
+ dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+ return 0;
}
static const struct dma_buf_ops udmabuf_ops = {
- .map_dma_buf = map_udmabuf,
- .unmap_dma_buf = unmap_udmabuf,
- .release = release_udmabuf,
- .map = kmap_udmabuf,
- .unmap = kunmap_udmabuf,
- .mmap = mmap_udmabuf,
+ .cache_sgt_mapping = true,
+ .map_dma_buf = map_udmabuf,
+ .unmap_dma_buf = unmap_udmabuf,
+ .release = release_udmabuf,
+ .mmap = mmap_udmabuf,
+ .begin_cpu_access = begin_cpu_udmabuf,
+ .end_cpu_access = end_cpu_udmabuf,
};
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)
-static long udmabuf_create(const struct udmabuf_create_list *head,
- const struct udmabuf_create_item *list)
+static long udmabuf_create(struct miscdevice *device,
+ struct udmabuf_create_list *head,
+ struct udmabuf_create_item *list)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct file *memfd = NULL;
@@ -187,6 +224,7 @@ static long udmabuf_create(const struct udmabuf_create_list *head,
exp_info.priv = ubuf;
exp_info.flags = O_RDWR;
+ ubuf->device = device;
buf = dma_buf_export(&exp_info);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
@@ -224,7 +262,7 @@ static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
list.offset = create.offset;
list.size = create.size;
- return udmabuf_create(&head, &list);
+ return udmabuf_create(filp->private_data, &head, &list);
}
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
@@ -243,7 +281,7 @@ static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
if (IS_ERR(list))
return PTR_ERR(list);
- ret = udmabuf_create(&head, list);
+ ret = udmabuf_create(filp->private_data, &head, list);
kfree(list);
return ret;
}
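
For completeness, the userspace side this driver serves looks roughly like the following, assuming the existing udmabuf uapi; error handling is omitted and the names are illustrative:

/* Hypothetical userspace sketch: wrap a sealed memfd region in a dma-buf
 * via /dev/udmabuf. Error handling omitted for brevity. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

static int memfd_to_dmabuf(size_t size)
{
	struct udmabuf_create create = { 0 };
	int memfd, devfd, buf_fd;

	memfd = memfd_create("guest-fb", MFD_ALLOW_SEALING);
	ftruncate(memfd, size);
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);	/* matches SEALS_WANTED above */

	devfd = open("/dev/udmabuf", O_RDWR);

	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;				/* must be page aligned */
	create.size   = size;				/* must be a page multiple */

	buf_fd = ioctl(devfd, UDMABUF_CREATE, &create);

	close(devfd);
	close(memfd);
	return buf_fd;					/* dma-buf fd, or -1 on failure */
}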