author    | Dave Airlie <airlied@redhat.com> | 2020-03-12 03:42:41 +0100
committer | Dave Airlie <airlied@redhat.com> | 2020-03-12 03:42:56 +0100
commit    | 9e12da086e5e38f7b4f8d6e02a82447f4165fbee (patch)
tree      | 96e6029378ccb9ae3f9530d019c8216a3ca4a556 /drivers/dma-buf
parent    | Merge v5.6-rc5 into drm-next (diff)
parent    | drm/virtio: add case for shmem objects in virtio_gpu_cleanup_object(..) (diff)
Merge tag 'drm-misc-next-2020-03-09' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.7:
UAPI Changes:
Cross-subsystem Changes:
Core Changes:
Driver Changes:
- fb-helper: Remove drm_fb_helper_{add,add_all,remove}_one_connector
- fbdev: some cleanups and dead-code removal
- Conversions to simple-encoder
- zero-length array removal
- Panel: panel-dpi support in panel-simple, Novatek NT35510, Elida KD35T133
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20200309135439.dicfnbo4ikj4tkz7@gilmour
Diffstat (limited to 'drivers/dma-buf')
-rw-r--r-- | drivers/dma-buf/Kconfig   |  10
-rw-r--r-- | drivers/dma-buf/dma-buf.c | 110
2 files changed, 111 insertions, 9 deletions
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index e7d820ce0724..ef73b678419c 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -39,6 +39,16 @@ config UDMABUF
 	  A driver to let userspace turn memfd regions into dma-bufs.
 	  Qemu can use this to create host dmabufs for guest framebuffers.
 
+config DMABUF_MOVE_NOTIFY
+	bool "Move notify between drivers (EXPERIMENTAL)"
+	default n
+	help
+	  Don't pin buffers if the dynamic DMA-buf interface is available on both the
+	  exporter as well as the importer. This fixes a security problem where
+	  userspace is able to pin unrestricted amounts of memory through DMA-buf.
+	  But marked experimental because we don't jet have a consistent execution
+	  context and memory management between drivers.
+
 config DMABUF_SELFTESTS
 	tristate "Selftests for the dma-buf interfaces"
 	default n
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index c343c7c10b4c..ccc9eda1bc28 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -525,7 +525,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	}
 
 	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
-		    exp_info->ops->dynamic_mapping))
+		    (exp_info->ops->pin || exp_info->ops->unpin)))
+		return ERR_PTR(-EINVAL);
+
+	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
 		return ERR_PTR(-EINVAL);
 
 	if (!try_module_get(exp_info->owner))
@@ -652,7 +655,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
  * calls attach() of dma_buf_ops to allow device-specific attach functionality
  * @dmabuf:		[in]	buffer to attach device to.
  * @dev:		[in]	device to be attached.
- * @dynamic_mapping:	[in]	calling convention for map/unmap
+ * @importer_ops	[in]	importer operations for the attachment
+ * @importer_priv	[in]	importer private pointer for the attachment
  *
  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
  * must be cleaned up by calling dma_buf_detach().
@@ -668,7 +672,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
  */
 struct dma_buf_attachment *
 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
-		       bool dynamic_mapping)
+		       const struct dma_buf_attach_ops *importer_ops,
+		       void *importer_priv)
 {
 	struct dma_buf_attachment *attach;
 	int ret;
@@ -676,13 +681,17 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 	if (WARN_ON(!dmabuf || !dev))
 		return ERR_PTR(-EINVAL);
 
+	if (WARN_ON(importer_ops && !importer_ops->move_notify))
+		return ERR_PTR(-EINVAL);
+
 	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
 	if (!attach)
 		return ERR_PTR(-ENOMEM);
 
 	attach->dev = dev;
 	attach->dmabuf = dmabuf;
-	attach->dynamic_mapping = dynamic_mapping;
+	attach->importer_ops = importer_ops;
+	attach->importer_priv = importer_priv;
 
 	if (dmabuf->ops->attach) {
 		ret = dmabuf->ops->attach(dmabuf, attach);
@@ -701,15 +710,19 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 	    dma_buf_is_dynamic(dmabuf)) {
 		struct sg_table *sgt;
 
-		if (dma_buf_is_dynamic(attach->dmabuf))
+		if (dma_buf_is_dynamic(attach->dmabuf)) {
 			dma_resv_lock(attach->dmabuf->resv, NULL);
+			ret = dma_buf_pin(attach);
+			if (ret)
+				goto err_unlock;
+		}
 
 		sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
 		if (!sgt)
 			sgt = ERR_PTR(-ENOMEM);
 		if (IS_ERR(sgt)) {
 			ret = PTR_ERR(sgt);
-			goto err_unlock;
+			goto err_unpin;
 		}
 		if (dma_buf_is_dynamic(attach->dmabuf))
 			dma_resv_unlock(attach->dmabuf->resv);
@@ -723,6 +736,10 @@ err_attach:
 	kfree(attach);
 	return ERR_PTR(ret);
 
+err_unpin:
+	if (dma_buf_is_dynamic(attach->dmabuf))
+		dma_buf_unpin(attach);
+
 err_unlock:
 	if (dma_buf_is_dynamic(attach->dmabuf))
 		dma_resv_unlock(attach->dmabuf->resv);
@@ -743,7 +760,7 @@ EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 					  struct device *dev)
 {
-	return dma_buf_dynamic_attach(dmabuf, dev, false);
+	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(dma_buf_attach);
 
@@ -766,8 +783,10 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 
 		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
 
-		if (dma_buf_is_dynamic(attach->dmabuf))
+		if (dma_buf_is_dynamic(attach->dmabuf)) {
+			dma_buf_unpin(attach);
 			dma_resv_unlock(attach->dmabuf->resv);
+		}
 	}
 
 	dma_resv_lock(dmabuf->resv, NULL);
@@ -781,6 +800,44 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 EXPORT_SYMBOL_GPL(dma_buf_detach);
 
 /**
+ * dma_buf_pin - Lock down the DMA-buf
+ *
+ * @attach:	[in]	attachment which should be pinned
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int dma_buf_pin(struct dma_buf_attachment *attach)
+{
+	struct dma_buf *dmabuf = attach->dmabuf;
+	int ret = 0;
+
+	dma_resv_assert_held(dmabuf->resv);
+
+	if (dmabuf->ops->pin)
+		ret = dmabuf->ops->pin(attach);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_pin);
+
+/**
+ * dma_buf_unpin - Remove lock from DMA-buf
+ *
+ * @attach:	[in]	attachment which should be unpinned
+ */
+void dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+	struct dma_buf *dmabuf = attach->dmabuf;
+
+	dma_resv_assert_held(dmabuf->resv);
+
+	if (dmabuf->ops->unpin)
+		dmabuf->ops->unpin(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_unpin);
+
+/**
  * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
  * dma_buf_ops.
@@ -799,6 +856,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 					enum dma_data_direction direction)
 {
 	struct sg_table *sg_table;
+	int r;
 
 	might_sleep();
 
@@ -820,13 +878,23 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 		return attach->sgt;
 	}
 
-	if (dma_buf_is_dynamic(attach->dmabuf))
+	if (dma_buf_is_dynamic(attach->dmabuf)) {
 		dma_resv_assert_held(attach->dmabuf->resv);
+		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+			r = dma_buf_pin(attach);
+			if (r)
+				return ERR_PTR(r);
+		}
+	}
 
 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 	if (!sg_table)
 		sg_table = ERR_PTR(-ENOMEM);
 
+	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
+	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+		dma_buf_unpin(attach);
+
 	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
 		attach->sgt = sg_table;
 		attach->dir = direction;
@@ -865,10 +933,34 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 		dma_resv_assert_held(attach->dmabuf->resv);
 
 	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+
+	if (dma_buf_is_dynamic(attach->dmabuf) &&
+	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+		dma_buf_unpin(attach);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 
 /**
+ * dma_buf_move_notify - notify attachments that DMA-buf is moving
+ *
+ * @dmabuf:	[in]	buffer which is moving
+ *
+ * Informs all attachmenst that they need to destroy and recreated all their
+ * mappings.
+ */
+void dma_buf_move_notify(struct dma_buf *dmabuf)
+{
+	struct dma_buf_attachment *attach;
+
+	dma_resv_assert_held(dmabuf->resv);
+
+	list_for_each_entry(attach, &dmabuf->attachments, node)
+		if (attach->importer_ops)
+			attach->importer_ops->move_notify(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_move_notify);
+
+/**
  * DOC: cpu access
  *
  * There are mutliple reasons for supporting CPU access to a dma buffer object:
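For context, here is a minimal importer-side sketch of how a driver might use the dynamic attachment interface merged above. It is not part of this pull: all my_* names and the surrounding driver structure are assumptions made for illustration only; dma_buf_dynamic_attach(), struct dma_buf_attach_ops, dma_buf_map_attachment()/dma_buf_unmap_attachment() and the dma_resv locking rules are the ones visible in the diff.

/*
 * Hypothetical importer sketch -- not part of the patch.  All my_* names
 * are invented; only the dma-buf calls and locking rules come from the
 * code above.
 */
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/err.h>

struct my_importer {
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;	/* cached mapping, NULL once invalidated */
};

/* Called by a dynamic exporter, with dmabuf->resv held, when the buffer moves. */
static void my_move_notify(struct dma_buf_attachment *attach)
{
	struct my_importer *imp = attach->importer_priv;

	if (imp->sgt) {
		dma_buf_unmap_attachment(attach, imp->sgt, DMA_BIDIRECTIONAL);
		imp->sgt = NULL;	/* re-map lazily on next use */
	}
}

static const struct dma_buf_attach_ops my_importer_ops = {
	.move_notify = my_move_notify,
};

static int my_import(struct my_importer *imp, struct dma_buf *dmabuf,
		     struct device *dev)
{
	struct sg_table *sgt;

	/* Non-NULL importer_ops makes this a dynamic attachment. */
	imp->attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, imp);
	if (IS_ERR(imp->attach))
		return PTR_ERR(imp->attach);

	/* Dynamic importers map while holding the reservation lock. */
	dma_resv_lock(dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(imp->attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(dmabuf->resv);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, imp->attach);
		return PTR_ERR(sgt);
	}

	imp->sgt = sgt;
	return 0;
}

With plain dma_buf_attach() (NULL importer_ops), or with CONFIG_DMABUF_MOVE_NOTIFY disabled, the core instead pins the buffer via dma_buf_pin()/dma_buf_unpin() around the mapping, which is the fallback behaviour the new Kconfig option describes.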