| author | Roman Penyaev <rpenyaev@suse.de> | 2019-11-28 12:53:22 +0100 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2019-11-28 18:08:02 +0100 |
| commit | 6c5c240e412682f97aecd233c1e706822704aa28 (patch) | |
| tree | cf8e3e33f22e918f081477a44eb21502694cdb1a /fs | |
| parent | sr_vendor: support Beurer GL50 evo CD-on-a-chip devices. (diff) | |
io_uring: add mapping support for NOMMU archs
This is a somewhat unusual scenario, but I find it interesting to run fio workloads on LKL Linux, where the MMU is disabled. Other real architectures that run uClinux can probably also benefit from this patch.
Signed-off-by: Roman Penyaev <rpenyaev@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
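For context, the rings this patch makes mappable on NOMMU are the same ones userspace normally obtains with mmap() on an io_uring fd. Below is a minimal userspace sketch of that setup; it is not part of the patch, error handling is trimmed, and it assumes kernel headers new enough to provide __NR_io_uring_setup. The detail that matters here: on a NOMMU kernel the patched io_uring_mmap() accepts only shared mappings, so MAP_SHARED is mandatory.

```c
/*
 * Hypothetical userspace sketch (not from this patch): set up an
 * io_uring instance and map its SQ ring. The kernel-side
 * io_uring_mmap() added for NOMMU rejects non-shared mappings,
 * so MAP_SHARED is required there.
 */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static int map_sq_ring(unsigned entries, void **sq_ring, size_t *sq_sz)
{
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	fd = syscall(__NR_io_uring_setup, entries, &p);
	if (fd < 0)
		return -1;

	/* The kernel reports the ring layout in p.sq_off. */
	*sq_sz = p.sq_off.array + p.sq_entries * sizeof(unsigned);
	*sq_ring = mmap(NULL, *sq_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, IORING_OFF_SQ_RING);
	if (*sq_ring == MAP_FAILED) {
		close(fd);
		return -1;
	}
	return fd;
}
```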
Diffstat (limited to 'fs')
| -rw-r--r-- | fs/io_uring.c | 57 |
1 file changed, 51 insertions, 6 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 4c030a92de79..e6fc401e341f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4402,12 +4402,11 @@ static int io_uring_flush(struct file *file, void *data)
 	return 0;
 }
 
-static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
+static void *io_uring_validate_mmap_request(struct file *file,
+					    loff_t pgoff, size_t sz)
 {
-	loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
-	unsigned long sz = vma->vm_end - vma->vm_start;
 	struct io_ring_ctx *ctx = file->private_data;
-	unsigned long pfn;
+	loff_t offset = pgoff << PAGE_SHIFT;
 	struct page *page;
 	void *ptr;
 
@@ -4420,17 +4419,59 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
 		ptr = ctx->sq_sqes;
 		break;
 	default:
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 	}
 
 	page = virt_to_head_page(ptr);
 	if (sz > page_size(page))
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
+
+	return ptr;
+}
+
+#ifdef CONFIG_MMU
+
+static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	size_t sz = vma->vm_end - vma->vm_start;
+	unsigned long pfn;
+	void *ptr;
+
+	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
+	if (IS_ERR(ptr))
+		return PTR_ERR(ptr);
 
 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
 	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
 }
 
+#else /* !CONFIG_MMU */
+
+static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
+}
+
+static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
+{
+	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
+}
+
+static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
+	unsigned long addr, unsigned long len,
+	unsigned long pgoff, unsigned long flags)
+{
+	void *ptr;
+
+	ptr = io_uring_validate_mmap_request(file, pgoff, len);
+	if (IS_ERR(ptr))
+		return PTR_ERR(ptr);
+
+	return (unsigned long) ptr;
+}
+
+#endif /* !CONFIG_MMU */
+
 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 		u32, min_complete, u32, flags, const sigset_t __user *, sig,
 		size_t, sigsz)
@@ -4501,6 +4542,10 @@ static const struct file_operations io_uring_fops = {
 	.release	= io_uring_release,
 	.flush		= io_uring_flush,
 	.mmap		= io_uring_mmap,
+#ifndef CONFIG_MMU
+	.get_unmapped_area = io_uring_nommu_get_unmapped_area,
+	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
+#endif
 	.poll		= io_uring_poll,
 	.fasync		= io_uring_fasync,
 };
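Two design points are worth noting, neither spelled out in the commit message. First, on a NOMMU kernel there is no address translation, so an mmap() can only succeed by handing userspace the kernel's own buffer: io_uring_nommu_get_unmapped_area() returns the ring's kernel virtual address directly, and NOMMU_MAP_DIRECT advertises that the file supports such direct sharing. Second, the refactor folds the old int return into the pointer itself using the kernel's error-pointer helpers. A kernel-style sketch of that idiom follows; pick_buffer() and use_buffer() are illustrative names, not from the patch:

```c
#include <linux/err.h>		/* ERR_PTR(), IS_ERR(), PTR_ERR() */
#include <linux/errno.h>

/*
 * Illustrative only: like io_uring_validate_mmap_request(), this
 * returns either a valid pointer or an errno encoded with ERR_PTR(),
 * so one return channel carries both results and failures.
 */
static void *pick_buffer(int which, void *sq, void *cq)
{
	switch (which) {
	case 0:
		return sq;			/* success: a real pointer */
	case 1:
		return cq;
	default:
		return ERR_PTR(-EINVAL);	/* failure: errno in the pointer */
	}
}

/* Callers unpack the encoded errno, as both mmap paths in the patch do. */
static int use_buffer(void *sq, void *cq)
{
	void *ptr = pick_buffer(2, sq, cq);

	if (IS_ERR(ptr))
		return PTR_ERR(ptr);		/* propagates -EINVAL */
	return 0;
}
```

This is why the MMU and NOMMU implementations of io_uring_mmap() can share one validation helper: each caller converts the error pointer back to an int with PTR_ERR(), while the success case yields the page to map (MMU) or the address to return (NOMMU).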