author     David Hildenbrand <david@redhat.com>            2021-05-07 03:06:06 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-05-07 09:26:34 +0200
commit     f7c8ce44ebb113b83135ada6e496db33d8a535e3 (patch)
tree       17d385488af2239fbe18092b87305bf5fa130a21 /mm
parent     mm: remove xlate_dev_kmem_ptr() (diff)
mm/vmalloc: remove vwrite()
The last user (/dev/kmem) is gone. Let's drop it.
Link: https://lkml.kernel.org/r/20210324102351.6932-4-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sonymobile.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: huang ying <huang.ying.caritas@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
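
For context: as the kernel-doc removed below notes, vwrite() was never needed in normal operation, because a caller that vmalloc()ed an area itself knows the whole range is mapped and can write to it with a plain memcpy(). Only /dev/kmem-style code, handed an arbitrary kernel virtual address, needed the hole-tolerant helper. A minimal sketch of the usual case (illustrative only; the function and names here are hypothetical, not part of this commit):

	#include <linux/mm.h>
	#include <linux/string.h>
	#include <linux/vmalloc.h>

	static void fill_own_area(void)
	{
		/*
		 * We allocated the area ourselves, so every byte of
		 * [area, area + PAGE_SIZE) is mapped: a plain memcpy()
		 * is safe and no vwrite()-style helper is required.
		 */
		char *area = vmalloc(PAGE_SIZE);

		if (!area)
			return;
		memcpy(area, "data", 4);
		vfree(area);
	}

With /dev/kmem gone, no in-tree caller is left that writes through an address it does not own, so the helper is dead code.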
Diffstat (limited to 'mm')
-rw-r--r--  mm/nommu.c   |  10
-rw-r--r--  mm/vmalloc.c | 116
2 files changed, 1 insertion(+), 125 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 5c9ab799c0e6..85a3a68dffb6 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -210,16 +210,6 @@ long vread(char *buf, char *addr, unsigned long count)
 	return count;
 }
 
-long vwrite(char *buf, char *addr, unsigned long count)
-{
-	/* Don't allow overflow */
-	if ((unsigned long) addr + count < count)
-		count = -(unsigned long) addr;
-
-	memcpy(addr, buf, count);
-	return count;
-}
-
 /*
  * vmalloc - allocate virtually contiguous memory
  *
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2868692c6807..a7f318c9e426 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3146,10 +3146,7 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
 		 * kmap() and get small overhead in this access function.
 		 */
 		if (p) {
-			/*
-			 * we can expect USER0 is not used (see vread/vwrite's
-			 * function description)
-			 */
+			/* We can expect USER0 is not used -- see vread() */
 			void *map = kmap_atomic(p);
 			memcpy(buf, map + offset, length);
 			kunmap_atomic(map);
@@ -3164,43 +3161,6 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
 	return copied;
 }
 
-static int aligned_vwrite(char *buf, char *addr, unsigned long count)
-{
-	struct page *p;
-	int copied = 0;
-
-	while (count) {
-		unsigned long offset, length;
-
-		offset = offset_in_page(addr);
-		length = PAGE_SIZE - offset;
-		if (length > count)
-			length = count;
-		p = vmalloc_to_page(addr);
-		/*
-		 * To do safe access to this _mapped_ area, we need
-		 * lock. But adding lock here means that we need to add
-		 * overhead of vmalloc()/vfree() calles for this _debug_
-		 * interface, rarely used. Instead of that, we'll use
-		 * kmap() and get small overhead in this access function.
-		 */
-		if (p) {
-			/*
-			 * we can expect USER0 is not used (see vread/vwrite's
-			 * function description)
-			 */
-			void *map = kmap_atomic(p);
-			memcpy(map + offset, buf, length);
-			kunmap_atomic(map);
-		}
-		addr += length;
-		buf += length;
-		copied += length;
-		count -= length;
-	}
-	return copied;
-}
-
 /**
  * vread() - read vmalloc area in a safe way.
  * @buf: buffer for reading data
@@ -3284,80 +3244,6 @@ finished:
 }
 
 /**
- * vwrite() - write vmalloc area in a safe way.
- * @buf: buffer for source data
- * @addr: vm address.
- * @count: number of bytes to be read.
- *
- * This function checks that addr is a valid vmalloc'ed area, and
- * copy data from a buffer to the given addr. If specified range of
- * [addr...addr+count) includes some valid address, data is copied from
- * proper area of @buf. If there are memory holes, no copy to hole.
- * IOREMAP area is treated as memory hole and no copy is done.
- *
- * If [addr...addr+count) doesn't includes any intersects with alive
- * vm_struct area, returns 0. @buf should be kernel's buffer.
- *
- * Note: In usual ops, vwrite() is never necessary because the caller
- * should know vmalloc() area is valid and can use memcpy().
- * This is for routines which have to access vmalloc area without
- * any information, as /dev/kmem.
- *
- * Return: number of bytes for which addr and buf should be
- * increased (same number as @count) or %0 if [addr...addr+count)
- * doesn't include any intersection with valid vmalloc area
- */
-long vwrite(char *buf, char *addr, unsigned long count)
-{
-	struct vmap_area *va;
-	struct vm_struct *vm;
-	char *vaddr;
-	unsigned long n, buflen;
-	int copied = 0;
-
-	/* Don't allow overflow */
-	if ((unsigned long) addr + count < count)
-		count = -(unsigned long) addr;
-	buflen = count;
-
-	spin_lock(&vmap_area_lock);
-	list_for_each_entry(va, &vmap_area_list, list) {
-		if (!count)
-			break;
-
-		if (!va->vm)
-			continue;
-
-		vm = va->vm;
-		vaddr = (char *) vm->addr;
-		if (addr >= vaddr + get_vm_area_size(vm))
-			continue;
-		while (addr < vaddr) {
-			if (count == 0)
-				goto finished;
-			buf++;
-			addr++;
-			count--;
-		}
-		n = vaddr + get_vm_area_size(vm) - addr;
-		if (n > count)
-			n = count;
-		if (!(vm->flags & VM_IOREMAP)) {
-			aligned_vwrite(buf, addr, n);
-			copied++;
-		}
-		buf += n;
-		addr += n;
-		count -= n;
-	}
-finished:
-	spin_unlock(&vmap_area_lock);
-	if (!copied)
-		return 0;
-	return buflen;
-}
-
-/**
  * remap_vmalloc_range_partial - map vmalloc pages to userspace
  * @vma: vma to cover
  * @uaddr: target user address to start at
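
A side note on the overflow guard that the removed vwrite() shared with the surviving vread(): if addr + count wraps around the top of the address space, the unsigned sum ends up smaller than count, and count is clamped to -(unsigned long) addr, i.e. exactly the number of bytes from addr up to the end of the address space. A standalone sketch of the same arithmetic (hypothetical userspace demo, not kernel code):

	#include <stdio.h>

	/*
	 * Same clamp as the removed vwrite(): keep addr + count from
	 * wrapping past the top of the address space.
	 */
	static unsigned long clamp_count(unsigned long addr, unsigned long count)
	{
		if (addr + count < count)	/* unsigned sum wrapped */
			count = -addr;		/* bytes left up to the top */
		return count;
	}

	int main(void)
	{
		unsigned long addr = ~0UL - 0xf;	/* 0x10 bytes below the top */

		/* Asking for 0x100 bytes is clamped to the 0x10 that fit. */
		printf("%#lx\n", clamp_count(addr, 0x100));	/* prints 0x10 */
		return 0;
	}

vread() keeps this guard; only the write-side copy of it disappears along with its sole user.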