author     Linus Torvalds <torvalds@linux-foundation.org>    2018-02-04 01:25:42 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>    2018-02-04 01:25:42 +0100
commit     617aebe6a97efa539cc4b8a52adccd89596e6be0 (patch)
tree       51c7753c940fd3727b8cc3e93553c57f89d1d9d2 /mm/slub.c
parent     Merge tag 'pstore-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git... (diff)
parent     lkdtm: Update usercopy tests for whitelisting (diff)
Merge tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull hardened usercopy whitelisting from Kees Cook:
"Currently, hardened usercopy performs dynamic bounds checking on slab
cache objects. This is good, but still leaves a lot of kernel memory
available to be copied to/from userspace in the face of bugs.
To further restrict what memory is available for copying, this creates
a way to whitelist specific areas of a given slab cache object for
copying to/from userspace, allowing much finer granularity of access
control.
Slab caches that are never exposed to userspace can declare no
whitelist for their objects, thereby keeping them unavailable to
userspace via dynamic copy operations. (Note, an implicit form of
whitelisting is the use of constant sizes in usercopy operations and
get_user()/put_user(); these bypass all hardened usercopy checks since
these sizes cannot change at runtime.)
This new check is WARN-by-default, so any mistakes can be found over
the next several releases without breaking anyone's system.
The series has roughly the following sections:
- remove %p and improve reporting with offset
- prepare infrastructure and whitelist kmalloc
- update VFS subsystem with whitelists
- update SCSI subsystem with whitelists
- update network subsystem with whitelists
- update process memory with whitelists
- update per-architecture thread_struct with whitelists
- update KVM with whitelists and fix ioctl bug
- mark all other allocations as not whitelisted
- update lkdtm for more sensible test coverage"
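The whitelisting described above is declared per cache: the series adds kmem_cache_create_usercopy(), which takes a useroffset/usersize pair naming the only byte range of each object that hardened usercopy will allow to be copied to or from userspace. Below is a minimal sketch of how a cache owner might whitelist a single member; the struct and all names ("foo", foo_cachep) are hypothetical and not taken from the series, and the exact argument order and the sizeof_field() helper are assumed from the v4.16-era API rather than quoted from it.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>

/* Hypothetical object: only 'buf' is ever handed to copy_to_user(). */
struct foo {
	spinlock_t lock;	/* kernel-internal, never copied to userspace */
	char buf[128];		/* the sole usercopy region                   */
	unsigned long cookie;	/* kernel-internal                            */
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	/* Whitelist exactly the bytes of foo.buf; dynamic copies that stray
	 * outside this window are reported by hardened usercopy. */
	foo_cachep = kmem_cache_create_usercopy("foo_cache",
				sizeof(struct foo), 0, SLAB_HWCACHE_ALIGN,
				offsetof(struct foo, buf),
				sizeof_field(struct foo, buf),
				NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

Caches created without such a region end up with usersize 0 ("usercopy: Restrict non-usercopy caches to size 0" in the list below), so any dynamic copy touching their objects is flagged, by default through the warn-only usercopy_fallback path visible in the mm/slub.c diff at the bottom.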
* tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (38 commits)
lkdtm: Update usercopy tests for whitelisting
usercopy: Restrict non-usercopy caches to size 0
kvm: x86: fix KVM_XEN_HVM_CONFIG ioctl
kvm: whitelist struct kvm_vcpu_arch
arm: Implement thread_struct whitelist for hardened usercopy
arm64: Implement thread_struct whitelist for hardened usercopy
x86: Implement thread_struct whitelist for hardened usercopy
fork: Provide usercopy whitelisting for task_struct
fork: Define usercopy region in thread_stack slab caches
fork: Define usercopy region in mm_struct slab caches
net: Restrict unwhitelisted proto caches to size 0
sctp: Copy struct sctp_sock.autoclose to userspace using put_user()
sctp: Define usercopy region in SCTP proto slab cache
caif: Define usercopy region in caif proto slab cache
ip: Define usercopy region in IP proto slab cache
net: Define usercopy region in struct proto slab cache
scsi: Define usercopy region in scsi_sense_cache slab cache
cifs: Define usercopy region in cifs_request slab cache
vxfs: Define usercopy region in vxfs_inode slab cache
ufs: Define usercopy region in ufs_inode_cache slab cache
...
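One pattern worth noting from the list above: the sctp autoclose commit avoids needing any whitelist at all by switching a small copy to put_user(), whose constant size bypasses the dynamic check entirely (as the pull message notes). An illustrative stand-in, not the actual net/sctp code:

#include <linux/uaccess.h>

/* Illustrative only: copy one int member out to userspace. A variable-size
 * copy_to_user() from inside the slab object would need the containing
 * field whitelisted; the constant-size put_user() below is never subject
 * to the dynamic slab check. */
static int example_copy_autoclose(int autoclose, int __user *optval)
{
	return put_user(autoclose, optval);
}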
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  49
1 file changed, 37 insertions(+), 12 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 693b7074bc53..cc71176c6eef 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3813,13 +3813,15 @@ EXPORT_SYMBOL(__kmalloc_node);
 
 #ifdef CONFIG_HARDENED_USERCOPY
 /*
- * Rejects objects that are incorrectly sized.
+ * Rejects incorrectly sized objects and objects that are to be copied
+ * to/from userspace but do not fall entirely within the containing slab
+ * cache's usercopy region.
  *
  * Returns NULL if check passes, otherwise const char * to name of cache
  * to indicate an error.
  */
-const char *__check_heap_object(const void *ptr, unsigned long n,
-				struct page *page)
+void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
+			 bool to_user)
 {
 	struct kmem_cache *s;
 	unsigned long offset;
@@ -3827,11 +3829,11 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
 
 	/* Find object and usable object size. */
 	s = page->slab_cache;
-	object_size = slab_ksize(s);
 
 	/* Reject impossible pointers. */
 	if (ptr < page_address(page))
-		return s->name;
+		usercopy_abort("SLUB object not in SLUB page?!", NULL,
+			       to_user, 0, n);
 
 	/* Find offset within object. */
 	offset = (ptr - page_address(page)) % s->size;
@@ -3839,15 +3841,31 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
 	/* Adjust for redzone and reject if within the redzone. */
 	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
 		if (offset < s->red_left_pad)
-			return s->name;
+			usercopy_abort("SLUB object in left red zone",
+				       s->name, to_user, offset, n);
 		offset -= s->red_left_pad;
 	}
 
-	/* Allow address range falling entirely within object size. */
-	if (offset <= object_size && n <= object_size - offset)
-		return NULL;
+	/* Allow address range falling entirely within usercopy region. */
+	if (offset >= s->useroffset &&
+	    offset - s->useroffset <= s->usersize &&
+	    n <= s->useroffset - offset + s->usersize)
+		return;
+
+	/*
+	 * If the copy is still within the allocated object, produce
+	 * a warning instead of rejecting the copy. This is intended
+	 * to be a temporary method to find any missing usercopy
+	 * whitelists.
+	 */
+	object_size = slab_ksize(s);
+	if (usercopy_fallback &&
+	    offset <= object_size && n <= object_size - offset) {
+		usercopy_warn("SLUB object", s->name, to_user, offset, n);
+		return;
+	}
 
-	return s->name;
+	usercopy_abort("SLUB object", s->name, to_user, offset, n);
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
 
@@ -4181,7 +4199,7 @@ void __init kmem_cache_init(void)
 	kmem_cache = &boot_kmem_cache;
 
 	create_boot_cache(kmem_cache_node, "kmem_cache_node",
-		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
+		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
 
 	register_hotmemory_notifier(&slab_memory_callback_nb);
 
@@ -4191,7 +4209,7 @@ void __init kmem_cache_init(void)
 	create_boot_cache(kmem_cache, "kmem_cache",
 			offsetof(struct kmem_cache, node) +
 				nr_node_ids * sizeof(struct kmem_cache_node *),
-		       SLAB_HWCACHE_ALIGN);
+		       SLAB_HWCACHE_ALIGN, 0, 0);
 
 	kmem_cache = bootstrap(&boot_kmem_cache);
 
@@ -5061,6 +5079,12 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
 SLAB_ATTR_RO(cache_dma);
 #endif
 
+static ssize_t usersize_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%zu\n", s->usersize);
+}
+SLAB_ATTR_RO(usersize);
+
 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
@@ -5435,6 +5459,7 @@ static struct attribute *slab_attrs[] = {
 #ifdef CONFIG_FAILSLAB
 	&failslab_attr.attr,
 #endif
+	&usersize_attr.attr,
 	NULL
 };
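The core of the new SLUB check above is the containment test: the requested copy range [offset, offset + n) must fall entirely inside the cache's [useroffset, useroffset + usersize) window, written so that offset + n is never computed and cannot wrap. A standalone userspace sketch of that arithmetic follows, purely for illustration; the function name and numbers are made up, and this is not kernel code.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative re-statement of the whitelist containment test from
 * __check_heap_object(): a copy of n bytes at 'offset' within the object is
 * allowed only if it lies entirely within the usercopy region starting at
 * 'useroffset' and spanning 'usersize' bytes. */
static bool in_usercopy_region(unsigned long offset, unsigned long n,
			       unsigned long useroffset, unsigned long usersize)
{
	return offset >= useroffset &&
	       offset - useroffset <= usersize &&
	       n <= useroffset - offset + usersize;
}

int main(void)
{
	/* Region covers bytes 16..79 of the object (useroffset 16, usersize 64). */
	assert(in_usercopy_region(16, 64, 16, 64));   /* exactly the region */
	assert(in_usercopy_region(32, 8, 16, 64));    /* strictly inside    */
	assert(!in_usercopy_region(0, 8, 16, 64));    /* starts before it   */
	assert(!in_usercopy_region(72, 16, 16, 64));  /* runs past the end  */
	puts("whitelist containment checks behave as expected");
	return 0;
}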