author     Linus Torvalds <torvalds@linux-foundation.org>  2024-05-06 19:27:58 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-05-06 19:27:58 +0200
commit     ee5b455b0adae9ecafb38b174c648c48f2a3c1a5
tree       7351be427ce58c88a412649f65cff45649e590b9
parent     Merge tag 'auxdisplay-v6.10-1' of git://git.kernel.org/pub/scm/linux/kernel/g...
parent     mm/slab: make __free(kfree) accept error pointers

Merge tag 'slab-for-6.9-rc7-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab fixes from Vlastimil Babka:
- Fix for cleanup infrastructure (Dan Carpenter)

  This makes the __free(kfree) cleanup hooks not crash on error
  pointers (a minimal sketch of the pattern follows this list).

- SLUB fix for freepointer checking (Nicolas Bouchinet)

  This fixes a recently introduced bug that manifests when
  init_on_free, CONFIG_SLAB_FREELIST_HARDENED and consistency checks
  (slub_debug=F) are all enabled, and results in false-positive
  freepointer corruption reports for caches that store the freepointer
  outside the object area (a simplified model of the layout follows
  the diff below).
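As a minimal, hypothetical sketch of the pattern the first fix hardens
(example_read_blob() and example_user() are made up for illustration;
only __free(kfree), kmalloc(), IS_ERR()/PTR_ERR()/ERR_PTR() and kfree()
are real kernel APIs): a scoped __free(kfree) variable can end up
holding an ERR_PTR() value, and before this fix the cleanup hook passed
that error pointer straight to kfree() on scope exit.

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical helper: returns a fresh buffer or an ERR_PTR() code. */
static void *example_read_blob(gfp_t gfp)
{
	void *p = kmalloc(64, gfp);

	return p ? p : ERR_PTR(-ENOMEM);
}

static int example_user(void)
{
	/* freed automatically whenever buf goes out of scope */
	void *buf __free(kfree) = example_read_blob(GFP_KERNEL);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* scope exit runs the kfree cleanup on buf */

	/* ... use buf; it is released on every return path ... */
	return 0;
}

With the old DEFINE_FREE(kfree, void *, if (_T) kfree(_T)) definition,
the IS_ERR() early return above handed an error pointer to kfree();
with the IS_ERR_OR_NULL() check the cleanup is skipped for error
pointers and NULL alike.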
* tag 'slab-for-6.9-rc7-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab: make __free(kfree) accept error pointers
  mm/slub: avoid zeroing outside-object freepointer for single free
 include/linux/slab.h |  4
 mm/slub.c            | 52
 2 files changed, 31 insertions, 25 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index e53cbfa18325..739b21262507 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -266,7 +266,7 @@ void kfree(const void *objp);
 void kfree_sensitive(const void *objp);
 size_t __ksize(const void *objp);
 
-DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
+DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
 
 /**
  * ksize - Report actual allocation size of associated object
@@ -792,7 +792,7 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t fla
 extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
 		      __realloc_size(3);
 extern void kvfree(const void *addr);
-DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T))
+DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))
 
 extern void kvfree_sensitive(const void *addr, size_t len);
diff --git a/mm/slub.c b/mm/slub.c
index 1bb2a93cf7b6..24f702afd458 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -557,6 +557,26 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
 }
 
+/*
+ * See comment in calculate_sizes().
+ */
+static inline bool freeptr_outside_object(struct kmem_cache *s)
+{
+	return s->offset >= s->inuse;
+}
+
+/*
+ * Return offset of the end of info block which is inuse + free pointer if
+ * not overlapping with object.
+ */
+static inline unsigned int get_info_end(struct kmem_cache *s)
+{
+	if (freeptr_outside_object(s))
+		return s->inuse + sizeof(void *);
+	else
+		return s->inuse;
+}
+
 /* Loop over all objects in a slab */
 #define for_each_object(__p, __s, __addr, __objects) \
 	for (__p = fixup_red_left(__s, __addr); \
@@ -845,26 +865,6 @@ static void print_section(char *level, char *text, u8 *addr,
 	metadata_access_disable();
 }
 
-/*
- * See comment in calculate_sizes().
- */
-static inline bool freeptr_outside_object(struct kmem_cache *s)
-{
-	return s->offset >= s->inuse;
-}
-
-/*
- * Return offset of the end of info block which is inuse + free pointer if
- * not overlapping with object.
- */
-static inline unsigned int get_info_end(struct kmem_cache *s)
-{
-	if (freeptr_outside_object(s))
-		return s->inuse + sizeof(void *);
-	else
-		return s->inuse;
-}
-
 static struct track *get_track(struct kmem_cache *s, void *object,
 			       enum track_item alloc)
 {
@@ -2092,15 +2092,20 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
 	 *
 	 * The initialization memset's clear the object and the metadata,
 	 * but don't touch the SLAB redzone.
+	 *
+	 * The object's freepointer is also avoided if stored outside the
+	 * object.
 	 */
 	if (unlikely(init)) {
 		int rsize;
+		unsigned int inuse;
 
+		inuse = get_info_end(s);
 		if (!kasan_has_integrated_init())
 			memset(kasan_reset_tag(x), 0, s->object_size);
 		rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
-		memset((char *)kasan_reset_tag(x) + s->inuse, 0,
-		       s->size - s->inuse - rsize);
+		memset((char *)kasan_reset_tag(x) + inuse, 0,
+		       s->size - inuse - rsize);
 	}
 	/* KASAN might put x into memory quarantine, delaying its reuse. */
 	return !kasan_slab_free(s, x, init);
@@ -3722,7 +3727,8 @@ static void *__slab_alloc_node(struct kmem_cache *s,
 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
 						   void *obj)
 {
-	if (unlikely(slab_want_init_on_free(s)) && obj)
+	if (unlikely(slab_want_init_on_free(s)) && obj &&
+	    !freeptr_outside_object(s))
 		memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 0,
 			sizeof(void *));
 }
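Below is a simplified, standalone userspace model of the layout
arithmetic the SLUB fix relies on. The struct and the example numbers
are stand-ins (not kernel data); the two helpers only mirror
freeptr_outside_object() and get_info_end() from the diff above. When
the free pointer lives outside the object (offset >= inuse), the
init_on_free memset of the metadata area must start past it, at
inuse + sizeof(void *), so the encoded free pointer is not zeroed and
later reported as corrupted by the slub_debug=F consistency checks.

#include <stdbool.h>
#include <stdio.h>

struct cache_layout {		/* stand-in for the relevant kmem_cache fields */
	unsigned int offset;	/* where the free pointer is stored */
	unsigned int inuse;	/* end of the object plus in-object metadata */
};

/* Mirrors freeptr_outside_object() from the diff above. */
static bool freeptr_outside_object(const struct cache_layout *s)
{
	return s->offset >= s->inuse;
}

/* Mirrors get_info_end(): where the debug/init metadata region begins. */
static unsigned int get_info_end(const struct cache_layout *s)
{
	if (freeptr_outside_object(s))
		return s->inuse + sizeof(void *);
	else
		return s->inuse;
}

int main(void)
{
	/* example values only: freepointer stored after vs. inside the object */
	struct cache_layout outside = { .offset = 64, .inuse = 64 };
	struct cache_layout inside  = { .offset = 0,  .inuse = 64 };

	printf("outside-object freepointer: metadata memset starts at %u\n",
	       get_info_end(&outside));
	printf("in-object freepointer:      metadata memset starts at %u\n",
	       get_info_end(&inside));
	return 0;
}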