| author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-04-02 22:55:34 +0200 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-04-02 22:55:34 +0200 |
| commit | 6cad420cc695867b4ca710bac21fde21a4102e4b (patch) | |
| tree | 890d42abc1e82c2cf5cef583584f88ca70116ce9 /mm/slub.c | |
| parent | Merge tag 'xfs-5.7-merge-8' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux (diff) | |
| parent | include/linux/huge_mm.h: check PageTail in hpage_nr_pages even when !THP (diff) | |
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
"A large amount of MM, plenty more to come.
Subsystems affected by this patch series:
- tools
- kthread
- kbuild
- scripts
- ocfs2
- vfs
- mm: slub, kmemleak, pagecache, gup, swap, memcg, pagemap, mremap,
sparsemem, kasan, pagealloc, vmscan, compaction, mempolicy,
hugetlbfs, hugetlb"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (155 commits)
include/linux/huge_mm.h: check PageTail in hpage_nr_pages even when !THP
mm/hugetlb: fix build failure with HUGETLB_PAGE but not HUGETLBFS
selftests/vm: fix map_hugetlb length used for testing read and write
mm/hugetlb: remove unnecessary memory fetch in PageHeadHuge()
mm/hugetlb.c: clean code by removing unnecessary initialization
hugetlb_cgroup: add hugetlb_cgroup reservation docs
hugetlb_cgroup: add hugetlb_cgroup reservation tests
hugetlb: support file_region coalescing again
hugetlb_cgroup: support noreserve mappings
hugetlb_cgroup: add accounting for shared mappings
hugetlb: disable region_add file_region coalescing
hugetlb_cgroup: add reservation accounting for private mappings
mm/hugetlb_cgroup: fix hugetlb_cgroup migration
hugetlb_cgroup: add interface for charge/uncharge hugetlb reservations
hugetlb_cgroup: add hugetlb_cgroup reservation counter
hugetlbfs: Use i_mmap_rwsem to address page fault/truncate race
hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
mm/memblock.c: remove redundant assignment to variable max_addr
mm: mempolicy: require at least one nodeid for MPOL_PREFERRED
mm: mempolicy: use VM_BUG_ON_VMA in queue_pages_test_walk()
...
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 27
1 file changed, 17 insertions, 10 deletions
```diff
diff --git a/mm/slub.c b/mm/slub.c
index 6589b41d5a60..3098e0cf2899 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -259,7 +259,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
 	 * freepointer to be restored incorrectly.
 	 */
 	return (void *)((unsigned long)ptr ^ s->random ^
-			(unsigned long)kasan_reset_tag((void *)ptr_addr));
+			swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
 #else
 	return ptr;
 #endif
@@ -2205,11 +2205,11 @@ static void unfreeze_partials(struct kmem_cache *s,
 	struct kmem_cache_node *n = NULL, *n2 = NULL;
 	struct page *page, *discard_page = NULL;
 
-	while ((page = c->partial)) {
+	while ((page = slub_percpu_partial(c))) {
 		struct page new;
 		struct page old;
 
-		c->partial = page->next;
+		slub_set_percpu_partial(c, page);
 
 		n2 = get_node(s, page_to_nid(page));
 		if (n != n2) {
@@ -2282,7 +2282,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		if (oldpage) {
 			pobjects = oldpage->pobjects;
 			pages = oldpage->pages;
-			if (drain && pobjects > s->cpu_partial) {
+			if (drain && pobjects > slub_cpu_partial(s)) {
 				unsigned long flags;
 				/*
 				 * partial array is full. Move the existing
@@ -2307,7 +2307,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
 								!= oldpage);
-	if (unlikely(!s->cpu_partial)) {
+	if (unlikely(!slub_cpu_partial(s))) {
 		unsigned long flags;
 
 		local_irq_save(flags);
@@ -3512,15 +3512,15 @@ static void set_cpu_partial(struct kmem_cache *s)
 	 * 50% to keep some capacity around for frees.
 	 */
 	if (!kmem_cache_has_cpu_partial(s))
-		s->cpu_partial = 0;
+		slub_set_cpu_partial(s, 0);
 	else if (s->size >= PAGE_SIZE)
-		s->cpu_partial = 2;
+		slub_set_cpu_partial(s, 2);
 	else if (s->size >= 1024)
-		s->cpu_partial = 6;
+		slub_set_cpu_partial(s, 6);
 	else if (s->size >= 256)
-		s->cpu_partial = 13;
+		slub_set_cpu_partial(s, 13);
 	else
-		s->cpu_partial = 30;
+		slub_set_cpu_partial(s, 30);
 #endif
 }
 
@@ -3581,6 +3581,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 		 */
 		s->offset = size;
 		size += sizeof(void *);
+	} else if (size > sizeof(void *)) {
+		/*
+		 * Store freelist pointer near middle of object to keep
+		 * it away from the edges of the object to avoid small
+		 * sized over/underflows from neighboring allocations.
+		 */
+		s->offset = ALIGN(size / 2, sizeof(void *));
 	}
 
 #ifdef CONFIG_SLUB_DEBUG
```
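The freelist_ptr() hunk is a hardening change. The stored next-free pointer and the address it is stored at live in the same slab, so most of their bits match and cancel in the XOR, leaking bits of the per-cache secret; byte-swapping the address with swab() first breaks that alignment. Below is a minimal user-space sketch of the transform, not the kernel implementation: all values are made up, and __builtin_bswap64() stands in for the kernel's swab().

```c
/*
 * Sketch of the hardened transform from the first hunk:
 *   stored = ptr ^ secret ^ swab(storage_address)
 * Illustrative values only; swab() is modelled with __builtin_bswap64().
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t freelist_obfuscate(uint64_t ptr, uint64_t secret,
				   uint64_t ptr_addr)
{
	/* Byte-swapping moves the address's predictable low bits (the
	 * offset within the slab) into the high bytes before the XOR. */
	return ptr ^ secret ^ __builtin_bswap64(ptr_addr);
}

int main(void)
{
	uint64_t secret = 0x8e1c3a5f90b24d76ULL;   /* per-cache random key */
	uint64_t ptr    = 0xffff888004030200ULL;   /* next free object */
	uint64_t addr   = 0xffff888004030140ULL;   /* where it is stored */

	uint64_t stored  = freelist_obfuscate(ptr, secret, addr);
	/* The transform is an involution: applying it again with the
	 * same inputs recovers the original pointer. */
	uint64_t decoded = freelist_obfuscate(stored, secret, addr);

	printf("stored  = 0x%016llx\n", (unsigned long long)stored);
	printf("decoded = 0x%016llx (matches ptr: %s)\n",
	       (unsigned long long)decoded, decoded == ptr ? "yes" : "no");
	return 0;
}
```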
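The unfreeze_partials() and put_cpu_partial() hunks are refactoring only: direct accesses to c->partial and s->cpu_partial are routed through the slub_percpu_partial()/slub_cpu_partial() wrapper macros, so the fields can be compiled out together with CONFIG_SLUB_CPU_PARTIAL without sprinkling #ifdefs over every call site. Here is a pared-down sketch of that pattern; the structs and macro bodies are simplified stand-ins, not the kernel definitions from include/linux/slub_def.h.

```c
/*
 * Simplified sketch of the accessor pattern used by the middle hunks.
 * When the "cpu partial" feature is configured out, the field and every
 * access to it vanish, with no #ifdef needed in callers.
 */
#include <stdio.h>

#define CONFIG_SLUB_CPU_PARTIAL		/* demo builds with the feature on */

struct page { struct page *next; };

struct kmem_cache_cpu {
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;		/* per-cpu list of partial slabs */
#endif
};

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)
#define slub_set_percpu_partial(c, p)	((c)->partial = (p)->next)
#else
#define slub_percpu_partial(c)		((struct page *)NULL)
#define slub_set_percpu_partial(c, p)	do { } while (0)
#endif

/* Caller code reads the same whether or not the feature exists. */
static struct page *pop_partial(struct kmem_cache_cpu *c)
{
	struct page *page = slub_percpu_partial(c);

	if (page)
		slub_set_percpu_partial(c, page);	/* advance to page->next */
	return page;
}

int main(void)
{
	struct page p2 = { NULL };
	struct page p1 = { &p2 };
	struct kmem_cache_cpu c = { &p1 };

	printf("popped %p, head now %p\n",
	       (void *)pop_partial(&c), (void *)slub_percpu_partial(&c));
	return 0;
}
```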
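The calculate_sizes() hunk moves the freelist pointer for ordinary caches from offset 0 to roughly the middle of the object, so a small linear over- or underflow from a neighboring allocation is less likely to land on it. The following user-space check shows the offsets the new expression produces; ALIGN() is re-implemented locally (in the kernel it comes from <linux/kernel.h>) and the object sizes are arbitrary examples.

```c
/*
 * Quick check of the new freelist-pointer placement from the last hunk:
 *   s->offset = ALIGN(size / 2, sizeof(void *));
 */
#include <stdio.h>
#include <stddef.h>

/* Round x up to the next multiple of a (a must be a power of two). */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t sizes[] = { 16, 32, 96, 192, 1024 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		size_t size = sizes[i];

		if (size > sizeof(void *))	/* same guard as the hunk */
			printf("size %4zu: freelist pointer at offset %zu\n",
			       size, ALIGN(size / 2, sizeof(void *)));
	}
	return 0;
}
```

For a 192-byte object, for instance, the pointer moves from offset 0 to offset 96, leaving half the object as a buffer against small overflows on either side.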