author    | Bhaskar Chowdhury <unixbhaskar@gmail.com>      | 2021-04-30 07:54:51 +0200
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-04-30 20:20:36 +0200
commit    | dc84207d00bef4a5d826e68bc0a310327b464fcf (patch)
tree      | 0fd067f3e04bd8d151270d2777f5f63b59992921 /mm/slub.c
parent    | mm, slub: enable slub_debug static key when creating cache with explicit debu... (diff)
download  | linux-dc84207d00bef4a5d826e68bc0a310327b464fcf.tar.xz, linux-dc84207d00bef4a5d826e68bc0a310327b464fcf.zip
mm/slub.c: trivial typo fixes
s/operatios/operations/
s/Mininum/Minimum/
s/mininum/minimum/ ...in two different places.
Link: https://lkml.kernel.org/r/20210325044940.14516-1-unixbhaskar@gmail.com
Signed-off-by: Bhaskar Chowdhury <unixbhaskar@gmail.com>
Acked-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 8
1 file changed, 4 insertions, 4 deletions
diff --git a/mm/slub.c b/mm/slub.c
index b1f31f7b81bb..a178c738fc92 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3,7 +3,7 @@
  * SLUB: A slab allocator that limits cache line use instead of queuing
  * objects in per cpu and per node lists.
  *
- * The allocator synchronizes using per slab locks or atomic operatios
+ * The allocator synchronizes using per slab locks or atomic operations
  * and only uses a centralized lock to manage a pool of partial slabs.
  *
  * (C) 2007 SGI, Christoph Lameter
@@ -160,7 +160,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 #undef SLUB_DEBUG_CMPXCHG
 
 /*
- * Mininum number of partial slabs. These will be left on the partial
+ * Minimum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
  */
 #define MIN_PARTIAL 5
@@ -833,7 +833,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  *
  * A. Free pointer (if we cannot overwrite object on free)
  * B. Tracking data for SLAB_STORE_USER
- * C. Padding to reach required alignment boundary or at mininum
+ * C. Padding to reach required alignment boundary or at minimum
  *	one word if debugging is on to be able to detect writes
  *	before the word boundary.
  *
@@ -3422,7 +3422,7 @@ static unsigned int slub_min_objects;
  *
  * Higher order allocations also allow the placement of more objects in a
  * slab and thereby reduce object handling overhead. If the user has
- * requested a higher mininum order then we start with that one instead of
+ * requested a higher minimum order then we start with that one instead of
  * the smallest order which will fit the object.
  */
 static inline unsigned int slab_order(unsigned int size,