author     Wei Yang <richard.weiyang@gmail.com>  2019-03-06 00:46:22 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-03-06 06:07:18 +0100
commit     8bb4e7a2ee26c05a94ae6cb0aec2f82a3523cf35 (patch)
tree       eca92d41268670cd0f4e8d1e0bdd46add2c7a484 /mm
parent     mm/page_owner: move config option to mm/Kconfig.debug (diff)
download   linux-8bb4e7a2ee26c05a94ae6cb0aec2f82a3523cf35.tar.xz
           linux-8bb4e7a2ee26c05a94ae6cb0aec2f82a3523cf35.zip
mm: fix some typos in mm directory
No functional change.

Link: http://lkml.kernel.org/r/20190118235123.27843-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/migrate.c     | 2
-rw-r--r--  mm/mmap.c        | 8
-rw-r--r--  mm/page_alloc.c  | 4
-rw-r--r--  mm/slub.c        | 2
-rw-r--r--  mm/vmscan.c      | 2
5 files changed, 9 insertions, 9 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index 0e9888cb33ad..5308d6abd384 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -100,7 +100,7 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
/*
* Check PageMovable before holding a PG_lock because page's owner
* assumes anybody doesn't touch PG_lock of newly allocated page
- * so unconditionally grapping the lock ruins page's owner side.
+ * so unconditionally grabbing the lock ruins page's owner side.
*/
if (unlikely(!__PageMovable(page)))
goto out_putpage;
diff --git a/mm/mmap.c b/mm/mmap.c
index eccba2650ef6..41eb48d9b527 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -438,7 +438,7 @@ static void vma_gap_update(struct vm_area_struct *vma)
{
/*
* As it turns out, RB_DECLARE_CALLBACKS() already created a callback
- * function that does exacltly what we want.
+ * function that does exactly what we want.
*/
vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}
@@ -1012,7 +1012,7 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
* VM_SOFTDIRTY should not prevent from VMA merging, if we
* match the flags but dirty bit -- the caller should mark
* merged VMA as dirty. If dirty bit won't be excluded from
- * comparison, we increase pressue on the memory system forcing
+ * comparison, we increase pressure on the memory system forcing
* the kernel to generate new VMAs when old one could be
* extended instead.
*/
@@ -1115,7 +1115,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
* PPPP NNNN PPPPPPPPPPPP PPPPPPPPNNNN PPPPNNNNNNNN
* might become case 1 below case 2 below case 3 below
*
- * It is important for case 8 that the the vma NNNN overlapping the
+ * It is important for case 8 that the vma NNNN overlapping the
* region AAAA is never going to extended over XXXX. Instead XXXX must
* be extended in region AAAA and NNNN must be removed. This way in
* all cases where vma_merge succeeds, the moment vma_adjust drops the
@@ -1645,7 +1645,7 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
- * Some shared mappigns will want the pages marked read-only
+ * Some shared mappings will want the pages marked read-only
* to track write events. If so, we'll downgrade vm_page_prot
* to the private version (using protection_map[] without the
* VM_SHARED bit).
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9be9a22ebe35..ec250453f5e8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7551,7 +7551,7 @@ static void __setup_per_zone_wmarks(void)
* value here.
*
* The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
- * deltas control asynch page reclaim, and so should
+ * deltas control async page reclaim, and so should
* not be capped for highmem.
*/
unsigned long min_pages;
@@ -8028,7 +8028,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,

/*
* Hugepages are not in LRU lists, but they're movable.
- * We need not scan over tail pages bacause we don't
+ * We need not scan over tail pages because we don't
* handle each tail page individually in migration.
*/
if (PageHuge(page)) {
diff --git a/mm/slub.c b/mm/slub.c
index d8b1eee2dd86..017a2ce5ba23 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2129,7 +2129,7 @@ redo:
if (!lock) {
lock = 1;
/*
- * Taking the spinlock removes the possiblity
+ * Taking the spinlock removes the possibility
* that acquire_slab() will see a slab page that
* is frozen
*/
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e979705bbf32..63195364ab2e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3527,7 +3527,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
*
* kswapd scans the zones in the highmem->normal->dma direction. It skips
* zones which have free_pages > high_wmark_pages(zone), but once a zone is
- * found to have free_pages <= high_wmark_pages(zone), any page is that zone
+ * found to have free_pages <= high_wmark_pages(zone), any page in that zone
* or lower is eligible for reclaim until at least one usable zone is
* balanced.
*/