author | Haitao Shi <shihaitao1@huawei.com> | 2020-12-16 05:47:26 +0100 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-12-16 07:46:19 +0100 |
commit | 8958b2491104d7f254cff0698505392582dbc13a (patch) | |
tree | e79c737af4459cf5e4b427ef7d701836a8662482 /mm | |
parent | mm: simplify follow_pte{,pmd} (diff) | |
download | linux-8958b2491104d7f254cff0698505392582dbc13a.tar.xz, linux-8958b2491104d7f254cff0698505392582dbc13a.zip | |
mm: fix some spelling mistakes in comments
Fix some spelling mistakes in comments:
udpate ==> update
succesful ==> successful
exmaple ==> example
unneccessary ==> unnecessary
stoping ==> stopping
uknown ==> unknown
Link: https://lkml.kernel.org/r/20201127011747.86005-1-shihaitao1@huawei.com
Signed-off-by: Haitao Shi <shihaitao1@huawei.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Souptick Joarder <jrdr.linux@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/filemap.c | 2 |
-rw-r--r-- | mm/huge_memory.c | 2 |
-rw-r--r-- | mm/khugepaged.c | 2 |
-rw-r--r-- | mm/memblock.c | 2 |
-rw-r--r-- | mm/migrate.c | 2 |
-rw-r--r-- | mm/page_ext.c | 2 |
6 files changed, 6 insertions, 6 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index c178022d7893..2e16daf98bf9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1359,7 +1359,7 @@ static int __wait_on_page_locked_async(struct page *page,
 	else
 		ret = PageLocked(page);
 	/*
-	 * If we were succesful now, we know we're still on the
+	 * If we were successful now, we know we're still on the
 	 * waitqueue as we're still under the lock. This means it's
 	 * safe to remove and return success, we know the callback
 	 * isn't going to trigger.
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1efe2b5ad59a..9237976abe72 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2391,7 +2391,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
 	 * Clone page flags before unfreezing refcount.
 	 *
 	 * After successful get_page_unless_zero() might follow flags change,
-	 * for exmaple lock_page() which set PG_waiters.
+	 * for example lock_page() which set PG_waiters.
 	 */
 	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	page_tail->flags |= (head->flags &
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index ad316d2e1fee..67ab391a5373 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1275,7 +1275,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
				 * PTEs are armed with uffd write protection.
				 * Here we can also mark the new huge pmd as
				 * write protected if any of the small ones is
-				 * marked but that could bring uknown
+				 * marked but that could bring unknown
				 * userfault messages that falls outside of
				 * the registered range. So, just be simple.
				 */
diff --git a/mm/memblock.c b/mm/memblock.c
index 049df4163a97..a3c406070f4d 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -871,7 +871,7 @@ int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
  * @base: base address of the region
  * @size: size of the region
  * @set: set or clear the flag
- * @flag: the flag to udpate
+ * @flag: the flag to update
  *
  * This function isolates region [@base, @base + @size), and sets/clears flag
  *
diff --git a/mm/migrate.c b/mm/migrate.c
index ee802cb509a3..ee5e612b4cd8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2594,7 +2594,7 @@ static bool migrate_vma_check_page(struct page *page)
	 * will bump the page reference count. Sadly there is no way to
	 * differentiate a regular pin from migration wait. Hence to
	 * avoid 2 racing thread trying to migrate back to CPU to enter
-	 * infinite loop (one stoping migration because the other is
+	 * infinite loop (one stopping migration because the other is
	 * waiting on pte migration entry). We always return true here.
	 *
	 * FIXME proper solution is to rework migration_entry_wait() so
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 16b161f28a31..df6f74aac8e1 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -34,7 +34,7 @@
  *
  * The need callback is used to decide whether extended memory allocation is
  * needed or not. Sometimes users want to deactivate some features in this
- * boot and extra memory would be unneccessary. In this case, to avoid
+ * boot and extra memory would be unnecessary. In this case, to avoid
  * allocating huge chunk of memory, each clients represent their need of
  * extra memory through the need callback. If one of the need callbacks
  * returns true, it means that someone needs extra memory so that
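The fixes above are plain token substitutions, so they can be located mechanically. Below is a minimal, hypothetical Python sketch (not part of this commit and not kernel tooling) that scans a source tree for exactly the misspellings listed in the commit message; the script name and the `mm` path argument are illustrative assumptions.

```python
#!/usr/bin/env python3
"""Illustrative sketch: find the misspellings fixed by this commit.

The word list comes from the commit message; everything else here is an
assumption for illustration, not part of the kernel or of this patch.
"""
import pathlib
import re
import sys

# Misspelling -> correction pairs listed in the commit message.
FIXES = {
    "udpate": "update",
    "succesful": "successful",
    "exmaple": "example",
    "unneccessary": "unnecessary",
    "stoping": "stopping",
    "uknown": "unknown",
}

PATTERN = re.compile(r"\b(" + "|".join(map(re.escape, FIXES)) + r")\b")


def scan(root: str) -> int:
    """Print file:line hits for each misspelling and return the hit count."""
    hits = 0
    for path in pathlib.Path(root).rglob("*.c"):
        text = path.read_text(errors="ignore")
        for lineno, line in enumerate(text.splitlines(), start=1):
            for match in PATTERN.finditer(line):
                word = match.group(1)
                print(f"{path}:{lineno}: {word} -> {FIXES[word]}")
                hits += 1
    return hits


if __name__ == "__main__":
    # e.g. `python3 scan_typos.py mm` from the top of a kernel tree.
    sys.exit(0 if scan(sys.argv[1] if len(sys.argv) > 1 else ".") == 0 else 1)
```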