| author | Joerg Roedel <jroedel@suse.de> | 2019-07-19 20:46:51 +0200 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2019-07-22 10:18:30 +0200 |
| commit | 8e998fc24de47c55b47a887f6c95ab91acd4a720 | |
| tree | 3ea783fa077c813487ae0760772c08e7c019ef0c /arch/x86/mm | |
| parent | x86/mm: Check for pfn instead of page in vmalloc_sync_one() | |
x86/mm: Sync also unmappings in vmalloc_sync_all()
With huge-page ioremap areas, unmappings also need to be synced between
all page-tables. Otherwise stale entries can cause data corruption when a
region is unmapped and later re-used.

Make vmalloc_sync_one() able to sync unmappings, and make sure
vmalloc_sync_all() iterates over all page-tables even when an unmapped PMD
is found.
Fixes: 5d72b4fba40ef ("x86, mm: support huge I/O mapping capability I/F")
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lkml.kernel.org/r/20190719184652.11391-3-joro@8bytes.org
Diffstat (limited to 'arch/x86/mm')
| -rw-r--r-- | arch/x86/mm/fault.c | 13 |

1 file changed, 5 insertions(+), 8 deletions(-)
```diff
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e64173db4970..9ceacd1156db 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -177,11 +177,12 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 
 	pmd = pmd_offset(pud, address);
 	pmd_k = pmd_offset(pud_k, address);
-	if (!pmd_present(*pmd_k))
-		return NULL;
 
-	if (!pmd_present(*pmd))
+	if (pmd_present(*pmd) != pmd_present(*pmd_k))
 		set_pmd(pmd, *pmd_k);
+
+	if (!pmd_present(*pmd_k))
+		return NULL;
 	else
 		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
 
@@ -203,17 +204,13 @@ void vmalloc_sync_all(void)
 		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			spinlock_t *pgt_lock;
-			pmd_t *ret;
 
 			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
 			spin_lock(pgt_lock);
-			ret = vmalloc_sync_one(page_address(page), address);
+			vmalloc_sync_one(page_address(page), address);
 			spin_unlock(pgt_lock);
-
-			if (!ret)
-				break;
 		}
 		spin_unlock(&pgd_lock);
 	}
```
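For reference, this is the relevant part of vmalloc_sync_one() as it reads after the hunks above are applied. The code is reconstructed from the diff; the explanatory comments are added here and are not in the kernel source, and the earlier pgd/p4d/pud lookups are elided:

```c
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	/* ... pgd/p4d/pud lookups elided; see arch/x86/mm/fault.c ... */

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);

	/*
	 * Copy the reference entry whenever the two tables disagree on
	 * whether a mapping is present. Before this patch only the
	 * "not present -> present" direction was handled; now an
	 * unmapping (present -> not present) is propagated as well.
	 */
	if (pmd_present(*pmd) != pmd_present(*pmd_k))
		set_pmd(pmd, *pmd_k);

	/* Nothing mapped at this address in the reference table ... */
	if (!pmd_present(*pmd_k))
		return NULL;
	else	/* ... otherwise both entries must point to the same pfn. */
		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));

	return pmd_k;
}
```

Because vmalloc_sync_one() can now legitimately return NULL for an address that was unmapped, vmalloc_sync_all() no longer treats a NULL return as a reason to stop early and instead keeps iterating over every page-table on the pgd_list, which is the second half of the diff above.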