author     Juergen Gross <jgross@suse.com>          2014-11-03 14:01:59 +0100
committer  Thomas Gleixner <tglx@linutronix.de>     2014-11-16 11:04:26 +0100
commit     e00c8cc93c1ac01ecd5049929a50fb47b62bb041
tree       a3833abca982fc685332340bb604dc925eb91c24
parent     x86: Use new cache mode type in mm/ioremap.c
x86: Use new cache mode type in memtype related functions
Instead of directly using the cache mode bits in the pte, switch to
using the cache mode type.
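
As a rough illustration (not part of the original patch description), the
conversion changes reserve_memtype() callers from passing raw pte cache
bits to passing the enum; variable names below are only for the example:

	/* before: caller passes pte protection bits */
	ret = reserve_memtype(start, end, _PAGE_CACHE_WC, &new_prot_val);

	/* after: caller passes the cache mode type directly */
	enum page_cache_mode new_pcm;
	ret = reserve_memtype(start, end, _PAGE_CACHE_MODE_WC, &new_pcm);

Translation to and from pte bits is then confined to the few places that
actually build page table entries, via cachemode2protval() and
pgprot2cachemode().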
Based-on-patch-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stefan.bader@canonical.com
Cc: xen-devel@lists.xensource.com
Cc: konrad.wilk@oracle.com
Cc: ville.syrjala@linux.intel.com
Cc: david.vrabel@citrix.com
Cc: jbeulich@suse.com
Cc: toshi.kani@hp.com
Cc: plagnioj@jcrosoft.com
Cc: tomi.valkeinen@ti.com
Cc: bhelgaas@google.com
Link: http://lkml.kernel.org/r/1415019724-4317-14-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 arch/x86/include/asm/cacheflush.h |  38
 arch/x86/include/asm/pat.h        |   2
 arch/x86/mm/ioremap.c             |   5
 arch/x86/mm/pageattr.c            |   9
 arch/x86/mm/pat.c                 | 102
 arch/x86/mm/pat_internal.h        |  22
 arch/x86/mm/pat_rbtree.c          |   8
 7 files changed, 96 insertions(+), 90 deletions(-)
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 9863ee3747da..157644bdf70e 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -9,10 +9,10 @@
 /*
  * X86 PAT uses page flags WC and Uncached together to keep track of
  * memory type of pages that have backing page struct. X86 PAT supports 3
- * different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
- * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
+ * different memory types, _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and
+ * _PAGE_CACHE_MODE_UC_MINUS and fourth state where page's memory type has not
  * been changed from its default (value of -1 used to denote this).
- * Note we do not support _PAGE_CACHE_UC here.
+ * Note we do not support _PAGE_CACHE_MODE_UC here.
  */

 #define _PGMT_DEFAULT		0
@@ -22,36 +22,40 @@
 #define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
 #define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

-static inline unsigned long get_page_memtype(struct page *pg)
+static inline enum page_cache_mode get_page_memtype(struct page *pg)
 {
 	unsigned long pg_flags = pg->flags & _PGMT_MASK;

 	if (pg_flags == _PGMT_DEFAULT)
 		return -1;
 	else if (pg_flags == _PGMT_WC)
-		return _PAGE_CACHE_WC;
+		return _PAGE_CACHE_MODE_WC;
 	else if (pg_flags == _PGMT_UC_MINUS)
-		return _PAGE_CACHE_UC_MINUS;
+		return _PAGE_CACHE_MODE_UC_MINUS;
 	else
-		return _PAGE_CACHE_WB;
+		return _PAGE_CACHE_MODE_WB;
 }

-static inline void set_page_memtype(struct page *pg, unsigned long memtype)
+static inline void set_page_memtype(struct page *pg,
+				    enum page_cache_mode memtype)
 {
-	unsigned long memtype_flags = _PGMT_DEFAULT;
+	unsigned long memtype_flags;
 	unsigned long old_flags;
 	unsigned long new_flags;

 	switch (memtype) {
-	case _PAGE_CACHE_WC:
+	case _PAGE_CACHE_MODE_WC:
 		memtype_flags = _PGMT_WC;
 		break;
-	case _PAGE_CACHE_UC_MINUS:
+	case _PAGE_CACHE_MODE_UC_MINUS:
 		memtype_flags = _PGMT_UC_MINUS;
 		break;
-	case _PAGE_CACHE_WB:
+	case _PAGE_CACHE_MODE_WB:
 		memtype_flags = _PGMT_WB;
 		break;
+	default:
+		memtype_flags = _PGMT_DEFAULT;
+		break;
 	}

 	do {
@@ -60,8 +64,14 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
 	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
 }
 #else
-static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
-static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
+static inline enum page_cache_mode get_page_memtype(struct page *pg)
+{
+	return -1;
+}
+static inline void set_page_memtype(struct page *pg,
+				    enum page_cache_mode memtype)
+{
+}
 #endif

 /*
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index d35ee2d976ca..150407a7234d 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -13,7 +13,7 @@ static const int pat_enabled;
 extern void pat_init(void);

 extern int reserve_memtype(u64 start, u64 end,
-		unsigned long req_type, unsigned long *ret_type);
+		enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
 extern int free_memtype(u64 start, u64 end);

 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index f31507f6f60b..8832e510941e 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -83,7 +83,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	const unsigned long unaligned_size = size;
 	struct vm_struct *area;
 	enum page_cache_mode new_pcm;
-	unsigned long new_prot_val;
 	pgprot_t prot;
 	int retval;
 	void __iomem *ret_addr;
@@ -135,14 +134,12 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 		size = PAGE_ALIGN(last_addr+1) - phys_addr;

 	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
-				 cachemode2protval(pcm), &new_prot_val);
+				 pcm, &new_pcm);
 	if (retval) {
 		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
 		return NULL;
 	}

-	new_pcm = pgprot2cachemode(__pgprot(new_prot_val));
-
 	if (pcm != new_pcm) {
 		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
 			printk(KERN_ERR
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 9f7e1b445e66..de807c9daad1 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1451,7 +1451,7 @@ int set_memory_uc(unsigned long addr, int numpages)
 	 * for now UC MINUS. see comments in ioremap_nocache()
 	 */
 	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-			      _PAGE_CACHE_UC_MINUS, NULL);
+			      _PAGE_CACHE_MODE_UC_MINUS, NULL);
 	if (ret)
 		goto out_err;

@@ -1479,7 +1479,7 @@ static int _set_memory_array(unsigned long *addr, int addrinarray,
 	 */
 	for (i = 0; i < addrinarray; i++) {
 		ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
-					cachemode2protval(new_type), NULL);
+					new_type, NULL);
 		if (ret)
 			goto out_free;
 	}
@@ -1544,7 +1544,7 @@ int set_memory_wc(unsigned long addr, int numpages)
 		return set_memory_uc(addr, numpages);

 	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-		_PAGE_CACHE_WC, NULL);
+		_PAGE_CACHE_MODE_WC, NULL);
 	if (ret)
 		goto out_err;

@@ -1662,8 +1662,7 @@ static int _set_pages_array(struct page **pages, int addrinarray,
 			continue;
 		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
 		end = start + PAGE_SIZE;
-		if (reserve_memtype(start, end, cachemode2protval(new_type),
-				    NULL))
+		if (reserve_memtype(start, end, new_type, NULL))
 			goto err_out;
 	}

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 8f68a83491ba..ef75f3f89810 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -139,20 +139,21 @@ static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */
  * The intersection is based on "Effective Memory Type" tables in IA-32
  * SDM vol 3a
  */
-static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
+static unsigned long pat_x_mtrr_type(u64 start, u64 end,
+				     enum page_cache_mode req_type)
 {
 	/*
 	 * Look for MTRR hint to get the effective type in case where PAT
 	 * request is for WB.
 	 */
-	if (req_type == _PAGE_CACHE_WB) {
+	if (req_type == _PAGE_CACHE_MODE_WB) {
 		u8 mtrr_type;

 		mtrr_type = mtrr_type_lookup(start, end);
 		if (mtrr_type != MTRR_TYPE_WRBACK)
-			return _PAGE_CACHE_UC_MINUS;
+			return _PAGE_CACHE_MODE_UC_MINUS;

-		return _PAGE_CACHE_WB;
+		return _PAGE_CACHE_MODE_WB;
 	}

 	return req_type;
@@ -207,25 +208,26 @@ static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
  * - Find the memtype of all the pages in the range, look for any conflicts
  * - In case of no conflicts, set the new memtype for pages in the range
  */
-static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
-				  unsigned long *new_type)
+static int reserve_ram_pages_type(u64 start, u64 end,
+				  enum page_cache_mode req_type,
+				  enum page_cache_mode *new_type)
 {
 	struct page *page;
 	u64 pfn;

-	if (req_type == _PAGE_CACHE_UC) {
+	if (req_type == _PAGE_CACHE_MODE_UC) {
 		/* We do not support strong UC */
 		WARN_ON_ONCE(1);
-		req_type = _PAGE_CACHE_UC_MINUS;
+		req_type = _PAGE_CACHE_MODE_UC_MINUS;
 	}

 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
-		unsigned long type;
+		enum page_cache_mode type;

 		page = pfn_to_page(pfn);
 		type = get_page_memtype(page);
 		if (type != -1) {
-			printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
+			pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
 				start, end - 1, type, req_type);
 			if (new_type)
 				*new_type = type;
@@ -258,21 +260,21 @@ static int free_ram_pages_type(u64 start, u64 end)

 /*
  * req_type typically has one of the:
- * - _PAGE_CACHE_WB
- * - _PAGE_CACHE_WC
- * - _PAGE_CACHE_UC_MINUS
- * - _PAGE_CACHE_UC
+ * - _PAGE_CACHE_MODE_WB
+ * - _PAGE_CACHE_MODE_WC
+ * - _PAGE_CACHE_MODE_UC_MINUS
+ * - _PAGE_CACHE_MODE_UC
  *
  * If new_type is NULL, function will return an error if it cannot reserve the
  * region with req_type. If new_type is non-NULL, function will return
  * available type in new_type in case of no error. In case of any error
  * it will return a negative return value.
  */
-int reserve_memtype(u64 start, u64 end, unsigned long req_type,
-		    unsigned long *new_type)
+int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
+		    enum page_cache_mode *new_type)
 {
 	struct memtype *new;
-	unsigned long actual_type;
+	enum page_cache_mode actual_type;
 	int is_range_ram;
 	int err = 0;

@@ -281,10 +283,10 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (!pat_enabled) {
 		/* This is identical to page table setting without PAT */
 		if (new_type) {
-			if (req_type == _PAGE_CACHE_WC)
-				*new_type = _PAGE_CACHE_UC_MINUS;
+			if (req_type == _PAGE_CACHE_MODE_WC)
+				*new_type = _PAGE_CACHE_MODE_UC_MINUS;
 			else
-				*new_type = req_type & _PAGE_CACHE_MASK;
+				*new_type = req_type;
 		}
 		return 0;
 	}
@@ -292,7 +294,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	/* Low ISA region is always mapped WB in page table. No need to track */
 	if (x86_platform.is_untracked_pat_range(start, end)) {
 		if (new_type)
-			*new_type = _PAGE_CACHE_WB;
+			*new_type = _PAGE_CACHE_MODE_WB;
 		return 0;
 	}

@@ -302,7 +304,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	 * tools and ACPI tools). Use WB request for WB memory and use
 	 * UC_MINUS otherwise.
 	 */
-	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
+	actual_type = pat_x_mtrr_type(start, end, req_type);

 	if (new_type)
 		*new_type = actual_type;
@@ -408,7 +410,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
 	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
 		struct page *page;
 		page = pfn_to_page(paddr >> PAGE_SHIFT);
-		rettype = pgprot2cachemode(__pgprot(get_page_memtype(page)));
+		rettype = get_page_memtype(page);
 		/*
 		 * -1 from get_page_memtype() implies RAM page is in its
 		 * default state and not reserved, and hence of type WB
@@ -423,7 +425,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)

 	entry = rbt_memtype_lookup(paddr);
 	if (entry != NULL)
-		rettype = pgprot2cachemode(__pgprot(entry->type));
+		rettype = entry->type;
 	else
 		rettype = _PAGE_CACHE_MODE_UC_MINUS;

@@ -447,18 +449,14 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,
 	resource_size_t size = end - start;
 	enum page_cache_mode req_type = *type;
 	enum page_cache_mode new_type;
-	unsigned long new_prot;
 	int ret;

 	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

-	ret = reserve_memtype(start, end, cachemode2protval(req_type),
-			      &new_prot);
+	ret = reserve_memtype(start, end, req_type, &new_type);
 	if (ret)
 		goto out_err;

-	new_type = pgprot2cachemode(__pgprot(new_prot));
-
 	if (!is_new_memtype_allowed(start, size, req_type, new_type))
 		goto out_free;

@@ -524,13 +522,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
-	unsigned long flags = _PAGE_CACHE_WB;
+	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

 	if (!range_is_allowed(pfn, size))
 		return 0;

 	if (file->f_flags & O_DSYNC)
-		flags = _PAGE_CACHE_UC_MINUS;
+		pcm = _PAGE_CACHE_MODE_UC_MINUS;

 #ifdef CONFIG_X86_32
 	/*
@@ -547,12 +545,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
 	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
 	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
-		flags = _PAGE_CACHE_UC;
+		pcm = _PAGE_CACHE_MODE_UC;
 	}
 #endif

 	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
-			     flags);
+			     cachemode2protval(pcm));
 	return 1;
 }

@@ -583,7 +581,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
 		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
 			"for [mem %#010Lx-%#010Lx]\n",
 			current->comm, current->pid,
-			cattr_name(cachemode2protval(pcm)),
+			cattr_name(pcm),
 			base, (unsigned long long)(base + size-1));
 		return -EINVAL;
 	}
@@ -600,8 +598,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 {
 	int is_ram = 0;
 	int ret;
-	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
-	unsigned long flags = want_flags;
+	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
+	enum page_cache_mode pcm = want_pcm;

 	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

@@ -614,38 +612,36 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 		if (!pat_enabled)
 			return 0;

-		flags = cachemode2protval(lookup_memtype(paddr));
-		if (want_flags != flags) {
+		pcm = lookup_memtype(paddr);
+		if (want_pcm != pcm) {
 			printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
-				cattr_name(want_flags),
+				cattr_name(want_pcm),
 				(unsigned long long)paddr,
 				(unsigned long long)(paddr + size - 1),
-				cattr_name(flags));
+				cattr_name(pcm));
 			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
-					      (~_PAGE_CACHE_MASK)) |
-					     flags);
+					     (~_PAGE_CACHE_MASK)) |
+					     cachemode2protval(pcm));
 		}
 		return 0;
 	}

-	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
+	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
 	if (ret)
 		return ret;

-	if (flags != want_flags) {
+	if (pcm != want_pcm) {
 		if (strict_prot ||
-		    !is_new_memtype_allowed(paddr, size,
-				pgprot2cachemode(__pgprot(want_flags)),
-				pgprot2cachemode(__pgprot(flags)))) {
+		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
 			free_memtype(paddr, paddr + size);
 			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
 				" for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
-				cattr_name(want_flags),
+				cattr_name(want_pcm),
 				(unsigned long long)paddr,
 				(unsigned long long)(paddr + size - 1),
-				cattr_name(flags));
+				cattr_name(pcm));
 			return -EINVAL;
 		}
 		/*
@@ -654,11 +650,10 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 		 */
 		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
 				      (~_PAGE_CACHE_MASK)) |
-				     flags);
+				     cachemode2protval(pcm));
 	}

-	if (kernel_map_sync_memtype(paddr, size,
-				    pgprot2cachemode(__pgprot(flags))) < 0) {
+	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
 		free_memtype(paddr, paddr + size);
 		return -EINVAL;
 	}
@@ -799,7 +794,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 pgprot_t pgprot_writecombine(pgprot_t prot)
 {
 	if (pat_enabled)
-		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
+		return __pgprot(pgprot_val(prot) |
+				cachemode2protval(_PAGE_CACHE_MODE_WC));
 	else
 		return pgprot_noncached(prot);
 }
diff --git a/arch/x86/mm/pat_internal.h b/arch/x86/mm/pat_internal.h
index 77e5ba153fac..f6411620305d 100644
--- a/arch/x86/mm/pat_internal.h
+++ b/arch/x86/mm/pat_internal.h
@@ -10,30 +10,32 @@ struct memtype {
 	u64			start;
 	u64			end;
 	u64			subtree_max_end;
-	unsigned long		type;
+	enum page_cache_mode	type;
 	struct rb_node		rb;
 };

-static inline char *cattr_name(unsigned long flags)
+static inline char *cattr_name(enum page_cache_mode pcm)
 {
-	switch (flags & _PAGE_CACHE_MASK) {
-	case _PAGE_CACHE_UC:		return "uncached";
-	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
-	case _PAGE_CACHE_WB:		return "write-back";
-	case _PAGE_CACHE_WC:		return "write-combining";
-	default:			return "broken";
+	switch (pcm) {
+	case _PAGE_CACHE_MODE_UC:		return "uncached";
+	case _PAGE_CACHE_MODE_UC_MINUS:		return "uncached-minus";
+	case _PAGE_CACHE_MODE_WB:		return "write-back";
+	case _PAGE_CACHE_MODE_WC:		return "write-combining";
+	case _PAGE_CACHE_MODE_WT:		return "write-through";
+	case _PAGE_CACHE_MODE_WP:		return "write-protected";
+	default:				return "broken";
 	}
 }

 #ifdef CONFIG_X86_PAT
 extern int rbt_memtype_check_insert(struct memtype *new,
-					unsigned long *new_type);
+					enum page_cache_mode *new_type);
 extern struct memtype *rbt_memtype_erase(u64 start, u64 end);
 extern struct memtype *rbt_memtype_lookup(u64 addr);
 extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos);
 #else
 static inline int rbt_memtype_check_insert(struct memtype *new,
-					unsigned long *new_type)
+					enum page_cache_mode *new_type)
 { return 0; }
 static inline struct memtype *rbt_memtype_erase(u64 start, u64 end)
 { return NULL; }
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index 415f6c4ced36..6582adcc8bd9 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -122,11 +122,12 @@ static struct memtype *memtype_rb_exact_match(struct rb_root *root,

 static int memtype_rb_check_conflict(struct rb_root *root,
 				u64 start, u64 end,
-				unsigned long reqtype, unsigned long *newtype)
+				enum page_cache_mode reqtype,
+				enum page_cache_mode *newtype)
 {
 	struct rb_node *node;
 	struct memtype *match;
-	int found_type = reqtype;
+	enum page_cache_mode found_type = reqtype;

 	match = memtype_rb_lowest_match(&memtype_rbroot, start, end);
 	if (match == NULL)
@@ -187,7 +188,8 @@ static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
 	rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb);
 }

-int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
+int rbt_memtype_check_insert(struct memtype *new,
+			     enum page_cache_mode *ret_type)
 {
 	int err = 0;
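
For reference, a minimal sketch of the translation that cachemode2protval()
performs at the page table boundary, assuming the kernel's default PAT MSR
layout (WB, WC, UC-, UC in entries 0-3). This is only an illustration; the
real helper is table driven and was introduced earlier in this series, and it
also has to account for the different PAT bit position in 4k versus large
page entries:

	/* illustrative mapping only -- not the kernel's actual lookup table */
	static const unsigned long example_cm2pte[] = {
		[_PAGE_CACHE_MODE_WB]       = 0,
		[_PAGE_CACHE_MODE_WC]       = _PAGE_PWT,
		[_PAGE_CACHE_MODE_UC_MINUS] = _PAGE_PCD,
		[_PAGE_CACHE_MODE_UC]       = _PAGE_PCD | _PAGE_PWT,
	};

Keeping the cache mode as an enum until this point means the memtype
tracking code never has to know about the pte bit encoding at all.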