author | Guo Ren <ren_guo@c-sky.com> | 2019-08-20 06:47:24 +0200
---|---|---
committer | Guo Ren <ren_guo@c-sky.com> | 2019-08-20 14:09:14 +0200
commit | dc140045c0cace809af872e3799e8fbe1b7d7f86 |
tree | bef3a506c2b97019e8ad3b4aabae6239be02e953 |
parent | csky: Fixup arch_get_unmapped_area() implementation |
download | linux-dc140045c0cace809af872e3799e8fbe1b7d7f86.tar.xz, linux-dc140045c0cace809af872e3799e8fbe1b7d7f86.zip |
csky: Fixup defer cache flush for 610
We use a deferred cache flush mechanism to improve the performance of
the 610, but the implementation was wrong. We fix it up now and update
the mechanism (a sketch follows below):
- The zero page need not be flushed.
- If a page is file-mapped and not yet touched from user space, defer the flush.
- If a page is anonymous-mapped, or is a dirty file mapping, flush it immediately.
- In update_mmu_cache(), finish the deferred flush with flush_dcache_page().
For the 610 we must take care of the dcache aliasing issue:
- VIPT cache with 8 KiB per way at 4 KiB page granularity.
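To make the aliasing constraint concrete: with 8 KiB per way, the set index is taken from virtual address bits [12:0], but the 4 KiB page offset fixes only bits [11:0]. Bit 12 can differ between two virtual mappings of the same physical page, so each page has two possible cache colors. Below is a minimal illustrative calculation; it is our own sketch, not part of the patch:

```c
/* alias_color.c - illustrative only; shows why an 8 KiB/way VIPT cache
 * with 4 KiB pages has exactly one alias bit (bit 12). */
#include <stdio.h>

#define PAGE_SHIFT 12               /* 4 KiB pages   */
#define WAY_SIZE   (8 * 1024)       /* 8 KiB per way */

/* Index bits above the page offset pick the "color" of a mapping. */
static unsigned long cache_color(unsigned long vaddr)
{
        return (vaddr & (WAY_SIZE - 1)) >> PAGE_SHIFT;
}

int main(void)
{
        /* Two hypothetical virtual mappings of one physical page. */
        unsigned long va1 = 0x2000, va2 = 0x3000;

        printf("color(va1)=%lu, color(va2)=%lu\n",
               cache_color(va1), cache_color(va2));  /* prints 0 and 1 */
        return 0;
}
```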
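And the promised sketch of the decision tree, as a self-contained user-space mock. The struct page fields, the clean flag, and the cache primitives here are stand-ins kept only to make the control flow from the list above runnable; this is not the real kernel implementation:

```c
/* defer_flush_sketch.c - user-space mock of the commit's decision tree. */
#include <stdbool.h>
#include <stdio.h>

struct page {
        bool is_zero_page;    /* page == ZERO_PAGE(0) in the kernel */
        bool file_mapping;    /* page_mapping_file(page) != NULL    */
        bool mapped_in_user;  /* page_mapcount(page) != 0           */
        bool dcache_clean;    /* the PG_dcache_clean flag bit       */
};

static void dcache_wbinv_all(void) { puts("  dcache write-back+invalidate"); }
static void icache_inv_all(void)   { puts("  icache invalidate"); }

/* Mirrors the new flush_dcache_page(): defer for untouched file pages,
 * flush immediately for anon or already-mapped (possibly dirty) pages. */
static void flush_dcache_page(struct page *p)
{
        if (p->is_zero_page)
                return;                   /* zero page: nothing to flush */

        if (p->file_mapping && !p->mapped_in_user) {
                p->dcache_clean = false;  /* defer: clear PG_dcache_clean */
        } else {
                dcache_wbinv_all();
                if (p->file_mapping)
                        icache_inv_all();
                p->dcache_clean = true;
        }
}

/* Mirrors update_mmu_cache(): finish any deferred flush at fault time. */
static void update_mmu_cache(struct page *p, bool vma_exec)
{
        if (p->is_zero_page)
                return;

        if (!p->dcache_clean) {           /* test_and_set_bit(...) */
                p->dcache_clean = true;
                dcache_wbinv_all();
        }
        if (p->file_mapping && vma_exec)
                icache_inv_all();
}

int main(void)
{
        struct page file_page = { .file_mapping = true };

        puts("write to page cache, no user mapping yet:");
        flush_dcache_page(&file_page);      /* deferred, no flush output */
        puts("first user fault on an executable mapping:");
        update_mmu_cache(&file_page, true); /* flush happens here */
        return 0;
}
```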
Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Arnd Bergmann <arnd@arndb.de>
-rw-r--r-- | arch/csky/abiv1/cacheflush.c | 50
-rw-r--r-- | arch/csky/abiv1/inc/abi/cacheflush.h | 4
2 files changed, 29 insertions, 25 deletions
diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c
index 10af8b6fe322..fee99fc6612f 100644
--- a/arch/csky/abiv1/cacheflush.c
+++ b/arch/csky/abiv1/cacheflush.c
@@ -11,42 +11,46 @@
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 
+#define PG_dcache_clean		PG_arch_1
+
 void flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
-	unsigned long addr;
+	struct address_space *mapping;
 
-	if (mapping && !mapping_mapped(mapping)) {
-		set_bit(PG_arch_1, &(page)->flags);
+	if (page == ZERO_PAGE(0))
 		return;
-	}
 
-	/*
-	 * We could delay the flush for the !page_mapping case too. But that
-	 * case is for exec env/arg pages and those are %99 certainly going to
-	 * get faulted into the tlb (and thus flushed) anyways.
-	 */
-	addr = (unsigned long) page_address(page);
-	dcache_wb_range(addr, addr + PAGE_SIZE);
+	mapping = page_mapping_file(page);
+
+	if (mapping && !page_mapcount(page))
+		clear_bit(PG_dcache_clean, &page->flags);
+	else {
+		dcache_wbinv_all();
+		if (mapping)
+			icache_inv_all();
+		set_bit(PG_dcache_clean, &page->flags);
+	}
 }
+EXPORT_SYMBOL(flush_dcache_page);
 
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t *pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+		      pte_t *ptep)
 {
-	unsigned long addr;
+	unsigned long pfn = pte_pfn(*ptep);
 	struct page *page;
-	unsigned long pfn;
 
-	pfn = pte_pfn(*pte);
-	if (unlikely(!pfn_valid(pfn)))
+	if (!pfn_valid(pfn))
 		return;
 
 	page = pfn_to_page(pfn);
-	addr = (unsigned long) page_address(page);
+	if (page == ZERO_PAGE(0))
+		return;
 
-	if (vma->vm_flags & VM_EXEC ||
-	    pages_do_alias(addr, address & PAGE_MASK))
-		cache_wbinv_all();
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+		dcache_wbinv_all();
 
-	clear_bit(PG_arch_1, &(page)->flags);
+	if (page_mapping_file(page)) {
+		if (vma->vm_flags & VM_EXEC)
+			icache_inv_all();
+	}
 }
diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h
index 5f663aef9b1b..fce5604cef40 100644
--- a/arch/csky/abiv1/inc/abi/cacheflush.h
+++ b/arch/csky/abiv1/inc/abi/cacheflush.h
@@ -26,8 +26,8 @@ extern void flush_dcache_page(struct page *);
 #define flush_icache_page(vma, page)	cache_wbinv_all()
 #define flush_icache_range(start, end)	cache_wbinv_range(start, end)
 
-#define flush_icache_user_range(vma, pg, adr, len) \
-	cache_wbinv_range(adr, adr + len)
+#define flush_icache_user_range(vma,page,addr,len) \
+	flush_dcache_page(page)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 do { \
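A side note on the header hunk: flush_icache_user_range() used to write back only the range just written; it now funnels into flush_dcache_page(), so kernel-initiated stores into a user page (this macro is typically reached via copy_to_user_page(), e.g. when ptrace plants a breakpoint) follow the same defer-or-flush policy, including the icache invalidate when the page belongs to a file mapping.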