Diffstat (limited to 'arch/csky')
-rw-r--r--  arch/csky/Kconfig                          | 31
-rw-r--r--  arch/csky/abiv1/inc/abi/string.h           |  6
-rw-r--r--  arch/csky/include/asm/Kbuild               |  4
-rw-r--r--  arch/csky/include/asm/cmpxchg.h            | 31
-rw-r--r--  arch/csky/include/asm/jump_label.h         | 47
-rw-r--r--  arch/csky/include/asm/pci.h                | 23
-rw-r--r--  arch/csky/include/asm/pgalloc.h            |  2
-rw-r--r--  arch/csky/include/asm/pgtable.h            | 24
-rw-r--r--  arch/csky/include/asm/sections.h           | 10
-rw-r--r--  arch/csky/include/asm/spinlock.h           | 12
-rw-r--r--  arch/csky/include/asm/spinlock_types.h     |  9
-rw-r--r--  arch/csky/include/asm/tlb.h                | 15
-rw-r--r--  arch/csky/kernel/Makefile                  |  1
-rw-r--r--  arch/csky/kernel/entry.S                   |  8
-rw-r--r--  arch/csky/kernel/jump_label.c              | 54
-rw-r--r--  arch/csky/kernel/probes/kprobes.c          |  4
-rw-r--r--  arch/csky/kernel/setup.c                   |  4
-rw-r--r--  arch/csky/kernel/vmlinux.lds.S             | 15
-rw-r--r--  arch/csky/mm/asid.c                        |  5
-rw-r--r--  arch/csky/mm/fault.c                       |  4
-rw-r--r--  arch/csky/mm/init.c                        | 20
21 files changed, 245 insertions, 84 deletions
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 21d72b078eef..3cbc2dc62baf 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -8,6 +8,33 @@ config CSKY
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_QUEUED_RWLOCKS
+	select ARCH_USE_QUEUED_SPINLOCKS
+	select ARCH_INLINE_READ_LOCK if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
 	select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace)
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
 	select COMMON_CLK
@@ -40,9 +67,11 @@ config CSKY
 	select GX6605S_TIMER if CPU_CK610
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_JUMP_LABEL if !CPU_CK610
+	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_SECCOMP_FILTER
-	select HAVE_CONTEXT_TRACKING
+	select HAVE_CONTEXT_TRACKING_USER
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_DEBUG_KMEMLEAK
diff --git a/arch/csky/abiv1/inc/abi/string.h b/arch/csky/abiv1/inc/abi/string.h
index 9d95594b0feb..de50117b904d 100644
--- a/arch/csky/abiv1/inc/abi/string.h
+++ b/arch/csky/abiv1/inc/abi/string.h
@@ -6,4 +6,10 @@
 #define __HAVE_ARCH_MEMCPY
 extern void *memcpy(void *, const void *, __kernel_size_t);
 
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *, int, __kernel_size_t);
+
 #endif /* __ABI_CSKY_STRING_H */
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 103207a58f97..1117c28cb7e8 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -3,10 +3,10 @@ generic-y += asm-offsets.h
 generic-y += extable.h
 generic-y += gpio.h
 generic-y += kvm_para.h
-generic-y += spinlock.h
-generic-y += spinlock_types.h
+generic-y += mcs_spinlock.h
 generic-y += qrwlock.h
 generic-y += qrwlock_types.h
+generic-y += qspinlock.h
 generic-y += parport.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index 5b8faccd65e4..916043b845f1 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -4,10 +4,9 @@
 #define __ASM_CSKY_CMPXCHG_H
 
 #ifdef CONFIG_SMP
+#include <linux/bug.h>
 #include <asm/barrier.h>
 
-extern void __bad_xchg(void);
-
 #define __xchg_relaxed(new, ptr, size)			\
 ({							\
 	__typeof__(ptr) __ptr = (ptr);			\
@@ -15,6 +14,26 @@ extern void __bad_xchg(void);
 	__typeof__(*(ptr)) __ret;			\
 	unsigned long tmp;				\
 	switch (size) {					\
+	case 2: {					\
+		u32 ret;				\
+		u32 shif = ((ulong)__ptr & 2) ? 16 : 0;	\
+		u32 mask = 0xffff << shif;		\
+		__ptr = (__typeof__(ptr))((ulong)__ptr & ~2);	\
+		__asm__ __volatile__ (			\
+		"1:	ldex.w	%0, (%4)\n"		\
+		"	and	%1, %0, %2\n"		\
+		"	or	%1, %1, %3\n"		\
+		"	stex.w	%1, (%4)\n"		\
+		"	bez	%1, 1b\n"		\
+		: "=&r" (ret), "=&r" (tmp)		\
+		: "r" (~mask),				\
+		  "r" ((u32)__new << shif),		\
+		  "r" (__ptr)				\
+		: "memory");				\
+		__ret = (__typeof__(*(ptr)))		\
+			((ret & mask) >> shif);		\
+		break;					\
+	}						\
 	case 4:						\
 		asm volatile (				\
 		"1:	ldex.w		%0, (%3) \n"	\
@@ -26,7 +45,7 @@ extern void __bad_xchg(void);
 		:);					\
 		break;					\
 	default:					\
-		__bad_xchg();				\
+		BUILD_BUG();				\
 	}						\
 	__ret;						\
 })
@@ -56,7 +75,7 @@ extern void __bad_xchg(void);
 		:);					\
 		break;					\
 	default:					\
-		__bad_xchg();				\
+		BUILD_BUG();				\
 	}						\
 	__ret;						\
 })
@@ -87,7 +106,7 @@ extern void __bad_xchg(void);
 		:);					\
 		break;					\
 	default:					\
-		__bad_xchg();				\
+		BUILD_BUG();				\
 	}						\
 	__ret;						\
 })
@@ -119,7 +138,7 @@ extern void __bad_xchg(void);
 		:);					\
 		break;					\
 	default:					\
-		__bad_xchg();				\
+		BUILD_BUG();				\
 	}						\
 	__ret;						\
 })
diff --git a/arch/csky/include/asm/jump_label.h b/arch/csky/include/asm/jump_label.h
new file mode 100644
index 000000000000..d488ba6084bc
--- /dev/null
+++ b/arch/csky/include/asm/jump_label.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_JUMP_LABEL_H
+#define __ASM_CSKY_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+						bool branch)
+{
+	asm_volatile_goto(
+		"1:	nop32					\n"
+		"	.pushsection	__jump_table, \"aw\"	\n"
+		"	.align		2			\n"
+		"	.long		1b - ., %l[label] - .	\n"
+		"	.long		%0 - .			\n"
+		"	.popsection				\n"
+		:  :  "i"(&((char *)key)[branch]) :  : label);
+
+	return false;
+label:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+						     bool branch)
+{
+	asm_volatile_goto(
+		"1:	bsr32		%l[label]		\n"
+		"	.pushsection	__jump_table, \"aw\"	\n"
+		"	.align		2			\n"
+		"	.long		1b - ., %l[label] - .	\n"
+		"	.long		%0 - .			\n"
+		"	.popsection				\n"
+		:  :  "i"(&((char *)key)[branch]) :  : label);
+
+	return false;
+label:
+	return true;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_CSKY_JUMP_LABEL_H */
diff --git a/arch/csky/include/asm/pci.h b/arch/csky/include/asm/pci.h
index ebc765b1f78b..42724c630d30 100644
--- a/arch/csky/include/asm/pci.h
+++ b/arch/csky/include/asm/pci.h
@@ -9,26 +9,7 @@
 
 #include <asm/io.h>
 
-#define PCIBIOS_MIN_IO		0
-#define PCIBIOS_MIN_MEM		0
-
-/* C-SKY shim does not initialize PCI bus */
-#define pcibios_assign_all_busses()	1
-
-extern int isa_dma_bridge_buggy;
-
-#ifdef CONFIG_PCI
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
-	/* no legacy IRQ on csky */
-	return -ENODEV;
-}
-
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
-	/* always show the domain in /proc */
-	return 1;
-}
-#endif /* CONFIG_PCI */
+/* Generic PCI */
+#include <asm-generic/pci.h>
 
 #endif /* __ASM_CSKY_PCI_H */
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index bbbd0698b397..7d57e5da0914 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -44,7 +44,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	pgd_t *ret;
 	pgd_t *init;
 
-	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+	ret = (pgd_t *) __get_free_page(GFP_KERNEL);
 	if (ret) {
 		init = pgd_offset(&init_mm, 0UL);
 		pgd_init((unsigned long *)ret);
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index bbe245117777..c3d9b92cbe61 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -18,12 +18,10 @@
 /*
  * C-SKY is two-level paging structure:
  */
-#define PGD_ORDER	0
-#define PTE_ORDER	0
 
-#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
 #define PTRS_PER_PMD	1
-#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))
 
 #define pte_ERROR(e) \
 	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
@@ -77,24 +75,6 @@
 
 #define MAX_SWAPFILES_CHECK() \
 		BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)
 
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READ
-#define __P010	PAGE_READ
-#define __P011	PAGE_READ
-#define __P100	PAGE_READ
-#define __P101	PAGE_READ
-#define __P110	PAGE_READ
-#define __P111	PAGE_READ
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READ
-#define __S010	PAGE_WRITE
-#define __S011	PAGE_WRITE
-#define __S100	PAGE_READ
-#define __S101	PAGE_READ
-#define __S110	PAGE_WRITE
-#define __S111	PAGE_WRITE
-
 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
diff --git a/arch/csky/include/asm/sections.h b/arch/csky/include/asm/sections.h
new file mode 100644
index 000000000000..4192cba8445d
--- /dev/null
+++ b/arch/csky/include/asm/sections.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_SECTIONS_H
+#define __ASM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _start[];
+
+#endif /* __ASM_SECTIONS_H */
diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h
new file mode 100644
index 000000000000..83a2005341f5
--- /dev/null
+++ b/arch/csky/include/asm/spinlock.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_H
+#define __ASM_CSKY_SPINLOCK_H
+
+#include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
+
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock()	smp_mb()
+
+#endif /* __ASM_CSKY_SPINLOCK_H */
diff --git a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..75bdf3af80ba
--- /dev/null
+++ b/arch/csky/include/asm/spinlock_types.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_TYPES_H
+#define __ASM_CSKY_SPINLOCK_TYPES_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
+
+#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */
diff --git a/arch/csky/include/asm/tlb.h b/arch/csky/include/asm/tlb.h
index 3498e65f59f8..702861c68874 100644
--- a/arch/csky/include/asm/tlb.h
+++ b/arch/csky/include/asm/tlb.h
@@ -4,21 +4,6 @@
 #define __ASM_CSKY_TLB_H
 
 #include <asm/cacheflush.h>
-
-#define tlb_start_vma(tlb, vma) \
-	do { \
-		if (!(tlb)->fullmm) \
-			flush_cache_range(vma, (vma)->vm_start, (vma)->vm_end); \
-	}  while (0)
-
-#define tlb_end_vma(tlb, vma) \
-	do { \
-		if (!(tlb)->fullmm) \
-			flush_tlb_range(vma, (vma)->vm_start, (vma)->vm_end); \
-	}  while (0)
-
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
 #include <asm-generic/tlb.h>
 
 #endif /* __ASM_CSKY_TLB_H */
diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile
index 4eb41421ca5b..6f14c924b20d 100644
--- a/arch/csky/kernel/Makefile
+++ b/arch/csky/kernel/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_CSKY_PMU_V1)	+= perf_event.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_callchain.o
 obj-$(CONFIG_HAVE_PERF_REGS)	+= perf_regs.o
+obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
 
 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S
index a4ababf25e24..547b4cd1b24b 100644
--- a/arch/csky/kernel/entry.S
+++ b/arch/csky/kernel/entry.S
@@ -19,11 +19,11 @@
 .endm
 
 .macro	context_tracking
-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
 	mfcr	a0, epsr
 	btsti	a0, 31
 	bt	1f
-	jbsr	context_tracking_user_exit
+	jbsr	user_exit_callable
 	ldw	a0, (sp, LSAVE_A0)
 	ldw	a1, (sp, LSAVE_A1)
 	ldw	a2, (sp, LSAVE_A2)
@@ -159,8 +159,8 @@ ret_from_exception:
 	and	r10, r9
 	cmpnei	r10, 0
 	bt	exit_work
-#ifdef CONFIG_CONTEXT_TRACKING
-	jbsr	context_tracking_user_enter
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+	jbsr	user_enter_callable
 #endif
 1:
 #ifdef CONFIG_PREEMPTION
diff --git a/arch/csky/kernel/jump_label.c b/arch/csky/kernel/jump_label.c
new file mode 100644
index 000000000000..d0e8b21447e1
--- /dev/null
+++ b/arch/csky/kernel/jump_label.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#define NOP32_HI	0xc400
+#define NOP32_LO	0x4820
+#define BSR_LINK	0xe000
+
+void arch_jump_label_transform(struct jump_entry *entry,
+			       enum jump_label_type type)
+{
+	unsigned long addr = jump_entry_code(entry);
+	u16 insn[2];
+	int ret = 0;
+
+	if (type == JUMP_LABEL_JMP) {
+		long offset = jump_entry_target(entry) - jump_entry_code(entry);
+
+		if (WARN_ON(offset & 1 || offset < -67108864 || offset >= 67108864))
+			return;
+
+		offset = offset >> 1;
+
+		insn[0] = BSR_LINK |
+			((uint16_t)((unsigned long) offset >> 16) & 0x3ff);
+		insn[1] = (uint16_t)((unsigned long) offset & 0xffff);
+	} else {
+		insn[0] = NOP32_HI;
+		insn[1] = NOP32_LO;
+	}
+
+	ret = copy_to_kernel_nofault((void *)addr, insn, 4);
+	WARN_ON(ret);
+
+	flush_icache_range(addr, addr + 4);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+				      enum jump_label_type type)
+{
+	/*
+	 * We use the same instructions in the arch_static_branch and
+	 * arch_static_branch_jump inline functions, so there's no
+	 * need to patch them up here.
+	 * The core will call arch_jump_label_transform when those
+	 * instructions need to be replaced.
+	 */
+	arch_jump_label_transform(entry, type);
+}
diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c
index 34ba684d5962..3c6e5c725d81 100644
--- a/arch/csky/kernel/probes/kprobes.c
+++ b/arch/csky/kernel/probes/kprobes.c
@@ -124,6 +124,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
+	if (p->ainsn.api.insn) {
+		free_insn_slot(p->ainsn.api.insn, 0);
+		p->ainsn.api.insn = NULL;
+	}
 }
 
 static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
index c64e7be2045b..106fbf0b6f3b 100644
--- a/arch/csky/kernel/setup.c
+++ b/arch/csky/kernel/setup.c
@@ -31,7 +31,7 @@ static void __init csky_memblock_init(void)
 	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
 	signed long size;
 
-	memblock_reserve(__pa(_stext), _end - _stext);
+	memblock_reserve(__pa(_start), _end - _start);
 
 	early_init_fdt_reserve_self();
 	early_init_fdt_scan_reserved_mem();
@@ -78,7 +78,7 @@ void __init setup_arch(char **cmdline_p)
 	pr_info("Phys. mem: %ldMB\n",
 		(unsigned long) memblock_phys_mem_size()/1024/1024);
 
-	setup_initial_init_mm(_stext, _etext, _edata, _end);
+	setup_initial_init_mm(_start, _etext, _edata, _end);
 
 	parse_early_param();
 
diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S
index e8b1a4a49798..68c980d08482 100644
--- a/arch/csky/kernel/vmlinux.lds.S
+++ b/arch/csky/kernel/vmlinux.lds.S
@@ -22,17 +22,13 @@ SECTIONS
 {
 	. = PAGE_OFFSET + PHYS_OFFSET_OFFSET;
 
-	_stext = .;
-	__init_begin = .;
+	_start = .;
 	HEAD_TEXT_SECTION
-	INIT_TEXT_SECTION(PAGE_SIZE)
-	INIT_DATA_SECTION(PAGE_SIZE)
-	PERCPU_SECTION(L1_CACHE_BYTES)
 	. = ALIGN(PAGE_SIZE);
-	__init_end = .;
 
 	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 		_text = .;
+		_stext = .;
 		VBR_BASE
 		IRQENTRY_TEXT
 		SOFTIRQENTRY_TEXT
@@ -48,7 +44,12 @@
 
 	/* __init_begin __init_end must be page aligned for free_initmem */
 	. = ALIGN(PAGE_SIZE);
-
+	__init_begin = .;
+	INIT_TEXT_SECTION(PAGE_SIZE)
+	INIT_DATA_SECTION(PAGE_SIZE)
+	PERCPU_SECTION(L1_CACHE_BYTES)
+	. = ALIGN(PAGE_SIZE);
+	__init_end = .;
 	_sdata = .;
 
 	RO_DATA(PAGE_SIZE)
diff --git a/arch/csky/mm/asid.c b/arch/csky/mm/asid.c
index b2e914745c1d..7fb6c417bbac 100644
--- a/arch/csky/mm/asid.c
+++ b/arch/csky/mm/asid.c
@@ -27,7 +27,7 @@ static void flush_context(struct asid_info *info)
 	u64 asid;
 
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
-	bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));
+	bitmap_zero(info->map, NUM_CTXT_ASIDS(info));
 
 	for_each_possible_cpu(i) {
 		asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
@@ -178,8 +178,7 @@ int asid_allocator_init(struct asid_info *info,
 	 */
 	WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
 	atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
-	info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
-			    sizeof(*info->map), GFP_KERNEL);
+	info->map = bitmap_zalloc(NUM_CTXT_ASIDS(info), GFP_KERNEL);
 	if (!info->map)
 		return -ENOMEM;
 
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index 7215a46b6b8e..e15f736cca4b 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -285,6 +285,10 @@ good_area:
 		return;
 	}
 
+	/* The fault is fully completed (including releasing mmap lock) */
+	if (fault & VM_FAULT_COMPLETED)
+		return;
+
 	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
 		flags |= FAULT_FLAG_TRIED;
 
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index bf2004aa811a..bde7cabd23df 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -197,3 +197,23 @@ void __init fixaddr_init(void)
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 	fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
 }
+
+static const pgprot_t protection_map[16] = {
+	[VM_NONE]					= PAGE_NONE,
+	[VM_READ]					= PAGE_READ,
+	[VM_WRITE]					= PAGE_READ,
+	[VM_WRITE | VM_READ]				= PAGE_READ,
+	[VM_EXEC]					= PAGE_READ,
+	[VM_EXEC | VM_READ]				= PAGE_READ,
+	[VM_EXEC | VM_WRITE]				= PAGE_READ,
+	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_READ,
+	[VM_SHARED]					= PAGE_NONE,
+	[VM_SHARED | VM_READ]				= PAGE_READ,
+	[VM_SHARED | VM_WRITE]				= PAGE_WRITE,
+	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_WRITE,
+	[VM_SHARED | VM_EXEC]				= PAGE_READ,
+	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READ,
+	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_WRITE,
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_WRITE
+};
+DECLARE_VM_GET_PAGE_PROT
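
For context, the jump-label support wired up above is consumed through the generic static-key API; the sketch below is illustrative only, and the key and function names are not part of this patch. Each branch site is emitted as a 4-byte nop32 and rewritten to a bsr32 by arch_jump_label_transform(), whose displacement field covers roughly +/-64 MiB, matching the +/-67108864-byte bounds in the WARN_ON() above.

#include <linux/jump_label.h>
#include <linux/printk.h>

/* Hypothetical key, for illustration only. */
static DEFINE_STATIC_KEY_FALSE(csky_demo_key);

static void demo_fast_path(void)
{
	/* Emitted as nop32 while the key is false; patched to bsr32 once enabled. */
	if (static_branch_unlikely(&csky_demo_key))
		pr_info("slow path taken\n");
}

static void demo_toggle(void)
{
	/* Enabling or disabling the key patches every site that tests it. */
	static_branch_enable(&csky_demo_key);
	static_branch_disable(&csky_demo_key);
}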