Diffstat (limited to 'arch/xtensa')
37 files changed, 187 insertions, 217 deletions
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 3a9f1e80394a..e997e0119c02 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -21,10 +21,11 @@ config XTENSA
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
 	select GENERIC_STRNCPY_FROM_USER if KASAN
+	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
+	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_COPY_THREAD_TLS
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_EXIT_THREAD
@@ -216,6 +217,20 @@ config HOTPLUG_CPU
 
 	  Say N if you want to disable CPU hotplug.
 
+config SECCOMP
+	bool
+	prompt "Enable seccomp to safely compute untrusted bytecode"
+	help
+	  This kernel feature is useful for number crunching applications
+	  that may need to compute untrusted bytecode during their
+	  execution. By using pipes or other transports made available to
+	  the process as file descriptors supporting the read/write
+	  syscalls, it's possible to isolate those applications in
+	  their own address space using seccomp. Once seccomp is
+	  enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
+	  and the task is only allowed to execute a few safe syscalls
+	  defined by each seccomp mode.
+
 config FAST_SYSCALL_XTENSA
 	bool "Enable fast atomic syscalls"
 	default n
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index 1a14d38d9b33..f6bb352f94b4 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -17,6 +17,8 @@ BIG_ENDIAN := $(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#")
 export BIG_ENDIAN
 
 subdir-y	:= lib
+targets		+= vmlinux.bin vmlinux.bin.gz
+targets		+= uImage xipImage
 
 # Subdirs for the boot loader(s)
 
@@ -35,19 +37,19 @@ boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y))
 
 OBJCOPYFLAGS = --strip-all -R .comment -R .notes -O binary
 
-vmlinux.bin: vmlinux FORCE
+$(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
 
-vmlinux.bin.gz: vmlinux.bin FORCE
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,gzip)
 
-boot-elf: vmlinux.bin
-boot-redboot: vmlinux.bin.gz
+boot-elf: $(obj)/vmlinux.bin
+boot-redboot: $(obj)/vmlinux.bin.gz
 
 UIMAGE_LOADADDR = $(CONFIG_KERNEL_LOAD_ADDRESS)
 UIMAGE_COMPRESSION = gzip
 
-$(obj)/uImage: vmlinux.bin.gz FORCE
+$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE
 	$(call if_changed,uimage)
 	$(Q)$(kecho) '  Kernel: $@ is ready'
diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile
index 12ae1e91cb75..f7c775d53012 100644
--- a/arch/xtensa/boot/boot-elf/Makefile
+++ b/arch/xtensa/boot/boot-elf/Makefile
@@ -15,17 +15,18 @@ export CPPFLAGS_boot.lds += -P -C
 export KBUILD_AFLAGS += -mtext-section-literals
 
 boot-y		:= bootstrap.o
+targets		+= $(boot-y) boot.lds
 
 OBJS		:= $(addprefix $(obj)/,$(boot-y))
 
-$(obj)/Image.o: vmlinux.bin $(OBJS)
+$(obj)/Image.o: $(obj)/../vmlinux.bin $(OBJS)
 	$(Q)$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
-		--add-section image=vmlinux.bin \
+		--add-section image=$< \
 		--set-section-flags image=contents,alloc,load,load,data \
 		$(OBJS) $@
 
 $(obj)/../Image.elf: $(obj)/Image.o $(obj)/boot.lds
-	$(Q)$(LD) $(KBUILD_LDFLAGS) $(LDFLAGS_vmlinux) \
+	$(Q)$(LD) $(KBUILD_LDFLAGS) \
 		-T $(obj)/boot.lds \
 		--build-id=none \
 		-o $@ $(obj)/Image.o
diff --git a/arch/xtensa/boot/boot-redboot/Makefile b/arch/xtensa/boot/boot-redboot/Makefile
index 8632473ad319..07cb24afedc2 100644
--- a/arch/xtensa/boot/boot-redboot/Makefile
+++ b/arch/xtensa/boot/boot-redboot/Makefile
@@ -13,15 +13,16 @@ endif
 LD_ARGS	= -T $(srctree)/$(obj)/boot.ld
 
 boot-y	:= bootstrap.o
+targets	+= $(boot-y)
 
 OBJS	:= $(addprefix $(obj)/,$(boot-y))
 LIBS	:= arch/xtensa/boot/lib/lib.a arch/xtensa/lib/lib.a
 
 LIBGCC	:= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
-$(obj)/zImage.o: vmlinux.bin.gz $(OBJS)
+$(obj)/zImage.o: $(obj)/../vmlinux.bin.gz $(OBJS)
 	$(Q)$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
-		--add-section image=vmlinux.bin.gz \
+		--add-section image=$< \
 		--set-section-flags image=contents,alloc,load,load,data \
 		$(OBJS) $@
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 9718e9593564..c59c42a1221a 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -7,4 +7,5 @@ generic-y += mcs_spinlock.h
 generic-y += param.h
 generic-y += qrwlock.h
 generic-y += qspinlock.h
+generic-y += seccomp.h
 generic-y += user.h
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 3e7c6134ed32..744c2f463845 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -19,8 +19,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
-#define ATOMIC_INIT(i)	{ (i) }
-
 /*
  * This Xtensa implementation assumes that the right mechanism
  * for exclusion is for locking interrupts to level EXCM_LEVEL.
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index a0d50be5a8cb..cf907e5bf2f2 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -145,6 +145,8 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 
 #endif
 
+#define flush_icache_user_range flush_icache_range
+
 /* Ensure consistency between data and instruction cache. */
 #define local_flush_icache_range(start, end) \
 	do { \
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
index 8b687176ad72..243a5fe79d3c 100644
--- a/arch/xtensa/include/asm/checksum.h
+++ b/arch/xtensa/include/asm/checksum.h
@@ -44,8 +44,6 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
 /*
  * Note: when you get a NULL pointer exception here this means someone
  * passed in an incorrect kernel address to one of these functions.
- *
- * If you use these functions directly please don't forget the access_ok().
  */
 static inline
 __wsum csum_partial_copy_nocheck(const void *src, void *dst,
@@ -54,12 +52,17 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
 }
 
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
 static inline
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+__wsum csum_and_copy_from_user(const void __user *src, void *dst,
 				   int len, __wsum sum, int *err_ptr)
 {
-	return csum_partial_copy_generic((__force const void *)src, dst,
+	if (access_ok(src, len))
+		return csum_partial_copy_generic((__force const void *)src, dst,
 					len, sum, err_ptr, NULL);
+	if (len)
+		*err_ptr = -EFAULT;
+	return sum;
 }
 
 /*
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index cfb8696917e9..a06ffb0c61c7 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -13,9 +13,9 @@
 #ifndef _ASM_FIXMAP_H
 #define _ASM_FIXMAP_H
 
-#include <asm/pgtable.h>
 #ifdef CONFIG_HIGHMEM
 #include <linux/threads.h>
+#include <linux/pgtable.h>
 #include <asm/kmap_types.h>
 #endif
 
@@ -76,12 +76,4 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
 
 #endif
 
-#define kmap_get_fixmap_pte(vaddr) \
-	pte_offset_kernel( \
-		pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), \
-						 (vaddr)), \
-					(vaddr)), \
-			   (vaddr)), \
-		(vaddr))
-
 #endif
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 04e9340eac4b..eac503215f17 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -13,10 +13,10 @@
 #define _XTENSA_HIGHMEM_H
 
 #include <linux/wait.h>
+#include <linux/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 #include <asm/kmap_types.h>
-#include <asm/pgtable.h>
 
 #define PKMAP_BASE		((FIXADDR_START - \
 				  (LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
@@ -63,38 +63,11 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
 
 extern pte_t *pkmap_page_table;
 
-void *kmap_high(struct page *page);
-void kunmap_high(struct page *page);
-
-static inline void *kmap(struct page *page)
-{
-	/* Check if this memory layout is broken because PKMAP overlaps
-	 * page table.
-	 */
-	BUILD_BUG_ON(PKMAP_BASE <
-		     TLBTEMP_BASE_1 + TLBTEMP_SIZE);
-	BUG_ON(in_interrupt());
-	if (!PageHighMem(page))
-		return page_address(page);
-	return kmap_high(page);
-}
-
-static inline void kunmap(struct page *page)
-{
-	BUG_ON(in_interrupt());
-	if (!PageHighMem(page))
-		return;
-	kunmap_high(page);
-}
-
 static inline void flush_cache_kmaps(void)
 {
 	flush_cache_all();
 }
 
-void *kmap_atomic(struct page *page);
-void __kunmap_atomic(void *kvaddr);
-
 void kmap_init(void);
 
 #endif
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index e3e1d9a1ef69..05cb13dfe6f4 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -6,7 +6,7 @@
  * For the new V3 MMU we remap the TLB from virtual == physical
  * to the standard Linux mapping used in earlier MMU's.
  *
- * The the MMU we also support a new configuration register that
+ * For the MMU we also support a new configuration register that
  * specifies how the S32C1I instruction operates with the cache
  * controller.
  *
@@ -24,7 +24,7 @@
 #define _XTENSA_INITIALIZE_MMU_H
 
 #include <linux/init.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
 #include <asm/vectors.h>
 
 #if XCHAL_HAVE_PTP_MMU
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index de5e6cbbafe4..74923ef3b228 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -18,10 +18,10 @@
 #include <linux/stringify.h>
 #include <linux/sched.h>
 #include <linux/mm_types.h>
+#include <linux/pgtable.h>
 
 #include <asm/vectors.h>
 
-#include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index 1d38f0e755ba..d3a22da4d2c9 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -8,9 +8,14 @@
 #ifndef _XTENSA_PGALLOC_H
 #define _XTENSA_PGALLOC_H
 
+#ifdef CONFIG_MMU
 #include <linux/highmem.h>
 #include <linux/slab.h>
 
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#include <asm-generic/pgalloc.h>
+
 /*
  * Allocating and freeing a pmd is trivial: the 1-entry pmd is
  * inside the pgd, so has no extra memory associated with it.
@@ -28,50 +33,37 @@ pgd_alloc(struct mm_struct *mm)
 	return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
 }
 
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+static inline void ptes_clear(pte_t *ptep)
 {
-	free_page((unsigned long)pgd);
+	int i;
+
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		pte_clear(NULL, 0, ptep + i);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
 	pte_t *ptep;
-	int i;
 
-	ptep = (pte_t *)__get_free_page(GFP_KERNEL);
+	ptep = (pte_t *)__pte_alloc_one_kernel(mm);
 	if (!ptep)
 		return NULL;
-	for (i = 0; i < 1024; i++)
-		pte_clear(NULL, 0, ptep + i);
+	ptes_clear(ptep);
 	return ptep;
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-	pte_t *pte;
 	struct page *page;
 
-	pte = pte_alloc_one_kernel(mm);
-	if (!pte)
+	page = __pte_alloc_one(mm, GFP_PGTABLE_USER);
+	if (!page)
 		return NULL;
-	page = virt_to_page(pte);
-	if (!pgtable_pte_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
-	}
+	ptes_clear(page_address(page));
 	return page;
 }
 
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
-	pgtable_pte_page_dtor(pte);
-	__free_page(pte);
-}
 #define pmd_pgtable(pmd) pmd_page(pmd)
+#endif /* CONFIG_MMU */
 
 #endif /* _XTENSA_PGALLOC_H */
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 8be0c0568c50..fa054a1772e1 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -267,7 +267,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
 
-static inline pte_t pte_wrprotect(pte_t pte)	
+static inline pte_t pte_wrprotect(pte_t pte)
 	{ pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
 static inline pte_t pte_mkclean(pte_t pte)
 	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
@@ -359,22 +359,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 	update_pte(ptep, pte_wrprotect(pte));
 }
 
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm,address)	((mm)->pgd + pgd_index(address))
-
-#define pgd_index(address)	((address) >> PGDIR_SHIFT)
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir,addr) \
-	((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
-#define pte_unmap(pte)		do { } while (0)
-
-
 /*
  * Encode and decode a swap and file entry.
  */
@@ -438,6 +422,4 @@ typedef pte_t *pte_addr_t;
  */
 #define HAVE_ARCH_UNMAPPED_AREA
 
-#include <asm-generic/pgtable.h>
-
 #endif /* _XTENSA_PGTABLE_H */
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index f092cc3f4e66..6acbbe0d87d3 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -55,6 +55,10 @@ struct thread_info {
 	mm_segment_t		addr_limit;	/* thread address space */
 
 	unsigned long		cpenable;
+#if XCHAL_HAVE_EXCLUSIVE
+	/* result of the most recent exclusive store */
+	unsigned long		atomctl8;
+#endif
 
 	/* Allocate storage for extra user states and coprocessor states. */
 #if XTENSA_HAVE_COPROCESSORS
@@ -111,15 +115,20 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTORE_SIGMASK	6	/* restore signal mask in do_signal() */
 #define TIF_NOTIFY_RESUME	7	/* callback before returning to user */
 #define TIF_DB_DISABLED		8	/* debug trap disabled for syscall */
+#define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
+#define TIF_SECCOMP		10	/* secure computing */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 
 #define _TIF_WORK_MASK		(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
-				 _TIF_SYSCALL_TRACEPOINT)
+				 _TIF_SYSCALL_TRACEPOINT | \
+				 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)
 
 #define THREAD_SIZE KERNEL_STACK_SIZE
 #define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT)
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 47b7702aaa40..b9758119feca 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -35,7 +35,7 @@
 #define get_fs()	(current->thread.current_ds)
 #define set_fs(val)	(current->thread.current_ds = (val))
 
-#define segment_eq(a, b)	((a).seg == (b).seg)
+#define uaccess_kernel()	(get_fs().seg == KERNEL_DS.seg)
 
 #define __kernel_ok (uaccess_kernel())
 #define __user_ok(addr, size) \
@@ -84,7 +84,7 @@ extern long __put_user_bad(void);
 #define __put_user_check(x, ptr, size) \
 ({ \
 	long __pu_err = -EFAULT; \
-	__typeof__(*(ptr)) *__pu_addr = (ptr); \
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
 	if (access_ok(__pu_addr, size)) \
 		__put_user_size((x), __pu_addr, (size), __pu_err); \
 	__pu_err; \
@@ -180,11 +180,11 @@ __asm__ __volatile__( \
 #define __get_user_check(x, ptr, size) \
 ({ \
 	long __gu_err = -EFAULT; \
-	const __typeof__(*(ptr)) *__gu_addr = (ptr); \
+	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
 	if (access_ok(__gu_addr, size)) \
 		__get_user_size((x), __gu_addr, (size), __gu_err); \
 	else \
-		(x) = 0; \
+		(x) = (__typeof__(*(ptr)))0; \
 	__gu_err; \
 })
 
@@ -202,13 +202,15 @@ do { \
 		u64 __x; \
 		if (unlikely(__copy_from_user(&__x, ptr, 8))) { \
 			retval = -EFAULT; \
-			(x) = 0; \
+			(x) = (__typeof__(*(ptr)))0; \
 		} else { \
-			(x) = *(__force __typeof__((ptr)))&__x; \
+			(x) = *(__force __typeof__(*(ptr)) *)&__x; \
 		} \
 		break; \
 	} \
-	default: (x) = 0; __get_user_bad(); \
+	default: \
+		(x) = (__typeof__(*(ptr)))0; \
+		__get_user_bad(); \
 	} \
 } while (0)
 
@@ -270,15 +272,15 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
  */
 
 static inline unsigned long
-__xtensa_clear_user(void *addr, unsigned long size)
+__xtensa_clear_user(void __user *addr, unsigned long size)
 {
-	if (!__memset(addr, 0, size))
+	if (!__memset((void __force *)addr, 0, size))
 		return size;
 	return 0;
 }
 
 static inline unsigned long
-clear_user(void *addr, unsigned long size)
+clear_user(void __user *addr, unsigned long size)
 {
 	if (access_ok(addr, size))
 		return __xtensa_clear_user(addr, size);
@@ -290,10 +292,10 @@ clear_user(void *addr, unsigned long size)
 
 #ifndef CONFIG_GENERIC_STRNCPY_FROM_USER
 
-extern long __strncpy_user(char *, const char *, long);
+extern long __strncpy_user(char *dst, const char __user *src, long count);
 
 static inline long
-strncpy_from_user(char *dst, const char *src, long count)
+strncpy_from_user(char *dst, const char __user *src, long count)
 {
 	if (access_ok(src, 1))
 		return __strncpy_user(dst, src, count);
@@ -306,13 +308,11 @@ long strncpy_from_user(char *dst, const char *src, long count);
 /*
  * Return the size of a string (including the ending 0!)
  */
-extern long __strnlen_user(const char *, long);
+extern long __strnlen_user(const char __user *str, long len);
 
-static inline long strnlen_user(const char *str, long len)
+static inline long strnlen_user(const char __user *str, long len)
 {
-	unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;
-
-	if ((unsigned long)str > top)
+	if (!access_ok(str, 1))
 		return 0;
 	return __strnlen_user(str, len);
 }
diff --git a/arch/xtensa/include/uapi/asm/ptrace.h b/arch/xtensa/include/uapi/asm/ptrace.h
index 2ec0f9100a06..50db3e0a6341 100644
--- a/arch/xtensa/include/uapi/asm/ptrace.h
+++ b/arch/xtensa/include/uapi/asm/ptrace.h
@@ -50,7 +50,8 @@ struct user_pt_regs {
 	__u32 windowstart;
 	__u32 windowbase;
 	__u32 threadptr;
-	__u32 reserved[7 + 48];
+	__u32 syscall;
+	__u32 reserved[6 + 48];
 	__u32 a[64];
 };
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 33a257b33723..dc5c83cad9be 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -93,6 +93,9 @@ int main(void)
 	DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
 	DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
 	DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
+#if XCHAL_HAVE_EXCLUSIVE
+	DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8));
+#endif
 #if XTENSA_HAVE_COPROCESSORS
 	DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
 	DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 06fbb0a171f1..703cf6205efe 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -13,6 +13,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/pgtable.h>
 #include <asm/asm-offsets.h>
 #include <asm/asmmacro.h>
 #include <asm/processor.h>
@@ -22,7 +23,6 @@
 #include <asm/unistd.h>
 #include <asm/ptrace.h>
 #include <asm/current.h>
-#include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/signal.h>
 #include <asm/tlbflush.h>
@@ -374,6 +374,11 @@ common_exception:
 	s32i	a2, a1, PT_LCOUNT
 #endif
 
+#if XCHAL_HAVE_EXCLUSIVE
+	/* Clear exclusive access monitor set by interrupted code */
+	clrex
+#endif
+
 	/* It is now save to restore the EXC_TABLE_FIXUP variable. */
 
 	rsr	a2, exccause
@@ -959,14 +964,14 @@ ENDPROC(unrecoverable_exception)
  * of the proper size instead.
  *
  * This algorithm simply backs out the register changes started by the user
- * excpetion handler, makes it appear that we have started a window underflow
+ * exception handler, makes it appear that we have started a window underflow
 * by rotating the window back and then setting the old window base (OWB) in
 * the 'ps' register with the rolled back window base. The 'movsp' instruction
 * will be re-executed and this time since the next window frames is in the
 * active AR registers it won't cause an exception.
 *
 * If the WindowUnderflow code gets a TLB miss the page will get mapped
- * the the partial windeowUnderflow will be handeled in the double exception
+ * the partial WindowUnderflow will be handled in the double exception
 * handler.
 *
 * Entry condition:
@@ -2020,6 +2025,12 @@ ENTRY(_switch_to)
 	s32i	a3, a4, THREAD_CPENABLE
 #endif
 
+#if XCHAL_HAVE_EXCLUSIVE
+	l32i	a3, a5, THREAD_ATOMCTL8
+	getex	a3
+	s32i	a3, a4, THREAD_ATOMCTL8
+#endif
+
 	/* Flush register file. */
 
 	spill_registers_kernel
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index 9bae79f70301..a0d05c8598d0 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -362,9 +362,7 @@ irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
 	struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
 	unsigned i;
 
-	for (i = find_first_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS);
-	     i < XCHAL_NUM_PERF_COUNTERS;
-	     i = find_next_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS, i + 1)) {
+	for_each_set_bit(i, ev->used_mask, XCHAL_NUM_PERF_COUNTERS) {
 		uint32_t v = get_er(XTENSA_PMU_PMSTAT(i));
 		struct perf_event *event = ev->event[i];
 		struct hw_perf_event *hwc = &event->hw;
@@ -401,7 +399,7 @@ static struct pmu xtensa_pmu = {
 	.read = xtensa_pmu_read,
 };
 
-static int xtensa_pmu_setup(int cpu)
+static int xtensa_pmu_setup(unsigned int cpu)
 {
 	unsigned i;
 
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 3edecc41ef8c..397a7de56377 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -37,7 +37,6 @@
 #include <linux/slab.h>
 #include <linux/rcupdate.h>
 
-#include <asm/pgtable.h>
 #include <linux/uaccess.h>
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -202,7 +201,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
  * involved.  Much simpler to just not copy those live frames across.
  */
-int copy_thread_tls(unsigned long clone_flags, unsigned long usp_thread_fn,
+int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
 		unsigned long thread_fn_arg, struct task_struct *p,
 		unsigned long tls)
 {
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 145742d70a9f..bb3f4797d212 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -12,6 +12,7 @@
  * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
  */
 
+#include <linux/audit.h>
 #include <linux/errno.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/kernel.h>
@@ -21,6 +22,7 @@
 #include <linux/regset.h>
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
+#include <linux/seccomp.h>
 #include <linux/security.h>
 #include <linux/signal.h>
 #include <linux/smp.h>
@@ -33,13 +35,11 @@
 #include <asm/coprocessor.h>
 #include <asm/elf.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/ptrace.h>
 
 static int gpr_get(struct task_struct *target,
 		   const struct user_regset *regset,
-		   unsigned int pos, unsigned int count,
-		   void *kbuf, void __user *ubuf)
+		   struct membuf to)
 {
 	struct pt_regs *regs = task_pt_regs(target);
 	struct user_pt_regs newregs = {
@@ -52,6 +52,7 @@ static int gpr_get(struct task_struct *target,
 		.threadptr = regs->threadptr,
 		.windowbase = regs->windowbase,
 		.windowstart = regs->windowstart,
+		.syscall = regs->syscall,
 	};
 
 	memcpy(newregs.a,
@@ -61,8 +62,7 @@ static int gpr_get(struct task_struct *target,
 	       regs->areg,
 	       (WSBITS - regs->windowbase) * 16);
 
-	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &newregs, 0, -1);
+	return membuf_write(&to, &newregs, sizeof(newregs));
 }
 
 static int gpr_set(struct task_struct *target,
@@ -91,6 +91,9 @@ static int gpr_set(struct task_struct *target,
 	regs->sar = newregs.sar;
 	regs->threadptr = newregs.threadptr;
 
+	if (newregs.syscall)
+		regs->syscall = newregs.syscall;
+
 	if (newregs.windowbase != regs->windowbase ||
 	    newregs.windowstart != regs->windowstart) {
 		u32 rotws, wmask;
@@ -116,8 +119,7 @@ static int gpr_set(struct task_struct *target,
 
 static int tie_get(struct task_struct *target,
 		   const struct user_regset *regset,
-		   unsigned int pos, unsigned int count,
-		   void *kbuf, void __user *ubuf)
+		   struct membuf to)
 {
 	int ret;
 	struct pt_regs *regs = task_pt_regs(target);
@@ -142,8 +144,7 @@ static int tie_get(struct task_struct *target,
 	newregs->cp6 = ti->xtregs_cp.cp6;
 	newregs->cp7 = ti->xtregs_cp.cp7;
 #endif
-	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				  newregs, 0, -1);
+	ret = membuf_write(&to, newregs, sizeof(*newregs));
 	kfree(newregs);
 	return ret;
 }
@@ -198,7 +199,7 @@ static const struct user_regset xtensa_regsets[] = {
 		.n = sizeof(struct user_pt_regs) / sizeof(u32),
 		.size = sizeof(u32),
 		.align = sizeof(u32),
-		.get = gpr_get,
+		.regset_get = gpr_get,
 		.set = gpr_set,
 	},
 	[REGSET_TIE] = {
@@ -206,7 +207,7 @@ static const struct user_regset xtensa_regsets[] = {
 		.n = sizeof(elf_xtregs_t) / sizeof(u32),
 		.size = sizeof(u32),
 		.align = sizeof(u32),
-		.get = tie_get,
+		.regset_get = tie_get,
 		.set = tie_set,
 	},
 };
@@ -555,7 +556,8 @@ int do_syscall_trace_enter(struct pt_regs *regs)
 		return 0;
 	}
 
-	if (regs->syscall == NO_SYSCALL) {
+	if (regs->syscall == NO_SYSCALL ||
+	    secure_computing() == -1) {
 		do_syscall_trace_leave(regs);
 		return 0;
 	}
@@ -563,6 +565,9 @@ int do_syscall_trace_enter(struct pt_regs *regs)
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_enter(regs, syscall_get_nr(current, regs));
 
+	audit_syscall_entry(regs->syscall, regs->areg[6],
+			    regs->areg[3], regs->areg[4],
+			    regs->areg[5]);
 	return 1;
 }
 
@@ -570,6 +575,8 @@ void do_syscall_trace_leave(struct pt_regs *regs)
 {
 	int step;
 
+	audit_syscall_exit(regs);
+
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_exit(regs, regs_return_value(regs));
 
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 3880c765d448..be2c78f71695 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -37,7 +37,6 @@
 #include <asm/bootparam.h>
 #include <asm/kasan.h>
 #include <asm/mmu_context.h>
-#include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/timex.h>
 #include <asm/platform.h>
@@ -725,7 +724,8 @@ c_start(struct seq_file *f, loff_t *pos)
 static void *
 c_next(struct seq_file *f, void *v, loff_t *pos)
 {
-	return NULL;
+	++*pos;
+	return c_start(f, pos);
 }
 
 static void
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 76cee341507b..b3b17d6c50f0 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -448,7 +448,7 @@ static void do_signal(struct pt_regs *regs)
 				regs->areg[2] = -EINTR;
 				break;
 			}
-			/* fallthrough */
+			fallthrough;
 		case -ERESTARTNOINTR:
 			regs->areg[2] = regs->syscall;
 			regs->pc -= 3;
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 85a9ab1bc04d..6276e3c2d3fc 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -222,7 +222,7 @@
 204	common	quotactl			sys_quotactl
 # 205 was old nfsservctl
 205	common	nfsservctl			sys_ni_syscall
-206	common	_sysctl				sys_sysctl
+206	common	_sysctl				sys_ni_syscall
 207	common	bdflush				sys_bdflush
 208	common	uname				sys_newuname
 209	common	sysinfo				sys_sysinfo
@@ -406,5 +406,7 @@
 433	common	fspick				sys_fspick
 434	common	pidfd_open			sys_pidfd_open
 435	common	clone3				sys_clone3
+436	common	close_range			sys_close_range
 437	common	openat2				sys_openat2
 438	common	pidfd_getfd			sys_pidfd_getfd
+439	common	faccessat2			sys_faccessat2
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 0976e27b8d5d..efc3a29cde80 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -34,12 +34,12 @@
 #include <linux/delay.h>
 #include <linux/hardirq.h>
 #include <linux/ratelimit.h>
+#include <linux/pgtable.h>
 #include <asm/stacktrace.h>
 #include <asm/ptrace.h>
 #include <asm/timex.h>
 #include <linux/uaccess.h>
-#include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/traps.h>
 #include <asm/hw_breakpoint.h>
@@ -479,25 +479,29 @@ void show_regs(struct pt_regs * regs)
 
 static int show_trace_cb(struct stackframe *frame, void *data)
 {
+	const char *loglvl = data;
+
 	if (kernel_text_address(frame->pc))
-		pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc);
+		printk("%s [<%08lx>] %pB\n",
+		       loglvl, frame->pc, (void *)frame->pc);
 	return 0;
 }
 
-void show_trace(struct task_struct *task, unsigned long *sp)
+static void show_trace(struct task_struct *task, unsigned long *sp,
+		       const char *loglvl)
 {
 	if (!sp)
 		sp = stack_pointer(task);
 
-	pr_info("Call Trace:\n");
-	walk_stackframe(sp, show_trace_cb, NULL);
+	printk("%sCall Trace:\n", loglvl);
+	walk_stackframe(sp, show_trace_cb, (void *)loglvl);
 }
 
 #define STACK_DUMP_ENTRY_SIZE 4
 #define STACK_DUMP_LINE_SIZE 32
 static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
 
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
 {
 	size_t len;
 
@@ -507,11 +511,11 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 	len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
 		  kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
 
-	pr_info("Stack:\n");
-	print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE,
+	printk("%sStack:\n", loglvl);
+	print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
 		       STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
 		       sp, len, false);
-	show_trace(task, sp);
+	show_trace(task, sp, loglvl);
 }
 
 DEFINE_SPINLOCK(die_lock);
@@ -530,7 +534,7 @@ void die(const char * str, struct pt_regs * regs, long err)
 	pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr);
 	show_regs(regs);
 	if (!user_mode(regs))
-		show_stack(NULL, (unsigned long*)regs->areg[1]);
+		show_stack(NULL, (unsigned long *)regs->areg[1], KERN_INFO);
 
 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 	spin_unlock_irq(&die_lock);
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 95ad1e773991..1a7538ccfc5a 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -43,11 +43,11 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/pgtable.h>
 #include <asm/asmmacro.h>
 #include <asm/ptrace.h>
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
-#include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/thread_info.h>
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 4092555828b1..415fe7faa37f 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -25,7 +25,6 @@
 #include <asm/dma.h>
 #include <asm/io.h>
 #include <asm/page.h>
-#include <asm/pgalloc.h>
 #include <asm/ftrace.h>
 #ifdef CONFIG_BLK_DEV_FD
 #include <asm/floppy.h>
@@ -87,13 +86,13 @@ void __xtensa_libgcc_window_spill(void)
 }
 EXPORT_SYMBOL(__xtensa_libgcc_window_spill);
 
-unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v)
+unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
 {
 	BUG();
 }
 EXPORT_SYMBOL(__sync_fetch_and_and_4);
 
-unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
+unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v)
 {
 	BUG();
 }
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index b27359e2a464..5835406b3cec 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -24,14 +24,13 @@
 #include <linux/memblock.h>
 #include <linux/swap.h>
 #include <linux/pagemap.h>
+#include <linux/pgtable.h>
 
 #include <asm/bootparam.h>
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
 
 /*
  * Note:
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index e7172bd53ced..7666408ce12a 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -20,7 +20,6 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/hardirq.h>
-#include <asm/pgalloc.h>
 
 DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
 void bad_page_fault(struct pt_regs*, unsigned long, int);
@@ -73,8 +72,11 @@ void do_page_fault(struct pt_regs *regs)
 
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
+
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 
 	if (!vma)
@@ -108,7 +110,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
 	if (fault_signal_pending(fault, regs))
 		return;
@@ -123,14 +125,10 @@ good_area:
 			BUG();
 	}
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR)
-			current->maj_flt++;
-		else
-			current->min_flt++;
 		if (fault & VM_FAULT_RETRY) {
 			flags |= FAULT_FLAG_TRIED;
 
-			 /* No need to up_read(&mm->mmap_sem) as we would
+			 /* No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
@@ -139,20 +137,14 @@ good_area:
 		}
 	}
 
-	up_read(&mm->mmap_sem);
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-	if (flags & VM_FAULT_MAJOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	else
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
-
+	mmap_read_unlock(mm);
 	return;
 
 	/* Something tried to access memory that isn't in our memory map..
 	 * Fix it, but check if it's kernel or user first..
 	 */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (user_mode(regs)) {
 		current->thread.bad_vaddr = address;
 		current->thread.error_code = is_write;
@@ -167,7 +159,7 @@ bad_area:
 	 * us unable to handle the page fault gracefully.
 	 */
 out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		bad_page_fault(regs, address, SIGKILL);
 	else
@@ -175,7 +167,7 @@ out_of_memory:
 	return;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
index 184ceadccc1a..673196fe862e 100644
--- a/arch/xtensa/mm/highmem.c
+++ b/arch/xtensa/mm/highmem.c
@@ -37,29 +37,24 @@ static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
 		color;
 }
 
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	idx = kmap_idx(kmap_atomic_idx_push(),
 		       DCACHE_ALIAS(page_to_phys(page)));
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(kmap_pte + idx)));
 #endif
-	set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));
+	set_pte(kmap_pte + idx, mk_pte(page, prot));
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
 
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
 {
 	if (kvaddr >= (void *)FIXADDR_START &&
 	    kvaddr < (void *)FIXADDR_TOP) {
@@ -78,18 +73,19 @@ void __kunmap_atomic(void *kvaddr)
 
 		kmap_atomic_idx_pop();
 	}
-
-	pagefault_enable();
-	preempt_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
 
 void __init kmap_init(void)
 {
 	unsigned long kmap_vstart;
 
+	/* Check if this memory layout is broken because PKMAP overlaps
+	 * page table.
+	 */
+	BUILD_BUG_ON(PKMAP_BASE < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
 	/* cache the first kmap pte */
 	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+	kmap_pte = virt_to_kpte(kmap_vstart);
 	kmap_waitqueues_init();
 }
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 19c625e6d81f..a05b306cf371 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -70,13 +70,13 @@ void __init bootmem_init(void)
 
 void __init zones_init(void)
 {
 	/* All pages are DMA-able, so we put them all in the DMA zone.
 	 */
-	unsigned long zones_size[MAX_NR_ZONES] = {
-		[ZONE_NORMAL] = max_low_pfn - ARCH_PFN_OFFSET,
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = {
+		[ZONE_NORMAL] = max_low_pfn,
 #ifdef CONFIG_HIGHMEM
-		[ZONE_HIGHMEM] = max_pfn - max_low_pfn,
+		[ZONE_HIGHMEM] = max_pfn,
 #endif
 	};
-	free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
+	free_area_init(max_zone_pfn);
 }
 
 #ifdef CONFIG_HIGHMEM
diff --git a/arch/xtensa/mm/ioremap.c b/arch/xtensa/mm/ioremap.c
index 9ea3f21d60c7..a400188c16b9 100644
--- a/arch/xtensa/mm/ioremap.c
+++ b/arch/xtensa/mm/ioremap.c
@@ -7,9 +7,9 @@
 
 #include <linux/io.h>
 #include <linux/vmalloc.h>
+#include <linux/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/io.h>
-#include <asm/pgtable.h>
 
 static void __iomem *xtensa_ioremap(unsigned long paddr, unsigned long size,
 				    pgprot_t prot)
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index e3baa21ff24c..1fef24db2ff6 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -19,10 +19,7 @@
 void __init kasan_early_init(void)
 {
 	unsigned long vaddr = KASAN_SHADOW_START;
-	pgd_t *pgd = pgd_offset_k(vaddr);
-	p4d_t *p4d = p4d_offset(pgd, vaddr);
-	pud_t *pud = pud_offset(p4d, vaddr);
-	pmd_t *pmd = pmd_offset(pud, vaddr);
+	pmd_t *pmd = pmd_off_k(vaddr);
 	int i;
 
 	for (i = 0; i < PTRS_PER_PTE; ++i)
@@ -43,10 +40,7 @@ static void __init populate(void *start, void *end)
 	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
 	unsigned long i, j;
 	unsigned long vaddr = (unsigned long)start;
-	pgd_t *pgd = pgd_offset_k(vaddr);
-	p4d_t *p4d = p4d_offset(pgd, vaddr);
-	pud_t *pud = pud_offset(p4d, vaddr);
-	pmd_t *pmd = pmd_offset(pud, vaddr);
+	pmd_t *pmd = pmd_off_k(vaddr);
 	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
 
 	if (!pte)
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index 6aa036c427c3..25cd67debee6 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -14,8 +14,8 @@
 
 #include <linux/linkage.h>
+#include <linux/pgtable.h>
 
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/asmmacro.h>
 #include <asm/cacheasm.h>
 #include <asm/tlbflush.h>
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 37e478a27877..fd2193df8a14 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -21,10 +21,7 @@
 #if defined(CONFIG_HIGHMEM)
 static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
 {
-	pgd_t *pgd = pgd_offset_k(vaddr);
-	p4d_t *p4d = p4d_offset(pgd, vaddr);
-	pud_t *pud = pud_offset(p4d, vaddr);
-	pmd_t *pmd = pmd_offset(pud, vaddr);
+	pmd_t *pmd = pmd_off_k(vaddr);
 	pte_t *pte;
 	unsigned long i;
 
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 49322b66cda9..3447556d276d 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -101,9 +101,9 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
 	spin_unlock(&dev->lock);
 }
 
-static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t simdisk_submit_bio(struct bio *bio)
 {
-	struct simdisk *dev = q->queuedata;
+	struct simdisk *dev = bio->bi_disk->private_data;
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 	sector_t sector = bio->bi_iter.bi_sector;
@@ -127,8 +127,6 @@ static int simdisk_open(struct block_device *bdev, fmode_t mode)
 	struct simdisk *dev = bdev->bd_disk->private_data;
 
 	spin_lock(&dev->lock);
-	if (!dev->users)
-		check_disk_change(bdev);
 	++dev->users;
 	spin_unlock(&dev->lock);
 	return 0;
@@ -144,6 +142,7 @@ static void simdisk_release(struct gendisk *disk, fmode_t mode)
 
 static const struct block_device_operations simdisk_ops = {
 	.owner		= THIS_MODULE,
+	.submit_bio	= simdisk_submit_bio,
 	.open		= simdisk_open,
 	.release	= simdisk_release,
 };
@@ -267,14 +266,12 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
 	spin_lock_init(&dev->lock);
 	dev->users = 0;
 
-	dev->queue = blk_alloc_queue(simdisk_make_request, NUMA_NO_NODE);
+	dev->queue = blk_alloc_queue(NUMA_NO_NODE);
 	if (dev->queue == NULL) {
 		pr_err("blk_alloc_queue failed\n");
 		goto out_alloc_queue;
 	}
 
-	dev->queue->queuedata = dev;
-
 	dev->gd = alloc_disk(SIMDISK_MINORS);
 	if (dev->gd == NULL) {
 		pr_err("alloc_disk failed\n");
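
The CONFIG_SECCOMP help text added in arch/xtensa/Kconfig above describes entering seccomp with prctl(PR_SET_SECCOMP). The following is a minimal userspace sketch of strict-mode usage, assuming a kernel built with the new option; it is illustrative only and not part of this series:

/*
 * Illustrative sketch (not from the patch): enter strict seccomp mode.
 * After the prctl() call only read(), write(), _exit() and sigreturn()
 * are permitted; any other syscall terminates the task.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <linux/seccomp.h>

int main(void)
{
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0) {
		/* prctl failed, so the sandbox was never entered */
		perror("prctl(PR_SET_SECCOMP)");
		return 1;
	}

	/* write() is one of the few syscalls still allowed here */
	write(1, "sandboxed\n", 10);
	_exit(0);
}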
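
The perf_event.c hunk replaces an open-coded find_first_bit()/find_next_bit() loop with the equivalent for_each_set_bit() helper from <linux/bitops.h>. A hedged kernel-style sketch of the same pattern follows; walk_used_counters() is a hypothetical helper, not code from this series:

#include <linux/bitops.h>
#include <linux/printk.h>

/* Hypothetical helper: visits exactly the bit indices set in used_mask,
 * just as the removed find_first_bit()/find_next_bit() loop did. */
static void walk_used_counters(const unsigned long *used_mask, unsigned int nbits)
{
	unsigned int i;

	for_each_set_bit(i, used_mask, nbits)
		pr_debug("counter %u is in use\n", i);
}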