Diffstat (limited to 'include/asm-generic')
-rw-r--r-- | include/asm-generic/5level-fixup.h       |   58
-rw-r--r-- | include/asm-generic/Kbuild               |    1
-rw-r--r-- | include/asm-generic/barrier.h            |   16
-rw-r--r-- | include/asm-generic/bug.h                |    9
-rw-r--r-- | include/asm-generic/cacheflush.h         |   25
-rw-r--r-- | include/asm-generic/checksum.h           |    9
-rw-r--r-- | include/asm-generic/hugetlb.h            |    2
-rw-r--r-- | include/asm-generic/hyperv-tlfs.h        |  497
-rw-r--r-- | include/asm-generic/io.h                 |   66
-rw-r--r-- | include/asm-generic/mshyperv.h           |    2
-rw-r--r-- | include/asm-generic/pgtable-nop4d-hack.h |   64
-rw-r--r-- | include/asm-generic/pgtable-nopmd.h      |    1
-rw-r--r-- | include/asm-generic/pgtable-nopud.h      |    5
-rw-r--r-- | include/asm-generic/pgtable.h            | 1262
-rw-r--r-- | include/asm-generic/sections.h           |    3
-rw-r--r-- | include/asm-generic/topology.h           |    2
-rw-r--r-- | include/asm-generic/vermagic.h           |    7
-rw-r--r-- | include/asm-generic/vmlinux.lds.h        |   10
18 files changed, 596 insertions, 1443 deletions
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h deleted file mode 100644 index 4c74b1c1d13b..000000000000 --- a/include/asm-generic/5level-fixup.h +++ /dev/null @@ -1,58 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _5LEVEL_FIXUP_H -#define _5LEVEL_FIXUP_H - -#define __ARCH_HAS_5LEVEL_HACK -#define __PAGETABLE_P4D_FOLDED 1 - -#define P4D_SHIFT PGDIR_SHIFT -#define P4D_SIZE PGDIR_SIZE -#define P4D_MASK PGDIR_MASK -#define MAX_PTRS_PER_P4D 1 -#define PTRS_PER_P4D 1 - -#define p4d_t pgd_t - -#define pud_alloc(mm, p4d, address) \ - ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \ - NULL : pud_offset(p4d, address)) - -#define p4d_alloc(mm, pgd, address) (pgd) -#define p4d_offset(pgd, start) (pgd) - -#ifndef __ASSEMBLY__ -static inline int p4d_none(p4d_t p4d) -{ - return 0; -} - -static inline int p4d_bad(p4d_t p4d) -{ - return 0; -} - -static inline int p4d_present(p4d_t p4d) -{ - return 1; -} -#endif - -#define p4d_ERROR(p4d) do { } while (0) -#define p4d_clear(p4d) pgd_clear(p4d) -#define p4d_val(p4d) pgd_val(p4d) -#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud) -#define p4d_populate_safe(mm, p4d, pud) pgd_populate(mm, p4d, pud) -#define p4d_page(p4d) pgd_page(p4d) -#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d) - -#define __p4d(x) __pgd(x) -#define set_p4d(p4dp, p4d) set_pgd(p4dp, p4d) - -#undef p4d_free_tlb -#define p4d_free_tlb(tlb, x, addr) do { } while (0) -#define p4d_free(mm, x) do { } while (0) - -#undef p4d_addr_end -#define p4d_addr_end(addr, end) (end) - -#endif diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 36341dfded70..44ec80e70518 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -56,6 +56,7 @@ mandatory-y += topology.h mandatory-y += trace_clock.h mandatory-y += uaccess.h mandatory-y += unaligned.h +mandatory-y += vermagic.h mandatory-y += vga.h mandatory-y += word-at-a-time.h mandatory-y += xor.h diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 85b28eb80b11..2eacaf7d62f6 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -128,10 +128,10 @@ do { \ #ifndef __smp_load_acquire #define __smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = READ_ONCE(*p); \ + __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ __smp_mb(); \ - ___p1; \ + (typeof(*p))___p1; \ }) #endif @@ -183,10 +183,10 @@ do { \ #ifndef smp_load_acquire #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = READ_ONCE(*p); \ + __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ barrier(); \ - ___p1; \ + (typeof(*p))___p1; \ }) #endif @@ -229,14 +229,14 @@ do { \ #ifndef smp_cond_load_relaxed #define smp_cond_load_relaxed(ptr, cond_expr) ({ \ typeof(ptr) __PTR = (ptr); \ - typeof(*ptr) VAL; \ + __unqual_scalar_typeof(*ptr) VAL; \ for (;;) { \ VAL = READ_ONCE(*__PTR); \ if (cond_expr) \ break; \ cpu_relax(); \ } \ - VAL; \ + (typeof(*ptr))VAL; \ }) #endif @@ -250,10 +250,10 @@ do { \ */ #ifndef smp_cond_load_acquire #define smp_cond_load_acquire(ptr, cond_expr) ({ \ - typeof(*ptr) _val; \ + __unqual_scalar_typeof(*ptr) _val; \ _val = smp_cond_load_relaxed(ptr, cond_expr); \ smp_acquire__after_ctrl_dep(); \ - _val; \ + (typeof(*ptr))_val; \ }) #endif diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 384b5c835ced..c94e33ae3e7b 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -83,14 +83,19 @@ extern 
__printf(4, 5) void warn_slowpath_fmt(const char *file, const int line, unsigned taint, const char *fmt, ...); #define __WARN() __WARN_printf(TAINT_WARN, NULL) -#define __WARN_printf(taint, arg...) \ - warn_slowpath_fmt(__FILE__, __LINE__, taint, arg) +#define __WARN_printf(taint, arg...) do { \ + instrumentation_begin(); \ + warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \ + instrumentation_end(); \ + } while (0) #else extern __printf(1, 2) void __warn_printk(const char *fmt, ...); #define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN)) #define __WARN_printf(taint, arg...) do { \ + instrumentation_begin(); \ __warn_printk(arg); \ __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\ + instrumentation_end(); \ } while (0) #define WARN_ON_ONCE(condition) ({ \ int __ret_warn_on = !!(condition); \ diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index cac7404b2bdd..907fa5d16494 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h @@ -1,11 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_CACHEFLUSH_H -#define __ASM_CACHEFLUSH_H - -/* Keep includes the same across arches. */ -#include <linux/mm.h> - -#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 +#ifndef _ASM_GENERIC_CACHEFLUSH_H +#define _ASM_GENERIC_CACHEFLUSH_H /* * The cache doesn't need to be flushed when TLB entries change when @@ -45,12 +40,14 @@ static inline void flush_cache_page(struct vm_area_struct *vma, } #endif -#ifndef flush_dcache_page +#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE static inline void flush_dcache_page(struct page *page) { } +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #endif + #ifndef flush_dcache_mmap_lock static inline void flush_dcache_mmap_lock(struct address_space *mapping) { @@ -69,6 +66,10 @@ static inline void flush_icache_range(unsigned long start, unsigned long end) } #endif +#ifndef flush_icache_user_range +#define flush_icache_user_range flush_icache_range +#endif + #ifndef flush_icache_page static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) @@ -76,8 +77,8 @@ static inline void flush_icache_page(struct vm_area_struct *vma, } #endif -#ifndef flush_icache_user_range -static inline void flush_icache_user_range(struct vm_area_struct *vma, +#ifndef flush_icache_user_page +static inline void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, unsigned long addr, int len) { @@ -100,7 +101,7 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end) #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ - flush_icache_user_range(vma, page, vaddr, len); \ + flush_icache_user_page(vma, page, vaddr, len); \ } while (0) #endif @@ -109,4 +110,4 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end) memcpy(dst, src, len) #endif -#endif /* __ASM_CACHEFLUSH_H */ +#endif /* _ASM_GENERIC_CACHEFLUSH_H */ diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h index 34785c0f57b0..5a80f8e54300 100644 --- a/include/asm-generic/checksum.h +++ b/include/asm-generic/checksum.h @@ -25,15 +25,6 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum); */ extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum); -/* - * the same as csum_partial_copy, but copies from user space. 
- * - * here even more important to align src and dst on a 32-bit (or even - * better 64-bit) boundary - */ -extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *csum_err); - #ifndef csum_partial_copy_nocheck #define csum_partial_copy_nocheck(src, dst, len, sum) \ csum_partial_copy((src), (dst), (len), (sum)) diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h index 822f433ac95c..40f85decc2ee 100644 --- a/include/asm-generic/hugetlb.h +++ b/include/asm-generic/hugetlb.h @@ -122,7 +122,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, #ifndef __HAVE_ARCH_HUGE_PTEP_GET static inline pte_t huge_ptep_get(pte_t *ptep) { - return *ptep; + return READ_ONCE(*ptep); } #endif diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h new file mode 100644 index 000000000000..e73a11850055 --- /dev/null +++ b/include/asm-generic/hyperv-tlfs.h @@ -0,0 +1,497 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * This file contains definitions from Hyper-V Hypervisor Top-Level Functional + * Specification (TLFS): + * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs + */ + +#ifndef _ASM_GENERIC_HYPERV_TLFS_H +#define _ASM_GENERIC_HYPERV_TLFS_H + +#include <linux/types.h> +#include <linux/bits.h> +#include <linux/time64.h> + +/* + * While not explicitly listed in the TLFS, Hyper-V always runs with a page size + * of 4096. These definitions are used when communicating with Hyper-V using + * guest physical pages and guest physical page addresses, since the guest page + * size may not be 4096 on all architectures. + */ +#define HV_HYP_PAGE_SHIFT 12 +#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT) +#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1)) + +/* + * Hyper-V provides two categories of flags relevant to guest VMs. The + * "Features" category indicates specific functionality that is available + * to guests on this particular instance of Hyper-V. The "Features" + * are presented in four groups, each of which is 32 bits. The group A + * and B definitions are common across architectures and are listed here. + * However, not all flags are relevant on all architectures. + * + * Groups C and D vary across architectures and are listed in the + * architecture specific portion of hyperv-tlfs.h. Some of these flags exist + * on multiple architectures, but the bit positions are different so they + * cannot appear in the generic portion of hyperv-tlfs.h. + * + * The "Enlightenments" category provides recommendations on whether to use + * specific enlightenments that are available. The Enlighenments are a single + * group of 32 bits, but they vary across architectures and are listed in + * the architecture specific portion of hyperv-tlfs.h. + */ + +/* + * Group A Features. 
+ */ + +/* VP Runtime register available */ +#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0) +/* Partition Reference Counter available*/ +#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1) +/* Basic SynIC register available */ +#define HV_MSR_SYNIC_AVAILABLE BIT(2) +/* Synthetic Timer registers available */ +#define HV_MSR_SYNTIMER_AVAILABLE BIT(3) +/* Virtual APIC assist and VP assist page registers available */ +#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4) +/* Hypercall and Guest OS ID registers available*/ +#define HV_MSR_HYPERCALL_AVAILABLE BIT(5) +/* Access virtual processor index register available*/ +#define HV_MSR_VP_INDEX_AVAILABLE BIT(6) +/* Virtual system reset register available*/ +#define HV_MSR_RESET_AVAILABLE BIT(7) +/* Access statistics page registers available */ +#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8) +/* Partition reference TSC register is available */ +#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9) +/* Partition Guest IDLE register is available */ +#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10) +/* Partition local APIC and TSC frequency registers available */ +#define HV_ACCESS_FREQUENCY_MSRS BIT(11) +/* AccessReenlightenmentControls privilege */ +#define HV_ACCESS_REENLIGHTENMENT BIT(13) +/* AccessTscInvariantControls privilege */ +#define HV_ACCESS_TSC_INVARIANT BIT(15) + +/* + * Group B features. + */ +#define HV_CREATE_PARTITIONS BIT(0) +#define HV_ACCESS_PARTITION_ID BIT(1) +#define HV_ACCESS_MEMORY_POOL BIT(2) +#define HV_ADJUST_MESSAGE_BUFFERS BIT(3) +#define HV_POST_MESSAGES BIT(4) +#define HV_SIGNAL_EVENTS BIT(5) +#define HV_CREATE_PORT BIT(6) +#define HV_CONNECT_PORT BIT(7) +#define HV_ACCESS_STATS BIT(8) +#define HV_DEBUGGING BIT(11) +#define HV_CPU_POWER_MANAGEMENT BIT(12) + + +/* + * TSC page layout. + */ +struct ms_hyperv_tsc_page { + volatile u32 tsc_sequence; + u32 reserved1; + volatile u64 tsc_scale; + volatile s64 tsc_offset; +} __packed; + +/* + * The guest OS needs to register the guest ID with the hypervisor. + * The guest ID is a 64 bit entity and the structure of this ID is + * specified in the Hyper-V specification: + * + * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx + * + * While the current guideline does not specify how Linux guest ID(s) + * need to be generated, our plan is to publish the guidelines for + * Linux and other guest operating systems that currently are hosted + * on Hyper-V. The implementation here conforms to this yet + * unpublished guidelines. + * + * + * Bit(s) + * 63 - Indicates if the OS is Open Source or not; 1 is Open Source + * 62:56 - Os Type; Linux is 0x100 + * 55:48 - Distro specific identification + * 47:16 - Linux kernel version number + * 15:0 - Distro specific identification + * + * + */ + +#define HV_LINUX_VENDOR_ID 0x8100 + +/* + * Crash notification flags. + */ +#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62) +#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63) + +/* Declare the various hypercall operations. 
*/ +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003 +#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008 +#define HVCALL_SEND_IPI 0x000b +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014 +#define HVCALL_SEND_IPI_EX 0x0015 +#define HVCALL_GET_VP_REGISTERS 0x0050 +#define HVCALL_SET_VP_REGISTERS 0x0051 +#define HVCALL_POST_MESSAGE 0x005c +#define HVCALL_SIGNAL_EVENT 0x005d +#define HVCALL_POST_DEBUG_DATA 0x0069 +#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a +#define HVCALL_RESET_DEBUG_SESSION 0x006b +#define HVCALL_RETARGET_INTERRUPT 0x007e +#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af +#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0 + +#define HV_FLUSH_ALL_PROCESSORS BIT(0) +#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1) +#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2) +#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3) + +enum HV_GENERIC_SET_FORMAT { + HV_GENERIC_SET_SPARSE_4K, + HV_GENERIC_SET_ALL, +}; + +#define HV_PARTITION_ID_SELF ((u64)-1) +#define HV_VP_INDEX_SELF ((u32)-2) + +#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0) +#define HV_HYPERCALL_FAST_BIT BIT(16) +#define HV_HYPERCALL_VARHEAD_OFFSET 17 +#define HV_HYPERCALL_REP_COMP_OFFSET 32 +#define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32) +#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32) +#define HV_HYPERCALL_REP_START_OFFSET 48 +#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48) + +/* hypercall status code */ +#define HV_STATUS_SUCCESS 0 +#define HV_STATUS_INVALID_HYPERCALL_CODE 2 +#define HV_STATUS_INVALID_HYPERCALL_INPUT 3 +#define HV_STATUS_INVALID_ALIGNMENT 4 +#define HV_STATUS_INVALID_PARAMETER 5 +#define HV_STATUS_OPERATION_DENIED 8 +#define HV_STATUS_INSUFFICIENT_MEMORY 11 +#define HV_STATUS_INVALID_PORT_ID 17 +#define HV_STATUS_INVALID_CONNECTION_ID 18 +#define HV_STATUS_INSUFFICIENT_BUFFERS 19 + +/* + * The Hyper-V TimeRefCount register and the TSC + * page provide a guest VM clock with 100ns tick rate + */ +#define HV_CLOCK_HZ (NSEC_PER_SEC/100) + +/* Define the number of synthetic interrupt sources. */ +#define HV_SYNIC_SINT_COUNT (16) +/* Define the expected SynIC version. */ +#define HV_SYNIC_VERSION_1 (0x1) +/* Valid SynIC vectors are 16-255. */ +#define HV_SYNIC_FIRST_VALID_VECTOR (16) + +#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0) +#define HV_SYNIC_SIMP_ENABLE (1ULL << 0) +#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0) +#define HV_SYNIC_SINT_MASKED (1ULL << 16) +#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17) +#define HV_SYNIC_SINT_VECTOR_MASK (0xFF) + +#define HV_SYNIC_STIMER_COUNT (4) + +/* Define synthetic interrupt controller message constants. */ +#define HV_MESSAGE_SIZE (256) +#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240) +#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30) + +/* Define synthetic interrupt controller message flags. */ +union hv_message_flags { + __u8 asu8; + struct { + __u8 msg_pending:1; + __u8 reserved:7; + } __packed; +}; + +/* Define port identifier type. */ +union hv_port_id { + __u32 asu32; + struct { + __u32 id:24; + __u32 reserved:8; + } __packed u; +}; + +/* Define synthetic interrupt controller message header. */ +struct hv_message_header { + __u32 message_type; + __u8 payload_size; + union hv_message_flags message_flags; + __u8 reserved[2]; + union { + __u64 sender; + union hv_port_id port; + }; +} __packed; + +/* Define synthetic interrupt controller message format. 
*/ +struct hv_message { + struct hv_message_header header; + union { + __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; + } u; +} __packed; + +/* Define the synthetic interrupt message page layout. */ +struct hv_message_page { + struct hv_message sint_message[HV_SYNIC_SINT_COUNT]; +} __packed; + +/* Define timer message payload structure. */ +struct hv_timer_message_payload { + __u32 timer_index; + __u32 reserved; + __u64 expiration_time; /* When the timer expired */ + __u64 delivery_time; /* When the message was delivered */ +} __packed; + + +/* Define synthetic interrupt controller flag constants. */ +#define HV_EVENT_FLAGS_COUNT (256 * 8) +#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long)) + +/* + * Synthetic timer configuration. + */ +union hv_stimer_config { + u64 as_uint64; + struct { + u64 enable:1; + u64 periodic:1; + u64 lazy:1; + u64 auto_enable:1; + u64 apic_vector:8; + u64 direct_mode:1; + u64 reserved_z0:3; + u64 sintx:4; + u64 reserved_z1:44; + } __packed; +}; + + +/* Define the synthetic interrupt controller event flags format. */ +union hv_synic_event_flags { + unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT]; +}; + +/* Define SynIC control register. */ +union hv_synic_scontrol { + u64 as_uint64; + struct { + u64 enable:1; + u64 reserved:63; + } __packed; +}; + +/* Define synthetic interrupt source. */ +union hv_synic_sint { + u64 as_uint64; + struct { + u64 vector:8; + u64 reserved1:8; + u64 masked:1; + u64 auto_eoi:1; + u64 polling:1; + u64 reserved2:45; + } __packed; +}; + +/* Define the format of the SIMP register */ +union hv_synic_simp { + u64 as_uint64; + struct { + u64 simp_enabled:1; + u64 preserved:11; + u64 base_simp_gpa:52; + } __packed; +}; + +/* Define the format of the SIEFP register */ +union hv_synic_siefp { + u64 as_uint64; + struct { + u64 siefp_enabled:1; + u64 preserved:11; + u64 base_siefp_gpa:52; + } __packed; +}; + +struct hv_vpset { + u64 format; + u64 valid_bank_mask; + u64 bank_contents[]; +} __packed; + +/* HvCallSendSyntheticClusterIpi hypercall */ +struct hv_send_ipi { + u32 vector; + u32 reserved; + u64 cpu_mask; +} __packed; + +/* HvCallSendSyntheticClusterIpiEx hypercall */ +struct hv_send_ipi_ex { + u32 vector; + u32 reserved; + struct hv_vpset vp_set; +} __packed; + +/* HvFlushGuestPhysicalAddressSpace hypercalls */ +struct hv_guest_mapping_flush { + u64 address_space; + u64 flags; +} __packed; + +/* + * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited + * by the bitwidth of "additional_pages" in union hv_gpa_page_range. + */ +#define HV_MAX_FLUSH_PAGES (2048) + +/* HvFlushGuestPhysicalAddressList hypercall */ +union hv_gpa_page_range { + u64 address_space; + struct { + u64 additional_pages:11; + u64 largepage:1; + u64 basepfn:52; + } page; +}; + +/* + * All input flush parameters should be in single page. The max flush + * count is equal with how many entries of union hv_gpa_page_range can + * be populated into the input parameter page. 
+ */ +#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \ + sizeof(union hv_gpa_page_range)) + +struct hv_guest_mapping_flush_list { + u64 address_space; + u64 flags; + union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT]; +}; + +/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */ +struct hv_tlb_flush { + u64 address_space; + u64 flags; + u64 processor_mask; + u64 gva_list[]; +} __packed; + +/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */ +struct hv_tlb_flush_ex { + u64 address_space; + u64 flags; + struct hv_vpset hv_vp_set; + u64 gva_list[]; +} __packed; + +/* HvRetargetDeviceInterrupt hypercall */ +union hv_msi_entry { + u64 as_uint64; + struct { + u32 address; + u32 data; + } __packed; +}; + +struct hv_interrupt_entry { + u32 source; /* 1 for MSI(-X) */ + u32 reserved1; + union hv_msi_entry msi_entry; +} __packed; + +/* + * flags for hv_device_interrupt_target.flags + */ +#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1 +#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2 + +struct hv_device_interrupt_target { + u32 vector; + u32 flags; + union { + u64 vp_mask; + struct hv_vpset vp_set; + }; +} __packed; + +struct hv_retarget_device_interrupt { + u64 partition_id; /* use "self" */ + u64 device_id; + struct hv_interrupt_entry int_entry; + u64 reserved2; + struct hv_device_interrupt_target int_target; +} __packed __aligned(8); + + +/* HvGetVpRegisters hypercall input with variable size reg name list*/ +struct hv_get_vp_registers_input { + struct { + u64 partitionid; + u32 vpindex; + u8 inputvtl; + u8 padding[3]; + } header; + struct input { + u32 name0; + u32 name1; + } element[]; +} __packed; + + +/* HvGetVpRegisters returns an array of these output elements */ +struct hv_get_vp_registers_output { + union { + struct { + u32 a; + u32 b; + u32 c; + u32 d; + } as32 __packed; + struct { + u64 low; + u64 high; + } as64 __packed; + }; +}; + +/* HvSetVpRegisters hypercall with variable size reg name/value list*/ +struct hv_set_vp_registers_input { + struct { + u64 partitionid; + u32 vpindex; + u8 inputvtl; + u8 padding[3]; + } header; + struct { + u32 name; + u32 padding1; + u64 padding2; + u64 valuelow; + u64 valuehigh; + } element[]; +} __packed; + +#endif diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index d39ac997dda8..8b1e020e9a03 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -448,17 +448,15 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer, #define IO_SPACE_LIMIT 0xffff #endif -#include <linux/logic_pio.h> - /* * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be * implemented on hardware that needs an additional delay for I/O accesses to * take effect. 
*/ -#ifndef inb -#define inb inb -static inline u8 inb(unsigned long addr) +#if !defined(inb) && !defined(_inb) +#define _inb _inb +static inline u16 _inb(unsigned long addr) { u8 val; @@ -469,9 +467,9 @@ static inline u8 inb(unsigned long addr) } #endif -#ifndef inw -#define inw inw -static inline u16 inw(unsigned long addr) +#if !defined(inw) && !defined(_inw) +#define _inw _inw +static inline u16 _inw(unsigned long addr) { u16 val; @@ -482,9 +480,9 @@ static inline u16 inw(unsigned long addr) } #endif -#ifndef inl -#define inl inl -static inline u32 inl(unsigned long addr) +#if !defined(inl) && !defined(_inl) +#define _inl _inl +static inline u16 _inl(unsigned long addr) { u32 val; @@ -495,9 +493,9 @@ static inline u32 inl(unsigned long addr) } #endif -#ifndef outb -#define outb outb -static inline void outb(u8 value, unsigned long addr) +#if !defined(outb) && !defined(_outb) +#define _outb _outb +static inline void _outb(u8 value, unsigned long addr) { __io_pbw(); __raw_writeb(value, PCI_IOBASE + addr); @@ -505,9 +503,9 @@ static inline void outb(u8 value, unsigned long addr) } #endif -#ifndef outw -#define outw outw -static inline void outw(u16 value, unsigned long addr) +#if !defined(outw) && !defined(_outw) +#define _outw _outw +static inline void _outw(u16 value, unsigned long addr) { __io_pbw(); __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr); @@ -515,9 +513,9 @@ static inline void outw(u16 value, unsigned long addr) } #endif -#ifndef outl -#define outl outl -static inline void outl(u32 value, unsigned long addr) +#if !defined(outl) && !defined(_outl) +#define _outl _outl +static inline void _outl(u32 value, unsigned long addr) { __io_pbw(); __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr); @@ -525,6 +523,32 @@ static inline void outl(u32 value, unsigned long addr) } #endif +#include <linux/logic_pio.h> + +#ifndef inb +#define inb _inb +#endif + +#ifndef inw +#define inw _inw +#endif + +#ifndef inl +#define inl _inl +#endif + +#ifndef outb +#define outb _outb +#endif + +#ifndef outw +#define outw _outw +#endif + +#ifndef outl +#define outl _outl +#endif + #ifndef inb_p #define inb_p inb_p static inline u8 inb_p(unsigned long addr) @@ -948,7 +972,7 @@ static inline void iounmap(void __iomem *addr) } #endif #elif defined(CONFIG_GENERIC_IOREMAP) -#include <asm/pgtable.h> +#include <linux/pgtable.h> void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot); void iounmap(volatile void __iomem *addr); diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index b3f1082cc435..1c4fd950f091 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -163,7 +163,7 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, return nr_bank; } -void hyperv_report_panic(struct pt_regs *regs, long err); +void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die); void hyperv_report_panic_msg(phys_addr_t pa, size_t size); bool hv_is_hyperv_initialized(void); bool hv_is_hibernation_supported(void); diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h deleted file mode 100644 index 829bdb0d6327..000000000000 --- a/include/asm-generic/pgtable-nop4d-hack.h +++ /dev/null @@ -1,64 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _PGTABLE_NOP4D_HACK_H -#define _PGTABLE_NOP4D_HACK_H - -#ifndef __ASSEMBLY__ -#include <asm-generic/5level-fixup.h> - -#define __PAGETABLE_PUD_FOLDED 1 - -/* - * Having the pud type consist of a pgd gets the size right, and allows - * us to 
conceptually access the pgd entry that this pud is folded into - * without casting. - */ -typedef struct { pgd_t pgd; } pud_t; - -#define PUD_SHIFT PGDIR_SHIFT -#define PTRS_PER_PUD 1 -#define PUD_SIZE (1UL << PUD_SHIFT) -#define PUD_MASK (~(PUD_SIZE-1)) - -/* - * The "pgd_xxx()" functions here are trivial for a folded two-level - * setup: the pud is never bad, and a pud always exists (as it's folded - * into the pgd entry) - */ -static inline int pgd_none(pgd_t pgd) { return 0; } -static inline int pgd_bad(pgd_t pgd) { return 0; } -static inline int pgd_present(pgd_t pgd) { return 1; } -static inline void pgd_clear(pgd_t *pgd) { } -#define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) - -#define pgd_populate(mm, pgd, pud) do { } while (0) -#define pgd_populate_safe(mm, pgd, pud) do { } while (0) -/* - * (puds are folded into pgds so this doesn't get actually called, - * but the define is needed for a generic inline function.) - */ -#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) - -static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) -{ - return (pud_t *)pgd; -} - -#define pud_val(x) (pgd_val((x).pgd)) -#define __pud(x) ((pud_t) { __pgd(x) }) - -#define pgd_page(pgd) (pud_page((pud_t){ pgd })) -#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) - -/* - * allocating and freeing a pud is trivial: the 1-entry pud is - * inside the pgd, so has no extra memory associated with it. - */ -#define pud_alloc_one(mm, address) NULL -#define pud_free(mm, x) do { } while (0) -#define __pud_free_tlb(tlb, x, a) do { } while (0) - -#undef pud_addr_end -#define pud_addr_end(addr, end) (end) - -#endif /* __ASSEMBLY__ */ -#endif /* _PGTABLE_NOP4D_HACK_H */ diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h index 0d9b28cba16d..3e13acd019ae 100644 --- a/include/asm-generic/pgtable-nopmd.h +++ b/include/asm-generic/pgtable-nopmd.h @@ -45,6 +45,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) { return (pmd_t *)pud; } +#define pmd_offset pmd_offset #define pmd_val(x) (pud_val((x).pud)) #define __pmd(x) ((pmd_t) { __pud(x) } ) diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h index d3776cb494c0..a9d751fbda9e 100644 --- a/include/asm-generic/pgtable-nopud.h +++ b/include/asm-generic/pgtable-nopud.h @@ -4,9 +4,6 @@ #ifndef __ASSEMBLY__ -#ifdef __ARCH_USE_5LEVEL_HACK -#include <asm-generic/pgtable-nop4d-hack.h> -#else #include <asm-generic/pgtable-nop4d.h> #define __PAGETABLE_PUD_FOLDED 1 @@ -46,6 +43,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) { return (pud_t *)p4d; } +#define pud_offset pud_offset #define pud_val(x) (p4d_val((x).p4d)) #define __pud(x) ((pud_t) { __p4d(x) }) @@ -65,5 +63,4 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) #define pud_addr_end(addr, end) (end) #endif /* __ASSEMBLY__ */ -#endif /* !__ARCH_USE_5LEVEL_HACK */ #endif /* _PGTABLE_NOPUD_H */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h deleted file mode 100644 index 329b8c8ca703..000000000000 --- a/include/asm-generic/pgtable.h +++ /dev/null @@ -1,1262 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_GENERIC_PGTABLE_H -#define _ASM_GENERIC_PGTABLE_H - -#include <linux/pfn.h> - -#ifndef __ASSEMBLY__ -#ifdef CONFIG_MMU - -#include <linux/mm_types.h> -#include <linux/bug.h> -#include <linux/errno.h> -#include <asm-generic/pgtable_uffd.h> - -#if 5 - defined(__PAGETABLE_P4D_FOLDED) - 
defined(__PAGETABLE_PUD_FOLDED) - \ - defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS -#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED -#endif - -/* - * On almost all architectures and configurations, 0 can be used as the - * upper ceiling to free_pgtables(): on many architectures it has the same - * effect as using TASK_SIZE. However, there is one configuration which - * must impose a more careful limit, to avoid freeing kernel pgtables. - */ -#ifndef USER_PGTABLES_CEILING -#define USER_PGTABLES_CEILING 0UL -#endif - -#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS -extern int ptep_set_access_flags(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep, - pte_t entry, int dirty); -#endif - -#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -extern int pmdp_set_access_flags(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp, - pmd_t entry, int dirty); -extern int pudp_set_access_flags(struct vm_area_struct *vma, - unsigned long address, pud_t *pudp, - pud_t entry, int dirty); -#else -static inline int pmdp_set_access_flags(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp, - pmd_t entry, int dirty) -{ - BUILD_BUG(); - return 0; -} -static inline int pudp_set_access_flags(struct vm_area_struct *vma, - unsigned long address, pud_t *pudp, - pud_t entry, int dirty) -{ - BUILD_BUG(); - return 0; -} -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -#endif - -#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, - unsigned long address, - pte_t *ptep) -{ - pte_t pte = *ptep; - int r = 1; - if (!pte_young(pte)) - r = 0; - else - set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); - return r; -} -#endif - -#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, - unsigned long address, - pmd_t *pmdp) -{ - pmd_t pmd = *pmdp; - int r = 1; - if (!pmd_young(pmd)) - r = 0; - else - set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); - return r; -} -#else -static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, - unsigned long address, - pmd_t *pmdp) -{ - BUILD_BUG(); - return 0; -} -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -#endif - -#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH -int ptep_clear_flush_young(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep); -#endif - -#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -extern int pmdp_clear_flush_young(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp); -#else -/* - * Despite relevant to THP only, this API is called from generic rmap code - * under PageTransHuge(), hence needs a dummy implementation for !THP - */ -static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp) -{ - BUILD_BUG(); - return 0; -} -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -#endif - -#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR -static inline pte_t ptep_get_and_clear(struct mm_struct *mm, - unsigned long address, - pte_t *ptep) -{ - pte_t pte = *ptep; - pte_clear(mm, address, ptep); - return pte; -} -#endif - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR -static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, - unsigned long address, - pmd_t *pmdp) -{ - pmd_t pmd = *pmdp; - pmd_clear(pmdp); - return pmd; -} -#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */ 
-#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR -static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, - unsigned long address, - pud_t *pudp) -{ - pud_t pud = *pudp; - - pud_clear(pudp); - return pud; -} -#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */ -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL -static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, - unsigned long address, pmd_t *pmdp, - int full) -{ - return pmdp_huge_get_and_clear(mm, address, pmdp); -} -#endif - -#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL -static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm, - unsigned long address, pud_t *pudp, - int full) -{ - return pudp_huge_get_and_clear(mm, address, pudp); -} -#endif -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - -#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL -static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, - unsigned long address, pte_t *ptep, - int full) -{ - pte_t pte; - pte = ptep_get_and_clear(mm, address, ptep); - return pte; -} -#endif - -/* - * Some architectures may be able to avoid expensive synchronization - * primitives when modifications are made to PTE's which are already - * not present, or in the process of an address space destruction. - */ -#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL -static inline void pte_clear_not_present_full(struct mm_struct *mm, - unsigned long address, - pte_t *ptep, - int full) -{ - pte_clear(mm, address, ptep); -} -#endif - -#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH -extern pte_t ptep_clear_flush(struct vm_area_struct *vma, - unsigned long address, - pte_t *ptep); -#endif - -#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH -extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, - unsigned long address, - pmd_t *pmdp); -extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, - unsigned long address, - pud_t *pudp); -#endif - -#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT -struct mm_struct; -static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) -{ - pte_t old_pte = *ptep; - set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); -} -#endif - -#ifndef pte_savedwrite -#define pte_savedwrite pte_write -#endif - -#ifndef pte_mk_savedwrite -#define pte_mk_savedwrite pte_mkwrite -#endif - -#ifndef pte_clear_savedwrite -#define pte_clear_savedwrite pte_wrprotect -#endif - -#ifndef pmd_savedwrite -#define pmd_savedwrite pmd_write -#endif - -#ifndef pmd_mk_savedwrite -#define pmd_mk_savedwrite pmd_mkwrite -#endif - -#ifndef pmd_clear_savedwrite -#define pmd_clear_savedwrite pmd_wrprotect -#endif - -#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -static inline void pmdp_set_wrprotect(struct mm_struct *mm, - unsigned long address, pmd_t *pmdp) -{ - pmd_t old_pmd = *pmdp; - set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); -} -#else -static inline void pmdp_set_wrprotect(struct mm_struct *mm, - unsigned long address, pmd_t *pmdp) -{ - BUILD_BUG(); -} -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -#endif -#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT -#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD -static inline void pudp_set_wrprotect(struct mm_struct *mm, - unsigned long address, pud_t *pudp) -{ - pud_t old_pud = *pudp; - - set_pud_at(mm, address, pudp, pud_wrprotect(old_pud)); -} -#else -static inline void pudp_set_wrprotect(struct mm_struct *mm, - unsigned long address, pud_t *pudp) -{ - BUILD_BUG(); -} -#endif /* 
CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ -#endif - -#ifndef pmdp_collapse_flush -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp); -#else -static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, - unsigned long address, - pmd_t *pmdp) -{ - BUILD_BUG(); - return *pmdp; -} -#define pmdp_collapse_flush pmdp_collapse_flush -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -#endif - -#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT -extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, - pgtable_t pgtable); -#endif - -#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW -extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); -#endif - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -/* - * This is an implementation of pmdp_establish() that is only suitable for an - * architecture that doesn't have hardware dirty/accessed bits. In this case we - * can't race with CPU which sets these bits and non-atomic aproach is fine. - */ -static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp, pmd_t pmd) -{ - pmd_t old_pmd = *pmdp; - set_pmd_at(vma->vm_mm, address, pmdp, pmd); - return old_pmd; -} -#endif - -#ifndef __HAVE_ARCH_PMDP_INVALIDATE -extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, - pmd_t *pmdp); -#endif - -#ifndef __HAVE_ARCH_PTE_SAME -static inline int pte_same(pte_t pte_a, pte_t pte_b) -{ - return pte_val(pte_a) == pte_val(pte_b); -} -#endif - -#ifndef __HAVE_ARCH_PTE_UNUSED -/* - * Some architectures provide facilities to virtualization guests - * so that they can flag allocated pages as unused. This allows the - * host to transparently reclaim unused pages. This function returns - * whether the pte's page is unused. - */ -static inline int pte_unused(pte_t pte) -{ - return 0; -} -#endif - -#ifndef pte_access_permitted -#define pte_access_permitted(pte, write) \ - (pte_present(pte) && (!(write) || pte_write(pte))) -#endif - -#ifndef pmd_access_permitted -#define pmd_access_permitted(pmd, write) \ - (pmd_present(pmd) && (!(write) || pmd_write(pmd))) -#endif - -#ifndef pud_access_permitted -#define pud_access_permitted(pud, write) \ - (pud_present(pud) && (!(write) || pud_write(pud))) -#endif - -#ifndef p4d_access_permitted -#define p4d_access_permitted(p4d, write) \ - (p4d_present(p4d) && (!(write) || p4d_write(p4d))) -#endif - -#ifndef pgd_access_permitted -#define pgd_access_permitted(pgd, write) \ - (pgd_present(pgd) && (!(write) || pgd_write(pgd))) -#endif - -#ifndef __HAVE_ARCH_PMD_SAME -static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) -{ - return pmd_val(pmd_a) == pmd_val(pmd_b); -} - -static inline int pud_same(pud_t pud_a, pud_t pud_b) -{ - return pud_val(pud_a) == pud_val(pud_b); -} -#endif - -#ifndef __HAVE_ARCH_P4D_SAME -static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b) -{ - return p4d_val(p4d_a) == p4d_val(p4d_b); -} -#endif - -#ifndef __HAVE_ARCH_PGD_SAME -static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) -{ - return pgd_val(pgd_a) == pgd_val(pgd_b); -} -#endif - -/* - * Use set_p*_safe(), and elide TLB flushing, when confident that *no* - * TLB flush will be required as a result of the "set". For example, use - * in scenarios where it is known ahead of time that the routine is - * setting non-present entries, or re-setting an existing entry to the - * same value. Otherwise, use the typical "set" helpers and flush the - * TLB. 
- */ -#define set_pte_safe(ptep, pte) \ -({ \ - WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \ - set_pte(ptep, pte); \ -}) - -#define set_pmd_safe(pmdp, pmd) \ -({ \ - WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \ - set_pmd(pmdp, pmd); \ -}) - -#define set_pud_safe(pudp, pud) \ -({ \ - WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \ - set_pud(pudp, pud); \ -}) - -#define set_p4d_safe(p4dp, p4d) \ -({ \ - WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \ - set_p4d(p4dp, p4d); \ -}) - -#define set_pgd_safe(pgdp, pgd) \ -({ \ - WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \ - set_pgd(pgdp, pgd); \ -}) - -#ifndef __HAVE_ARCH_DO_SWAP_PAGE -/* - * Some architectures support metadata associated with a page. When a - * page is being swapped out, this metadata must be saved so it can be - * restored when the page is swapped back in. SPARC M7 and newer - * processors support an ADI (Application Data Integrity) tag for the - * page as metadata for the page. arch_do_swap_page() can restore this - * metadata when a page is swapped back in. - */ -static inline void arch_do_swap_page(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long addr, - pte_t pte, pte_t oldpte) -{ - -} -#endif - -#ifndef __HAVE_ARCH_UNMAP_ONE -/* - * Some architectures support metadata associated with a page. When a - * page is being swapped out, this metadata must be saved so it can be - * restored when the page is swapped back in. SPARC M7 and newer - * processors support an ADI (Application Data Integrity) tag for the - * page as metadata for the page. arch_unmap_one() can save this - * metadata on a swap-out of a page. - */ -static inline int arch_unmap_one(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long addr, - pte_t orig_pte) -{ - return 0; -} -#endif - -#ifndef __HAVE_ARCH_PGD_OFFSET_GATE -#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr) -#endif - -#ifndef __HAVE_ARCH_MOVE_PTE -#define move_pte(pte, prot, old_addr, new_addr) (pte) -#endif - -#ifndef pte_accessible -# define pte_accessible(mm, pte) ((void)(pte), 1) -#endif - -#ifndef flush_tlb_fix_spurious_fault -#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) -#endif - -#ifndef pgprot_noncached -#define pgprot_noncached(prot) (prot) -#endif - -#ifndef pgprot_writecombine -#define pgprot_writecombine pgprot_noncached -#endif - -#ifndef pgprot_writethrough -#define pgprot_writethrough pgprot_noncached -#endif - -#ifndef pgprot_device -#define pgprot_device pgprot_noncached -#endif - -#ifndef pgprot_modify -#define pgprot_modify pgprot_modify -static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) -{ - if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot))) - newprot = pgprot_noncached(newprot); - if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot))) - newprot = pgprot_writecombine(newprot); - if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot))) - newprot = pgprot_device(newprot); - return newprot; -} -#endif - -/* - * When walking page tables, get the address of the next boundary, - * or the end address of the range if that comes earlier. Although no - * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout. - */ - -#define pgd_addr_end(addr, end) \ -({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ - (__boundary - 1 < (end) - 1)? 
__boundary: (end); \ -}) - -#ifndef p4d_addr_end -#define p4d_addr_end(addr, end) \ -({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \ - (__boundary - 1 < (end) - 1)? __boundary: (end); \ -}) -#endif - -#ifndef pud_addr_end -#define pud_addr_end(addr, end) \ -({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ - (__boundary - 1 < (end) - 1)? __boundary: (end); \ -}) -#endif - -#ifndef pmd_addr_end -#define pmd_addr_end(addr, end) \ -({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ - (__boundary - 1 < (end) - 1)? __boundary: (end); \ -}) -#endif - -/* - * When walking page tables, we usually want to skip any p?d_none entries; - * and any p?d_bad entries - reporting the error before resetting to none. - * Do the tests inline, but report and clear the bad entry in mm/memory.c. - */ -void pgd_clear_bad(pgd_t *); - -#ifndef __PAGETABLE_P4D_FOLDED -void p4d_clear_bad(p4d_t *); -#else -#define p4d_clear_bad(p4d) do { } while (0) -#endif - -#ifndef __PAGETABLE_PUD_FOLDED -void pud_clear_bad(pud_t *); -#else -#define pud_clear_bad(p4d) do { } while (0) -#endif - -void pmd_clear_bad(pmd_t *); - -static inline int pgd_none_or_clear_bad(pgd_t *pgd) -{ - if (pgd_none(*pgd)) - return 1; - if (unlikely(pgd_bad(*pgd))) { - pgd_clear_bad(pgd); - return 1; - } - return 0; -} - -static inline int p4d_none_or_clear_bad(p4d_t *p4d) -{ - if (p4d_none(*p4d)) - return 1; - if (unlikely(p4d_bad(*p4d))) { - p4d_clear_bad(p4d); - return 1; - } - return 0; -} - -static inline int pud_none_or_clear_bad(pud_t *pud) -{ - if (pud_none(*pud)) - return 1; - if (unlikely(pud_bad(*pud))) { - pud_clear_bad(pud); - return 1; - } - return 0; -} - -static inline int pmd_none_or_clear_bad(pmd_t *pmd) -{ - if (pmd_none(*pmd)) - return 1; - if (unlikely(pmd_bad(*pmd))) { - pmd_clear_bad(pmd); - return 1; - } - return 0; -} - -static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma, - unsigned long addr, - pte_t *ptep) -{ - /* - * Get the current pte state, but zero it out to make it - * non-present, preventing the hardware from asynchronously - * updating it. - */ - return ptep_get_and_clear(vma->vm_mm, addr, ptep); -} - -static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma, - unsigned long addr, - pte_t *ptep, pte_t pte) -{ - /* - * The pte is non-present, so there's no hardware state to - * preserve. - */ - set_pte_at(vma->vm_mm, addr, ptep, pte); -} - -#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION -/* - * Start a pte protection read-modify-write transaction, which - * protects against asynchronous hardware modifications to the pte. - * The intention is not to prevent the hardware from making pte - * updates, but to prevent any updates it may make from being lost. - * - * This does not protect against other software modifications of the - * pte; the appropriate pte lock must be held over the transation. - * - * Note that this interface is intended to be batchable, meaning that - * ptep_modify_prot_commit may not actually update the pte, but merely - * queue the update to be done at some later time. The update must be - * actually committed before the pte lock is released, however. - */ -static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, - unsigned long addr, - pte_t *ptep) -{ - return __ptep_modify_prot_start(vma, addr, ptep); -} - -/* - * Commit an update to a pte, leaving any hardware-controlled bits in - * the PTE unmodified. 
- */ -static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, - unsigned long addr, - pte_t *ptep, pte_t old_pte, pte_t pte) -{ - __ptep_modify_prot_commit(vma, addr, ptep, pte); -} -#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */ -#endif /* CONFIG_MMU */ - -/* - * No-op macros that just return the current protection value. Defined here - * because these macros can be used used even if CONFIG_MMU is not defined. - */ -#ifndef pgprot_encrypted -#define pgprot_encrypted(prot) (prot) -#endif - -#ifndef pgprot_decrypted -#define pgprot_decrypted(prot) (prot) -#endif - -/* - * A facility to provide lazy MMU batching. This allows PTE updates and - * page invalidations to be delayed until a call to leave lazy MMU mode - * is issued. Some architectures may benefit from doing this, and it is - * beneficial for both shadow and direct mode hypervisors, which may batch - * the PTE updates which happen during this window. Note that using this - * interface requires that read hazards be removed from the code. A read - * hazard could result in the direct mode hypervisor case, since the actual - * write to the page tables may not yet have taken place, so reads though - * a raw PTE pointer after it has been modified are not guaranteed to be - * up to date. This mode can only be entered and left under the protection of - * the page table locks for all page tables which may be modified. In the UP - * case, this is required so that preemption is disabled, and in the SMP case, - * it must synchronize the delayed page table writes properly on other CPUs. - */ -#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE -#define arch_enter_lazy_mmu_mode() do {} while (0) -#define arch_leave_lazy_mmu_mode() do {} while (0) -#define arch_flush_lazy_mmu_mode() do {} while (0) -#endif - -/* - * A facility to provide batching of the reload of page tables and - * other process state with the actual context switch code for - * paravirtualized guests. By convention, only one of the batched - * update (lazy) modes (CPU, MMU) should be active at any given time, - * entry should never be nested, and entry and exits should always be - * paired. This is for sanity of maintaining and reasoning about the - * kernel code. In this case, the exit (end of the context switch) is - * in architecture-specific code, and so doesn't need a generic - * definition. 
- */ -#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH -#define arch_start_context_switch(prev) do {} while (0) -#endif - -#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY -#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION -static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) -{ - return pmd; -} - -static inline int pmd_swp_soft_dirty(pmd_t pmd) -{ - return 0; -} - -static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) -{ - return pmd; -} -#endif -#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */ -static inline int pte_soft_dirty(pte_t pte) -{ - return 0; -} - -static inline int pmd_soft_dirty(pmd_t pmd) -{ - return 0; -} - -static inline pte_t pte_mksoft_dirty(pte_t pte) -{ - return pte; -} - -static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) -{ - return pmd; -} - -static inline pte_t pte_clear_soft_dirty(pte_t pte) -{ - return pte; -} - -static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) -{ - return pmd; -} - -static inline pte_t pte_swp_mksoft_dirty(pte_t pte) -{ - return pte; -} - -static inline int pte_swp_soft_dirty(pte_t pte) -{ - return 0; -} - -static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) -{ - return pte; -} - -static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) -{ - return pmd; -} - -static inline int pmd_swp_soft_dirty(pmd_t pmd) -{ - return 0; -} - -static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) -{ - return pmd; -} -#endif - -#ifndef __HAVE_PFNMAP_TRACKING -/* - * Interfaces that can be used by architecture code to keep track of - * memory type of pfn mappings specified by the remap_pfn_range, - * vmf_insert_pfn. - */ - -/* - * track_pfn_remap is called when a _new_ pfn mapping is being established - * by remap_pfn_range() for physical range indicated by pfn and size. - */ -static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, - unsigned long pfn, unsigned long addr, - unsigned long size) -{ - return 0; -} - -/* - * track_pfn_insert is called when a _new_ single pfn is established - * by vmf_insert_pfn(). - */ -static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, - pfn_t pfn) -{ -} - -/* - * track_pfn_copy is called when vma that is covering the pfnmap gets - * copied through copy_page_range(). - */ -static inline int track_pfn_copy(struct vm_area_struct *vma) -{ - return 0; -} - -/* - * untrack_pfn is called while unmapping a pfnmap for a region. - * untrack can be called for a specific region indicated by pfn and size or - * can be for the entire vma (in which case pfn, size are zero). - */ -static inline void untrack_pfn(struct vm_area_struct *vma, - unsigned long pfn, unsigned long size) -{ -} - -/* - * untrack_pfn_moved is called while mremapping a pfnmap for a new region. 
- */ -static inline void untrack_pfn_moved(struct vm_area_struct *vma) -{ -} -#else -extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, - unsigned long pfn, unsigned long addr, - unsigned long size); -extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, - pfn_t pfn); -extern int track_pfn_copy(struct vm_area_struct *vma); -extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, - unsigned long size); -extern void untrack_pfn_moved(struct vm_area_struct *vma); -#endif - -#ifdef __HAVE_COLOR_ZERO_PAGE -static inline int is_zero_pfn(unsigned long pfn) -{ - extern unsigned long zero_pfn; - unsigned long offset_from_zero_pfn = pfn - zero_pfn; - return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); -} - -#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) - -#else -static inline int is_zero_pfn(unsigned long pfn) -{ - extern unsigned long zero_pfn; - return pfn == zero_pfn; -} - -static inline unsigned long my_zero_pfn(unsigned long addr) -{ - extern unsigned long zero_pfn; - return zero_pfn; -} -#endif - -#ifdef CONFIG_MMU - -#ifndef CONFIG_TRANSPARENT_HUGEPAGE -static inline int pmd_trans_huge(pmd_t pmd) -{ - return 0; -} -#ifndef pmd_write -static inline int pmd_write(pmd_t pmd) -{ - BUG(); - return 0; -} -#endif /* pmd_write */ -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - -#ifndef pud_write -static inline int pud_write(pud_t pud) -{ - BUG(); - return 0; -} -#endif /* pud_write */ - -#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) -static inline int pmd_devmap(pmd_t pmd) -{ - return 0; -} -static inline int pud_devmap(pud_t pud) -{ - return 0; -} -static inline int pgd_devmap(pgd_t pgd) -{ - return 0; -} -#endif - -#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ - (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ - !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) -static inline int pud_trans_huge(pud_t pud) -{ - return 0; -} -#endif - -/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */ -static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud) -{ - pud_t pudval = READ_ONCE(*pud); - - if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval)) - return 1; - if (unlikely(pud_bad(pudval))) { - pud_clear_bad(pud); - return 1; - } - return 0; -} - -/* See pmd_trans_unstable for discussion. */ -static inline int pud_trans_unstable(pud_t *pud) -{ -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ - defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) - return pud_none_or_trans_huge_or_dev_or_clear_bad(pud); -#else - return 0; -#endif -} - -#ifndef pmd_read_atomic -static inline pmd_t pmd_read_atomic(pmd_t *pmdp) -{ - /* - * Depend on compiler for an atomic pmd read. NOTE: this is - * only going to work, if the pmdval_t isn't larger than - * an unsigned long. - */ - return *pmdp; -} -#endif - -#ifndef arch_needs_pgtable_deposit -#define arch_needs_pgtable_deposit() (false) -#endif -/* - * This function is meant to be used by sites walking pagetables with - * the mmap_sem hold in read mode to protect against MADV_DONTNEED and - * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd - * into a null pmd and the transhuge page fault can convert a null pmd - * into an hugepmd or into a regular pmd (if the hugepage allocation - * fails). While holding the mmap_sem in read mode the pmd becomes - * stable and stops changing under us only if it's not null and not a - * transhuge pmd. 
When those races occurs and this function makes a - * difference vs the standard pmd_none_or_clear_bad, the result is - * undefined so behaving like if the pmd was none is safe (because it - * can return none anyway). The compiler level barrier() is critically - * important to compute the two checks atomically on the same pmdval. - * - * For 32bit kernels with a 64bit large pmd_t this automatically takes - * care of reading the pmd atomically to avoid SMP race conditions - * against pmd_populate() when the mmap_sem is hold for reading by the - * caller (a special atomic read not done by "gcc" as in the generic - * version above, is also needed when THP is disabled because the page - * fault can populate the pmd from under us). - */ -static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) -{ - pmd_t pmdval = pmd_read_atomic(pmd); - /* - * The barrier will stabilize the pmdval in a register or on - * the stack so that it will stop changing under the code. - * - * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE, - * pmd_read_atomic is allowed to return a not atomic pmdval - * (for example pointing to an hugepage that has never been - * mapped in the pmd). The below checks will only care about - * the low part of the pmd with 32bit PAE x86 anyway, with the - * exception of pmd_none(). So the important thing is that if - * the low part of the pmd is found null, the high part will - * be also null or the pmd_none() check below would be - * confused. - */ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - barrier(); -#endif - /* - * !pmd_present() checks for pmd migration entries - * - * The complete check uses is_pmd_migration_entry() in linux/swapops.h - * But using that requires moving current function and pmd_trans_unstable() - * to linux/swapops.h to resovle dependency, which is too much code move. - * - * !pmd_present() is equivalent to is_pmd_migration_entry() currently, - * because !pmd_present() pages can only be under migration not swapped - * out. - * - * pmd_none() is preseved for future condition checks on pmd migration - * entries and not confusing with this function name, although it is - * redundant with !pmd_present(). - */ - if (pmd_none(pmdval) || pmd_trans_huge(pmdval) || - (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval))) - return 1; - if (unlikely(pmd_bad(pmdval))) { - pmd_clear_bad(pmd); - return 1; - } - return 0; -} - -/* - * This is a noop if Transparent Hugepage Support is not built into - * the kernel. Otherwise it is equivalent to - * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in - * places that already verified the pmd is not none and they want to - * walk ptes while holding the mmap sem in read mode (write mode don't - * need this). If THP is not enabled, the pmd can't go away under the - * code even if MADV_DONTNEED runs, but if THP is enabled we need to - * run a pmd_trans_unstable before walking the ptes after - * split_huge_pmd returns (because it may have run when the pmd become - * null, but then a page fault can map in a THP and not a regular page). - */ -static inline int pmd_trans_unstable(pmd_t *pmd) -{ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - return pmd_none_or_trans_huge_or_clear_bad(pmd); -#else - return 0; -#endif -} - -#ifndef CONFIG_NUMA_BALANCING -/* - * Technically a PTE can be PROTNONE even when not doing NUMA balancing but - * the only case the kernel cares is for NUMA balancing and is only ever set - * when the VMA is accessible. 
For PROT_NONE VMAs, the PTEs are not marked - * _PAGE_PROTNONE so by default, implement the helper as "always no". It - * is the responsibility of the caller to distinguish between PROT_NONE - * protections and NUMA hinting fault protections. - */ -static inline int pte_protnone(pte_t pte) -{ - return 0; -} - -static inline int pmd_protnone(pmd_t pmd) -{ - return 0; -} -#endif /* CONFIG_NUMA_BALANCING */ - -#endif /* CONFIG_MMU */ - -#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP - -#ifndef __PAGETABLE_P4D_FOLDED -int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot); -int p4d_clear_huge(p4d_t *p4d); -#else -static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) -{ - return 0; -} -static inline int p4d_clear_huge(p4d_t *p4d) -{ - return 0; -} -#endif /* !__PAGETABLE_P4D_FOLDED */ - -int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); -int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); -int pud_clear_huge(pud_t *pud); -int pmd_clear_huge(pmd_t *pmd); -int p4d_free_pud_page(p4d_t *p4d, unsigned long addr); -int pud_free_pmd_page(pud_t *pud, unsigned long addr); -int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); -#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ -static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) -{ - return 0; -} -static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) -{ - return 0; -} -static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) -{ - return 0; -} -static inline int p4d_clear_huge(p4d_t *p4d) -{ - return 0; -} -static inline int pud_clear_huge(pud_t *pud) -{ - return 0; -} -static inline int pmd_clear_huge(pmd_t *pmd) -{ - return 0; -} -static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) -{ - return 0; -} -static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr) -{ - return 0; -} -static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) -{ - return 0; -} -#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ - -#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -/* - * ARCHes with special requirements for evicting THP backing TLB entries can - * implement this. Otherwise it can also help optimize the normal TLB flush in - * the THP regime. The stock flush_tlb_range() typically has an optimization to nuke the - * entire TLB if the flush span is greater than a threshold, which will - * likely be true for a single huge page. Thus a single THP flush will - * invalidate the entire TLB, which is not desirable. - * e.g. see arch/arc: flush_pmd_tlb_range - */ -#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) -#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) -#else -#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() -#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG() -#endif -#endif - -struct file; -int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, - unsigned long size, pgprot_t *vma_prot); - -#ifndef CONFIG_X86_ESPFIX64 -static inline void init_espfix_bsp(void) { } -#endif - -extern void __init pgtable_cache_init(void); - -#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED -static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) -{ - return true; -} - -static inline bool arch_has_pfn_modify_check(void) -{ - return false; -} -#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */ - -/* - * Architecture PAGE_KERNEL_* fallbacks - * - * Some architectures don't define certain PAGE_KERNEL_* flags.
This is either - * because they really don't support them, or the port needs to be updated to - * reflect the required functionality. Below are a set of relatively safe - * fallbacks, as best effort, which we can count on in lieu of the architectures - * not defining them on their own yet. - */ - -#ifndef PAGE_KERNEL_RO -# define PAGE_KERNEL_RO PAGE_KERNEL -#endif - -#ifndef PAGE_KERNEL_EXEC -# define PAGE_KERNEL_EXEC PAGE_KERNEL -#endif - -#endif /* !__ASSEMBLY__ */ - -#ifndef io_remap_pfn_range -#define io_remap_pfn_range remap_pfn_range -#endif - -#ifndef has_transparent_hugepage -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -#define has_transparent_hugepage() 1 -#else -#define has_transparent_hugepage() 0 -#endif -#endif - -/* - * On some architectures it depends on the mm if the p4d/pud or pmd - * layer of the page table hierarchy is folded or not. - */ -#ifndef mm_p4d_folded -#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED) -#endif - -#ifndef mm_pud_folded -#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED) -#endif - -#ifndef mm_pmd_folded -#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED) -#endif - -/* - * p?d_leaf() - true if this entry is a final mapping to a physical address. - * This differs from p?d_huge() by the fact that they are always available (if - * the architecture supports large pages at the appropriate level) even - * if CONFIG_HUGETLB_PAGE is not defined. - * Only meaningful when called on a valid entry. - */ -#ifndef pgd_leaf -#define pgd_leaf(x) 0 -#endif -#ifndef p4d_leaf -#define p4d_leaf(x) 0 -#endif -#ifndef pud_leaf -#define pud_leaf(x) 0 -#endif -#ifndef pmd_leaf -#define pmd_leaf(x) 0 -#endif - -#endif /* _ASM_GENERIC_PGTABLE_H */ diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index d1779d442aa5..66397ed10acb 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -53,6 +53,9 @@ extern char __ctors_start[], __ctors_end[]; /* Start and end of .opd section - used for function descriptors. */ extern char __start_opd[], __end_opd[]; +/* Start and end of instrumentation protected text section */ +extern char __noinstr_text_start[], __noinstr_text_end[]; + extern __visible const void __nosave_begin, __nosave_end; /* Function descriptor handling (if any). Override in asm/sections.h */ diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index 238873739550..5aa8705df87e 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h @@ -48,7 +48,7 @@ #ifdef CONFIG_NEED_MULTIPLE_NODES #define cpumask_of_node(node) ((node) == 0 ? 
cpu_online_mask : cpu_none_mask) #else - #define cpumask_of_node(node) ((void)node, cpu_online_mask) + #define cpumask_of_node(node) ((void)(node), cpu_online_mask) #endif #endif #ifndef pcibus_to_node diff --git a/include/asm-generic/vermagic.h b/include/asm-generic/vermagic.h new file mode 100644 index 000000000000..084274a1219e --- /dev/null +++ b/include/asm-generic/vermagic.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_GENERIC_VERMAGIC_H +#define _ASM_GENERIC_VERMAGIC_H + +#define MODULE_ARCH_VERMAGIC "" + +#endif /* _ASM_GENERIC_VERMAGIC_H */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 71e387a5fe90..db600ef218d7 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -541,6 +541,15 @@ __end_rodata = .; /* + * Non-instrumentable text section + */ +#define NOINSTR_TEXT \ + ALIGN_FUNCTION(); \ + __noinstr_text_start = .; \ + *(.noinstr.text) \ + __noinstr_text_end = .; + +/* * .text section. Map to function alignment to avoid address changes * during second ld run in second ld pass when generating System.map * @@ -551,6 +560,7 @@ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ + NOINSTR_TEXT \ *(.text..refcount) \ *(.ref.text) \ MEM_KEEP(init.text*) \ |
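Note: the sections.h and vmlinux.lds.h hunks above bracket the .noinstr.text output with the __noinstr_text_start/__noinstr_text_end markers and hook that range into TEXT_TEXT via NOINSTR_TEXT. As a minimal sketch only, and not part of this patch (the helper name within_noinstr_text() is made up for illustration), those markers could back a simple range check like the following:

#include <linux/types.h>
#include <asm/sections.h>	/* declares __noinstr_text_start/_end via asm-generic/sections.h */

/*
 * Hypothetical helper, for illustration: returns true if @addr falls
 * inside the instrumentation-protected text collected by the
 * NOINSTR_TEXT linker macro.
 */
static inline bool within_noinstr_text(unsigned long addr)
{
	return addr >= (unsigned long)__noinstr_text_start &&
	       addr < (unsigned long)__noinstr_text_end;
}

Because NOINSTR_TEXT starts with ALIGN_FUNCTION() before setting __noinstr_text_start, the bracketed region is a single, function-aligned, contiguous span placed inside the regular .text output section.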