Diffstat (limited to 'include')
329 files changed, 7751 insertions, 2833 deletions
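A note on the first ACPI hunks below: acpi_bus.h gains a paired install/remove notify-handler API. A minimal sketch of how a driver might use the pair, assuming only the signatures visible in the hunk (my_notify and the attach/detach wrappers are hypothetical, and what ACPICA hands the handler in its data argument is not shown here):

#include <linux/acpi.h>

/* hypothetical handler; the signature is the standard acpi_notify_handler */
static void my_notify(acpi_handle handle, u32 event, void *data)
{
	/* react to a Notify(device, event) coming from AML */
}

static int my_attach(struct acpi_device *adev)
{
	/* int return: 0 on success (the exact error convention is not
	 * visible in this hunk) */
	return acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY,
					       my_notify);
}

static void my_detach(struct acpi_device *adev)
{
	/* must mirror the install call: same handler_type, same handler */
	acpi_dev_remove_notify_handler(adev, ACPI_DEVICE_NOTIFY, my_notify);
}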
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h index d71291f25a80..76aa6aa346ba 100644 --- a/include/acpi/acnames.h +++ b/include/acpi/acnames.h @@ -22,6 +22,7 @@ #define METHOD_NAME__DDN "_DDN" #define METHOD_NAME__DIS "_DIS" #define METHOD_NAME__DMA "_DMA" +#define METHOD_NAME__EVT "_EVT" #define METHOD_NAME__HID "_HID" #define METHOD_NAME__INI "_INI" #define METHOD_NAME__PLD "_PLD" diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index c941d99162c0..254685085c82 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -515,6 +515,12 @@ void acpi_bus_private_data_handler(acpi_handle, void *); int acpi_bus_get_private_data(acpi_handle, void **); int acpi_bus_attach_private_data(acpi_handle, void *); void acpi_bus_detach_private_data(acpi_handle); +int acpi_dev_install_notify_handler(struct acpi_device *adev, + u32 handler_type, + acpi_notify_handler handler); +void acpi_dev_remove_notify_handler(struct acpi_device *adev, + u32 handler_type, + acpi_notify_handler handler); extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); extern int register_acpi_notifier(struct notifier_block *); extern int unregister_acpi_notifier(struct notifier_block *); @@ -563,8 +569,6 @@ int acpi_match_device_ids(struct acpi_device *device, const struct acpi_device_id *ids); void acpi_set_modalias(struct acpi_device *adev, const char *default_id, char *modalias, size_t len); -int acpi_create_dir(struct acpi_device *); -void acpi_remove_dir(struct acpi_device *); static inline bool acpi_device_enumerated(struct acpi_device *adev) { @@ -645,6 +649,8 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev); #ifdef CONFIG_X86 bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status); bool acpi_quirk_skip_acpi_ac_and_battery(void); +int acpi_install_cmos_rtc_space_handler(acpi_handle handle); +void acpi_remove_cmos_rtc_space_handler(acpi_handle handle); #else static inline bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status) @@ -655,6 +661,13 @@ static inline bool acpi_quirk_skip_acpi_ac_and_battery(void) { return false; } +static inline int acpi_install_cmos_rtc_space_handler(acpi_handle handle) +{ + return 1; +} +static inline void acpi_remove_cmos_rtc_space_handler(acpi_handle handle) +{ +} #endif #if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS) diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 9ffdc0425bc2..3d90716f9522 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -12,7 +12,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20230331 +#define ACPI_CA_VERSION 0x20230628 #include <acpi/acconfig.h> #include <acpi/actypes.h> @@ -970,8 +970,6 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status void **data, void (*callback)(void *))) -void acpi_run_debugger(char *batch_buffer); - void acpi_set_debugger_thread_id(acpi_thread_id thread_id); #endif /* __ACXFACE_H__ */ diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 58b0490a2ad1..8d5572ad48cb 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -402,7 +402,7 @@ struct acpi_cdat_dsmas { /* Flags for subtable above */ -#define ACPI_CEDT_DSMAS_NON_VOLATILE (1 << 2) +#define ACPI_CDAT_DSMAS_NON_VOLATILE (1 << 2) /* Subtable 1: Device scoped Latency and Bandwidth Information Structure (DSLBIS) */ diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 0029336775a9..3751ae69432f 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -893,7 +893,10 
@@ enum acpi_madt_type { ACPI_MADT_TYPE_BIO_PIC = 22, ACPI_MADT_TYPE_LPC_PIC = 23, ACPI_MADT_TYPE_RINTC = 24, - ACPI_MADT_TYPE_RESERVED = 25, /* 25 to 0x7F are reserved */ + ACPI_MADT_TYPE_IMSIC = 25, + ACPI_MADT_TYPE_APLIC = 26, + ACPI_MADT_TYPE_PLIC = 27, + ACPI_MADT_TYPE_RESERVED = 28, /* 28 to 0x7F are reserved */ ACPI_MADT_TYPE_OEM_RESERVED = 0x80 /* 0x80 to 0xFF are reserved for OEM use */ }; @@ -1261,6 +1264,9 @@ struct acpi_madt_rintc { u32 flags; u64 hart_id; u32 uid; /* ACPI processor UID */ + u32 ext_intc_id; /* External INTC Id */ + u64 imsic_addr; /* IMSIC base address */ + u32 imsic_size; /* IMSIC size */ }; /* Values for RISC-V INTC Version field above */ @@ -1271,6 +1277,48 @@ enum acpi_madt_rintc_version { ACPI_MADT_RINTC_VERSION_RESERVED = 2 /* 2 and greater are reserved */ }; +/* 25: RISC-V IMSIC */ +struct acpi_madt_imsic { + struct acpi_subtable_header header; + u8 version; + u8 reserved; + u32 flags; + u16 num_ids; + u16 num_guest_ids; + u8 guest_index_bits; + u8 hart_index_bits; + u8 group_index_bits; + u8 group_index_shift; +}; + +/* 26: RISC-V APLIC */ +struct acpi_madt_aplic { + struct acpi_subtable_header header; + u8 version; + u8 id; + u32 flags; + u8 hw_id[8]; + u16 num_idcs; + u16 num_sources; + u32 gsi_base; + u64 base_addr; + u32 size; +}; + +/* 27: RISC-V PLIC */ +struct acpi_madt_plic { + struct acpi_subtable_header header; + u8 version; + u8 id; + u8 hw_id[8]; + u16 num_irqs; + u16 max_prio; + u32 flags; + u32 size; + u64 base_addr; + u32 gsi_base; +}; + /* 80: OEM data */ struct acpi_madt_oem_data { @@ -2730,12 +2778,15 @@ enum acpi_rgrt_image_type { struct acpi_table_rhct { struct acpi_table_header header; /* Common ACPI table header */ - u32 reserved; + u32 flags; /* RHCT flags */ u64 time_base_freq; u32 node_count; u32 node_offset; }; +/* RHCT Flags */ + +#define ACPI_RHCT_TIMER_CANNOT_WAKEUP_CPU (1) /* * RHCT subtables */ @@ -2749,6 +2800,9 @@ struct acpi_rhct_node_header { enum acpi_rhct_node_type { ACPI_RHCT_NODE_TYPE_ISA_STRING = 0x0000, + ACPI_RHCT_NODE_TYPE_CMO = 0x0001, + ACPI_RHCT_NODE_TYPE_MMU = 0x0002, + ACPI_RHCT_NODE_TYPE_RESERVED = 0x0003, ACPI_RHCT_NODE_TYPE_HART_INFO = 0xFFFF, }; @@ -2762,6 +2816,24 @@ struct acpi_rhct_isa_string { char isa[]; }; +struct acpi_rhct_cmo_node { + u8 reserved; /* Must be zero */ + u8 cbom_size; /* CBOM size in powerof 2 */ + u8 cbop_size; /* CBOP size in powerof 2 */ + u8 cboz_size; /* CBOZ size in powerof 2 */ +}; + +struct acpi_rhct_mmu_node { + u8 reserved; /* Must be zero */ + u8 mmu_type; /* Virtual Address Scheme */ +}; + +enum acpi_rhct_mmu_type { + ACPI_RHCT_MMU_TYPE_SV39 = 0, + ACPI_RHCT_MMU_TYPE_SV48 = 1, + ACPI_RHCT_MMU_TYPE_SV57 = 2 +}; + /* Hart Info node structure */ struct acpi_rhct_hart_info { u16 num_offsets; diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index 000764ab3985..c080d579a546 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h @@ -279,12 +279,14 @@ struct acpi_srat_gic_its_affinity { * 6: ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY */ +#define ACPI_SRAT_DEVICE_HANDLE_SIZE 16 + struct acpi_srat_generic_affinity { struct acpi_subtable_header header; u8 reserved; u8 device_handle_type; u32 proximity_domain; - u8 device_handle[16]; + u8 device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE]; u32 flags; u32 reserved1; }; diff --git a/include/acpi/pdc_intel.h b/include/acpi/pdc_intel.h deleted file mode 100644 index 967c552d1cd3..000000000000 --- a/include/acpi/pdc_intel.h +++ /dev/null @@ -1,36 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -/* _PDC bit definition for Intel 
processors */ - -#ifndef __PDC_INTEL_H__ -#define __PDC_INTEL_H__ - -#define ACPI_PDC_P_FFH (0x0001) -#define ACPI_PDC_C_C1_HALT (0x0002) -#define ACPI_PDC_T_FFH (0x0004) -#define ACPI_PDC_SMP_C1PT (0x0008) -#define ACPI_PDC_SMP_C2C3 (0x0010) -#define ACPI_PDC_SMP_P_SWCOORD (0x0020) -#define ACPI_PDC_SMP_C_SWCOORD (0x0040) -#define ACPI_PDC_SMP_T_SWCOORD (0x0080) -#define ACPI_PDC_C_C1_FFH (0x0100) -#define ACPI_PDC_C_C2C3_FFH (0x0200) -#define ACPI_PDC_SMP_P_HWCOORD (0x0800) - -#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \ - ACPI_PDC_C_C1_HALT | \ - ACPI_PDC_P_FFH) - -#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \ - ACPI_PDC_C_C1_HALT | \ - ACPI_PDC_SMP_P_SWCOORD | \ - ACPI_PDC_SMP_P_HWCOORD | \ - ACPI_PDC_P_FFH) - -#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \ - ACPI_PDC_SMP_C1PT | \ - ACPI_PDC_C_C1_HALT | \ - ACPI_PDC_C_C1_FFH | \ - ACPI_PDC_C_C2C3_FFH) - -#endif /* __PDC_INTEL_H__ */ diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 1ca450e35c0d..565341c826e3 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -182,6 +182,7 @@ #ifdef ACPI_USE_STANDARD_HEADERS #include <stddef.h> #include <unistd.h> +#include <stdint.h> #define ACPI_OFFSET(d, f) offsetof(d, f) #endif diff --git a/include/acpi/platform/aczephyr.h b/include/acpi/platform/aczephyr.h index 2f0d30c3c5fd..703db4dc740d 100644 --- a/include/acpi/platform/aczephyr.h +++ b/include/acpi/platform/aczephyr.h @@ -10,9 +10,6 @@ #ifndef __ACZEPHYR_H__ #define __ACZEPHYR_H__ -#define SEEK_SET FS_SEEK_SET -#define SEEK_END FS_SEEK_END - #define ACPI_MACHINE_WIDTH 64 #define ACPI_NO_ERROR_MESSAGES diff --git a/include/acpi/proc_cap_intel.h b/include/acpi/proc_cap_intel.h new file mode 100644 index 000000000000..ddcdc41d6c3e --- /dev/null +++ b/include/acpi/proc_cap_intel.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Vendor specific processor capabilities bit definition + * for Intel processors. Those bits are used to convey OSPM + * power management capabilities to the platform. 
+ */ + +#ifndef __PROC_CAP_INTEL_H__ +#define __PROC_CAP_INTEL_H__ + +#define ACPI_PROC_CAP_P_FFH (0x0001) +#define ACPI_PROC_CAP_C_C1_HALT (0x0002) +#define ACPI_PROC_CAP_T_FFH (0x0004) +#define ACPI_PROC_CAP_SMP_C1PT (0x0008) +#define ACPI_PROC_CAP_SMP_C2C3 (0x0010) +#define ACPI_PROC_CAP_SMP_P_SWCOORD (0x0020) +#define ACPI_PROC_CAP_SMP_C_SWCOORD (0x0040) +#define ACPI_PROC_CAP_SMP_T_SWCOORD (0x0080) +#define ACPI_PROC_CAP_C_C1_FFH (0x0100) +#define ACPI_PROC_CAP_C_C2C3_FFH (0x0200) +#define ACPI_PROC_CAP_SMP_P_HWCOORD (0x0800) +#define ACPI_PROC_CAP_COLLAB_PROC_PERF (0x1000) + +#define ACPI_PROC_CAP_EST_CAPABILITY_SMP (ACPI_PROC_CAP_SMP_C1PT | \ + ACPI_PROC_CAP_C_C1_HALT | \ + ACPI_PROC_CAP_P_FFH) + +#define ACPI_PROC_CAP_EST_CAPABILITY_SWSMP (ACPI_PROC_CAP_SMP_C1PT | \ + ACPI_PROC_CAP_C_C1_HALT | \ + ACPI_PROC_CAP_SMP_P_SWCOORD | \ + ACPI_PROC_CAP_SMP_P_HWCOORD | \ + ACPI_PROC_CAP_P_FFH) + +#define ACPI_PROC_CAP_C_CAPABILITY_SMP (ACPI_PROC_CAP_SMP_C2C3 | \ + ACPI_PROC_CAP_SMP_C1PT | \ + ACPI_PROC_CAP_C_C1_HALT | \ + ACPI_PROC_CAP_C_C1_FFH | \ + ACPI_PROC_CAP_C_C2C3_FFH) + +#endif /* __PROC_CAP_INTEL_H__ */ diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index f46258d1a080..84ec53ccc450 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h @@ -77,13 +77,6 @@ static inline void flush_icache_range(unsigned long start, unsigned long end) #define flush_icache_user_range flush_icache_range #endif -#ifndef flush_icache_page -static inline void flush_icache_page(struct vm_area_struct *vma, - struct page *page) -{ -} -#endif - #ifndef flush_icache_user_page static inline void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 587e7e9b9a37..bac63e874c7b 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -1047,41 +1047,22 @@ static inline void iounmap(volatile void __iomem *addr) #elif defined(CONFIG_GENERIC_IOREMAP) #include <linux/pgtable.h> -/* - * Arch code can implement the following two hooks when using GENERIC_IOREMAP - * ioremap_allowed() return a bool, - * - true means continue to remap - * - false means skip remap and return directly - * iounmap_allowed() return a bool, - * - true means continue to vunmap - * - false means skip vunmap and return directly - */ -#ifndef ioremap_allowed -#define ioremap_allowed ioremap_allowed -static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size, - unsigned long prot) -{ - return true; -} -#endif - -#ifndef iounmap_allowed -#define iounmap_allowed iounmap_allowed -static inline bool iounmap_allowed(void *addr) -{ - return true; -} -#endif +void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size, + pgprot_t prot); void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, unsigned long prot); void iounmap(volatile void __iomem *addr); +void generic_iounmap(volatile void __iomem *addr); +#ifndef ioremap +#define ioremap ioremap static inline void __iomem *ioremap(phys_addr_t addr, size_t size) { /* _PAGE_IOREMAP needs to be supplied by the architecture */ return ioremap_prot(addr, size, _PAGE_IOREMAP); } +#endif #endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */ #ifndef ioremap_wc diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h index 08237ae8b840..196087a8126e 100644 --- a/include/asm-generic/iomap.h +++ b/include/asm-generic/iomap.h @@ -93,15 +93,15 @@ extern void __iomem *ioport_map(unsigned long port, unsigned int nr); 
extern void ioport_unmap(void __iomem *); #endif -#ifndef ARCH_HAS_IOREMAP_WC +#ifndef ioremap_wc #define ioremap_wc ioremap #endif -#ifndef ARCH_HAS_IOREMAP_WT +#ifndef ioremap_wt #define ioremap_wt ioremap #endif -#ifndef ARCH_HAS_IOREMAP_NP +#ifndef ioremap_np /* See the comment in asm-generic/io.h about ioremap_np(). */ #define ioremap_np ioremap_np static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size) diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index 402a8c1c202d..a8f4b653ef4e 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -190,7 +190,7 @@ int hv_common_cpu_die(unsigned int cpu); void *hv_alloc_hyperv_page(void); void *hv_alloc_hyperv_zeroed_page(void); -void hv_free_hyperv_page(unsigned long addr); +void hv_free_hyperv_page(void *addr); /** * hv_cpu_number_to_vp_number() - Map CPU to VP. diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index a7cf825befae..c75d4a753849 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -8,7 +8,7 @@ #define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT) /** - * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table + * __pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table * @mm: the mm_struct of the current context * * This function is intended for architectures that need @@ -18,12 +18,17 @@ */ static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm) { - return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL); + struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & + ~__GFP_HIGHMEM, 0); + + if (!ptdesc) + return NULL; + return ptdesc_address(ptdesc); } #ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL /** - * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table + * pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table * @mm: the mm_struct of the current context * * Return: pointer to the allocated memory or %NULL on error @@ -35,40 +40,40 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) #endif /** - * pte_free_kernel - free PTE-level kernel page table page + * pte_free_kernel - free PTE-level kernel page table memory * @mm: the mm_struct of the current context * @pte: pointer to the memory containing the page table */ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { - free_page((unsigned long)pte); + pagetable_free(virt_to_ptdesc(pte)); } /** - * __pte_alloc_one - allocate a page for PTE-level user page table + * __pte_alloc_one - allocate memory for a PTE-level user page table * @mm: the mm_struct of the current context * @gfp: GFP flags to use for the allocation * - * Allocates a page and runs the pgtable_pte_page_ctor(). + * Allocate memory for a page table and ptdesc and runs pagetable_pte_ctor(). * * This function is intended for architectures that need * anything beyond simple page allocation or must have custom GFP flags. 
* - * Return: `struct page` initialized as page table or %NULL on error + * Return: `struct page` referencing the ptdesc or %NULL on error */ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp) { - struct page *pte; + struct ptdesc *ptdesc; - pte = alloc_page(gfp); - if (!pte) + ptdesc = pagetable_alloc(gfp, 0); + if (!ptdesc) return NULL; - if (!pgtable_pte_page_ctor(pte)) { - __free_page(pte); + if (!pagetable_pte_ctor(ptdesc)) { + pagetable_free(ptdesc); return NULL; } - return pte; + return ptdesc_page(ptdesc); } #ifndef __HAVE_ARCH_PTE_ALLOC_ONE @@ -76,9 +81,9 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp) * pte_alloc_one - allocate a page for PTE-level user page table * @mm: the mm_struct of the current context * - * Allocates a page and runs the pgtable_pte_page_ctor(). + * Allocate memory for a page table and ptdesc and runs pagetable_pte_ctor(). * - * Return: `struct page` initialized as page table or %NULL on error + * Return: `struct page` referencing the ptdesc or %NULL on error */ static inline pgtable_t pte_alloc_one(struct mm_struct *mm) { @@ -92,14 +97,16 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm) */ /** - * pte_free - free PTE-level user page table page + * pte_free - free PTE-level user page table memory * @mm: the mm_struct of the current context - * @pte_page: the `struct page` representing the page table + * @pte_page: the `struct page` referencing the ptdesc */ static inline void pte_free(struct mm_struct *mm, struct page *pte_page) { - pgtable_pte_page_dtor(pte_page); - __free_page(pte_page); + struct ptdesc *ptdesc = page_ptdesc(pte_page); + + pagetable_pte_dtor(ptdesc); + pagetable_free(ptdesc); } @@ -107,10 +114,11 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page) #ifndef __HAVE_ARCH_PMD_ALLOC_ONE /** - * pmd_alloc_one - allocate a page for PMD-level page table + * pmd_alloc_one - allocate memory for a PMD-level page table * @mm: the mm_struct of the current context * - * Allocates a page and runs the pgtable_pmd_page_ctor(). + * Allocate memory for a page table and ptdesc and runs pagetable_pmd_ctor(). + * * Allocations use %GFP_PGTABLE_USER in user context and * %GFP_PGTABLE_KERNEL in kernel context. 
* @@ -118,28 +126,30 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page) */ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) { - struct page *page; + struct ptdesc *ptdesc; gfp_t gfp = GFP_PGTABLE_USER; if (mm == &init_mm) gfp = GFP_PGTABLE_KERNEL; - page = alloc_page(gfp); - if (!page) + ptdesc = pagetable_alloc(gfp, 0); + if (!ptdesc) return NULL; - if (!pgtable_pmd_page_ctor(page)) { - __free_page(page); + if (!pagetable_pmd_ctor(ptdesc)) { + pagetable_free(ptdesc); return NULL; } - return (pmd_t *)page_address(page); + return ptdesc_address(ptdesc); } #endif #ifndef __HAVE_ARCH_PMD_FREE static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { + struct ptdesc *ptdesc = virt_to_ptdesc(pmd); + BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); - pgtable_pmd_page_dtor(virt_to_page(pmd)); - free_page((unsigned long)pmd); + pagetable_pmd_dtor(ptdesc); + pagetable_free(ptdesc); } #endif @@ -150,19 +160,25 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr) { gfp_t gfp = GFP_PGTABLE_USER; + struct ptdesc *ptdesc; if (mm == &init_mm) gfp = GFP_PGTABLE_KERNEL; - return (pud_t *)get_zeroed_page(gfp); + gfp &= ~__GFP_HIGHMEM; + + ptdesc = pagetable_alloc(gfp, 0); + if (!ptdesc) + return NULL; + return ptdesc_address(ptdesc); } #ifndef __HAVE_ARCH_PUD_ALLOC_ONE /** - * pud_alloc_one - allocate a page for PUD-level page table + * pud_alloc_one - allocate memory for a PUD-level page table * @mm: the mm_struct of the current context * - * Allocates a page using %GFP_PGTABLE_USER for user context and - * %GFP_PGTABLE_KERNEL for kernel context. + * Allocate memory for a page table using %GFP_PGTABLE_USER for user context + * and %GFP_PGTABLE_KERNEL for kernel context. * * Return: pointer to the allocated memory or %NULL on error */ @@ -175,7 +191,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) static inline void __pud_free(struct mm_struct *mm, pud_t *pud) { BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); - free_page((unsigned long)pud); + pagetable_free(virt_to_ptdesc(pud)); } #ifndef __HAVE_ARCH_PUD_FREE @@ -190,7 +206,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud) #ifndef __HAVE_ARCH_PGD_FREE static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - free_page((unsigned long)pgd); + pagetable_free(virt_to_ptdesc(pgd)); } #endif diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index b46617207c93..129a3a759976 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -456,7 +456,6 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) return; tlb_flush(tlb); - mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); __tlb_reset_range(tlb); } @@ -481,6 +480,17 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) return tlb_remove_page_size(tlb, page, PAGE_SIZE); } +static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt) +{ + tlb_remove_table(tlb, pt); +} + +/* Like tlb_remove_ptdesc, but for page-like page directories. 
*/ +static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt) +{ + tlb_remove_page(tlb, ptdesc_page(pt)); +} + static inline void tlb_change_page_size(struct mmu_gather *tlb, unsigned int page_size) { diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 20c93f08c993..95a1d214108a 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h @@ -38,7 +38,7 @@ static inline long find_zero(unsigned long mask) return (mask >> 8) ? byte : byte + 1; } -static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) +static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) { unsigned long rhs = val | c->low_bits; *data = rhs; diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 6156161b181f..ca86f4c6ba43 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -12,6 +12,7 @@ #include <linux/cache.h> #include <linux/crypto.h> #include <linux/types.h> +#include <linux/workqueue.h> /* * Maximum values for blocksize and alignmask, used to allocate @@ -82,6 +83,8 @@ struct crypto_instance { struct crypto_spawn *spawns; }; + struct work_struct free_work; + void *__ctx[] CRYPTO_MINALIGN_ATTR; }; diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 2038764b30c2..2835069c5997 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -7,91 +7,47 @@ #ifndef _CRYPTO_ENGINE_H #define _CRYPTO_ENGINE_H -#include <linux/crypto.h> -#include <linux/list.h> -#include <linux/kthread.h> -#include <linux/spinlock.h> -#include <linux/types.h> - -#include <crypto/algapi.h> #include <crypto/aead.h> #include <crypto/akcipher.h> #include <crypto/hash.h> -#include <crypto/skcipher.h> #include <crypto/kpp.h> +#include <crypto/skcipher.h> +#include <linux/types.h> +struct crypto_engine; struct device; -#define ENGINE_NAME_LEN 30 -/* - * struct crypto_engine - crypto hardware engine - * @name: the engine name - * @idling: the engine is entering idle state - * @busy: request pump is busy - * @running: the engine is on working - * @retry_support: indication that the hardware allows re-execution - * of a failed backlog request - * crypto-engine, in head position to keep order - * @list: link with the global crypto engine list - * @queue_lock: spinlock to synchronise access to request queue - * @queue: the crypto queue of the engine - * @rt: whether this queue is set to run as a realtime task - * @prepare_crypt_hardware: a request will soon arrive from the queue - * so the subsystem requests the driver to prepare the hardware - * by issuing this call - * @unprepare_crypt_hardware: there are currently no more requests on the - * queue so the subsystem notifies the driver that it may relax the - * hardware by issuing this call - * @do_batch_requests: execute a batch of requests. Depends on multiple - * requests support. 
- * @kworker: kthread worker struct for request pump - * @pump_requests: work struct for scheduling work to the request pump - * @priv_data: the engine private data - * @cur_req: the current request which is on processing - */ -struct crypto_engine { - char name[ENGINE_NAME_LEN]; - bool idling; - bool busy; - bool running; - - bool retry_support; - - struct list_head list; - spinlock_t queue_lock; - struct crypto_queue queue; - struct device *dev; - - bool rt; - - int (*prepare_crypt_hardware)(struct crypto_engine *engine); - int (*unprepare_crypt_hardware)(struct crypto_engine *engine); - int (*do_batch_requests)(struct crypto_engine *engine); - - - struct kthread_worker *kworker; - struct kthread_work pump_requests; - - void *priv_data; - struct crypto_async_request *cur_req; -}; - /* * struct crypto_engine_op - crypto hardware engine operations - * @prepare_request: do some preparation if needed before handling the current request - * @unprepare_request: undo any work done by prepare_request() * @do_one_request: do encryption for current request */ struct crypto_engine_op { - int (*prepare_request)(struct crypto_engine *engine, - void *areq); - int (*unprepare_request)(struct crypto_engine *engine, - void *areq); int (*do_one_request)(struct crypto_engine *engine, void *areq); }; -struct crypto_engine_ctx { +struct aead_engine_alg { + struct aead_alg base; + struct crypto_engine_op op; +}; + +struct ahash_engine_alg { + struct ahash_alg base; + struct crypto_engine_op op; +}; + +struct akcipher_engine_alg { + struct akcipher_alg base; + struct crypto_engine_op op; +}; + +struct kpp_engine_alg { + struct kpp_alg base; + struct crypto_engine_op op; +}; + +struct skcipher_engine_alg { + struct skcipher_alg base; struct crypto_engine_op op; }; @@ -124,4 +80,28 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, bool rt, int qlen); int crypto_engine_exit(struct crypto_engine *engine); +int crypto_engine_register_aead(struct aead_engine_alg *alg); +void crypto_engine_unregister_aead(struct aead_engine_alg *alg); +int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count); +void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count); + +int crypto_engine_register_ahash(struct ahash_engine_alg *alg); +void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg); +int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count); +void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs, + int count); + +int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg); +void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg); + +int crypto_engine_register_kpp(struct kpp_engine_alg *alg); +void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg); + +int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg); +void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg); +int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs, + int count); +void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs, + int count); + #endif /* _CRYPTO_ENGINE_H */ diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h new file mode 100644 index 000000000000..fbf4be56cf12 --- /dev/null +++ b/include/crypto/internal/engine.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Crypto engine API + * + * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org> + * Copyright (c) 2023 Herbert Xu 
<herbert@gondor.apana.org.au> + */ +#ifndef _CRYPTO_INTERNAL_ENGINE_H +#define _CRYPTO_INTERNAL_ENGINE_H + +#include <crypto/algapi.h> +#include <crypto/engine.h> +#include <linux/kthread.h> +#include <linux/spinlock_types.h> +#include <linux/types.h> + +#define ENGINE_NAME_LEN 30 + +struct device; + +/* + * struct crypto_engine - crypto hardware engine + * @name: the engine name + * @idling: the engine is entering idle state + * @busy: request pump is busy + * @running: the engine is on working + * @retry_support: indication that the hardware allows re-execution + * of a failed backlog request + * crypto-engine, in head position to keep order + * @list: link with the global crypto engine list + * @queue_lock: spinlock to synchronise access to request queue + * @queue: the crypto queue of the engine + * @rt: whether this queue is set to run as a realtime task + * @prepare_crypt_hardware: a request will soon arrive from the queue + * so the subsystem requests the driver to prepare the hardware + * by issuing this call + * @unprepare_crypt_hardware: there are currently no more requests on the + * queue so the subsystem notifies the driver that it may relax the + * hardware by issuing this call + * @do_batch_requests: execute a batch of requests. Depends on multiple + * requests support. + * @kworker: kthread worker struct for request pump + * @pump_requests: work struct for scheduling work to the request pump + * @priv_data: the engine private data + * @cur_req: the current request which is on processing + */ +struct crypto_engine { + char name[ENGINE_NAME_LEN]; + bool idling; + bool busy; + bool running; + + bool retry_support; + + struct list_head list; + spinlock_t queue_lock; + struct crypto_queue queue; + struct device *dev; + + bool rt; + + int (*prepare_crypt_hardware)(struct crypto_engine *engine); + int (*unprepare_crypt_hardware)(struct crypto_engine *engine); + int (*do_batch_requests)(struct crypto_engine *engine); + + + struct kthread_worker *kworker; + struct kthread_work pump_requests; + + void *priv_data; + struct crypto_async_request *cur_req; +}; + +#endif
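The two crypto hunks above move struct crypto_engine into a new internal header and, in engine.h earlier, replace the prepare/unprepare hooks and crypto_engine_ctx with per-algorithm wrapper types that pair a base algorithm with a crypto_engine_op. A sketch of the new registration style for a skcipher, assuming only the types shown above (the driver body is hypothetical; crypto_finalize_skcipher_request() is the pre-existing completion helper from engine.h):

#include <crypto/engine.h>
#include <crypto/skcipher.h>
#include <linux/container_of.h>

static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);
	int err = 0;

	/* ... drive the hardware for this request ... */

	/* report completion back through the engine */
	crypto_finalize_skcipher_request(engine, req, err);
	return 0;
}

static struct skcipher_engine_alg my_alg = {
	.base = { /* the usual struct skcipher_alg fields go here */ },
	.op   = { .do_one_request = my_do_one_request },
};

/* probe:  err = crypto_engine_register_skcipher(&my_alg); */
/* remove: crypto_engine_unregister_skcipher(&my_alg);     */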
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index 8fadd561c50e..462f8a34cdf8 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -78,6 +78,10 @@ extern int restrict_link_by_ca(struct key *dest_keyring, const struct key_type *type, const union key_payload *payload, struct key *trust_keyring); +int restrict_link_by_digsig(struct key *dest_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *trust_keyring); #else static inline int restrict_link_by_ca(struct key *dest_keyring, const struct key_type *type, @@ -86,6 +90,14 @@ static inline int restrict_link_by_ca(struct key *dest_keyring, { return 0; } + +static inline int restrict_link_by_digsig(struct key *dest_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *trust_keyring) +{ + return 0; +} #endif extern int query_asymmetric_key(const struct kernel_pkey_params *,
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h index 02f2ac4dd2df..e69cece404b3 100644 --- a/include/drm/display/drm_dp.h +++ b/include/drm/display/drm_dp.h @@ -1537,7 +1537,7 @@ enum drm_dp_phy { #define DP_BRANCH_OUI_HEADER_SIZE 0xc #define DP_RECEIVER_CAP_SIZE 0xf -#define DP_DSC_RECEIVER_CAP_SIZE 0xf +#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */ #define EDP_PSR_RECEIVER_CAP_SIZE 2 #define EDP_DISPLAY_CTL_CAP_SIZE 3 #define DP_LTTPR_COMMON_CAP_SIZE 8
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 169755d3de19..48e93f909ef6 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -61,15 +61,9 @@ struct std_timing { u8 vfreq_aspect; } __attribute__((packed)); -#define DRM_EDID_PT_SYNC_MASK (3 << 3) -# define DRM_EDID_PT_ANALOG_CSYNC (0 << 3) -# define DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC (1 << 3) -# define DRM_EDID_PT_DIGITAL_CSYNC (2 << 3) -# define DRM_EDID_PT_CSYNC_ON_RGB (1 << 1) /* analog csync only */ -# define DRM_EDID_PT_CSYNC_SERRATE (1 << 2) -# define DRM_EDID_PT_DIGITAL_SEPARATE_SYNC (3 << 3) -# define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) /* also digital csync */ -# define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2) +#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) +#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2) +#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3) #define DRM_EDID_PT_STEREO (1 << 5) #define DRM_EDID_PT_INTERLACED (1 << 7)
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h index 4977e0ab72db..fad3c4003b2b 100644 --- a/include/drm/drm_probe_helper.h +++ b/include/drm/drm_probe_helper.h @@ -25,6 +25,7 @@ void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector); void drm_kms_helper_poll_disable(struct drm_device *dev); void drm_kms_helper_poll_enable(struct drm_device *dev); +void drm_kms_helper_poll_reschedule(struct drm_device *dev); bool drm_kms_helper_is_poll_worker(void); enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc,
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h index 91e080efb918..8365adf842ef 100644 --- a/include/keys/system_keyring.h +++ b/include/keys/system_keyring.h @@ -23,10 +23,15 @@ extern int restrict_link_by_builtin_trusted(struct key *keyring, const struct key_type *type, const union key_payload *payload, struct key *restriction_key); +int restrict_link_by_digsig_builtin(struct key *dest_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *restriction_key); extern __init int load_module_cert(struct key *keyring); #else #define restrict_link_by_builtin_trusted restrict_link_reject +#define restrict_link_by_digsig_builtin restrict_link_reject static inline __init int load_module_cert(struct key *keyring) { @@ -41,8 +46,17 @@ extern int restrict_link_by_builtin_and_secondary_trusted( const struct key_type *type, const union key_payload *payload, struct key *restriction_key); +int restrict_link_by_digsig_builtin_and_secondary(struct key *keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *restriction_key); +void __init add_to_secondary_keyring(const char *source, const void *data, size_t len); #else #define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted +#define restrict_link_by_digsig_builtin_and_secondary restrict_link_by_digsig_builtin +static inline void __init add_to_secondary_keyring(const char *source, const void *data, size_t len) +{ +} #endif #ifdef CONFIG_INTEGRITY_MACHINE_KEYRING
diff --git a/include/kunit/attributes.h b/include/kunit/attributes.h new file mode 100644 index 000000000000..bc76a0b786d2 --- /dev/null +++ b/include/kunit/attributes.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * KUnit API to save and access test attributes + * + * Copyright (C) 2023, Google LLC.
+ * Author: Rae Moar <rmoar@google.com> + */ + +#ifndef _KUNIT_ATTRIBUTES_H +#define _KUNIT_ATTRIBUTES_H + +/* + * struct kunit_attr_filter - representation of attributes filter with the + * attribute object and string input + */ +struct kunit_attr_filter { + struct kunit_attr *attr; + char *input; +}; + +/* + * Returns the name of the filter's attribute. + */ +const char *kunit_attr_filter_name(struct kunit_attr_filter filter); + +/* + * Print all test attributes for a test case or suite. + * Output format for test cases: "# <test_name>.<attribute>: <value>" + * Output format for test suites: "# <attribute>: <value>" + */ +void kunit_print_attr(void *test_or_suite, bool is_test, unsigned int test_level); + +/* + * Returns the number of filters in input. + */ +int kunit_get_filter_count(char *input); + +/* + * Parse attributes filter input and return an object containing the + * attribute object and the string input of the next filter. + */ +struct kunit_attr_filter kunit_next_attr_filter(char **filters, int *err); + +/* + * Returns a copy of the suite containing only tests that pass the filter. + */ +struct kunit_suite *kunit_filter_attr_tests(const struct kunit_suite *const suite, + struct kunit_attr_filter filter, char *action, int *err); + +#endif /* _KUNIT_ATTRIBUTES_H */
diff --git a/include/kunit/static_stub.h b/include/kunit/static_stub.h index 9b80150a5d62..85315c80b303 100644 --- a/include/kunit/static_stub.h +++ b/include/kunit/static_stub.h @@ -11,7 +11,7 @@ #if !IS_ENABLED(CONFIG_KUNIT) /* If CONFIG_KUNIT is not enabled, these stubs quietly disappear. */ -#define KUNIT_TRIGGER_STATIC_STUB(real_fn_name, args...) do {} while (0) +#define KUNIT_STATIC_STUB_REDIRECT(real_fn_name, args...) do {} while (0) #else @@ -30,7 +30,7 @@ * This is a function prologue which is used to allow calls to the current * function to be redirected by a KUnit test. KUnit tests can call * kunit_activate_static_stub() to pass a replacement function in. The - * replacement function will be called by KUNIT_TRIGGER_STATIC_STUB(), which + * replacement function will be called by KUNIT_STATIC_STUB_REDIRECT(), which * will then return from the function. If the caller is not in a KUnit context, * the function will continue execution as normal. * @@ -87,7 +87,7 @@ void __kunit_activate_static_stub(struct kunit *test, * When activated, calls to real_fn_addr from within this test (even if called * indirectly) will instead call replacement_addr. The function pointed to by * real_fn_addr must begin with the static stub prologue in - * KUNIT_TRIGGER_STATIC_STUB() for this to work. real_fn_addr and + * KUNIT_STATIC_STUB_REDIRECT() for this to work. real_fn_addr and * replacement_addr must have the same type. * * The redirection can be disabled again with kunit_deactivate_static_stub().
diff --git a/include/kunit/test-bug.h b/include/kunit/test-bug.h index 30ca541b6ff2..47aa8f21ccce 100644 --- a/include/kunit/test-bug.h +++ b/include/kunit/test-bug.h @@ -9,6 +9,8 @@ #ifndef _KUNIT_TEST_BUG_H #define _KUNIT_TEST_BUG_H + +#include <linux/stddef.h> /* for NULL */ + #if IS_ENABLED(CONFIG_KUNIT) #include <linux/jump_label.h> /* For static branch */
diff --git a/include/kunit/test.h b/include/kunit/test.h index 107c81431634..68ff01aee244 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -64,12 +64,35 @@ enum kunit_status { KUNIT_SKIPPED, }; +/* Attribute struct/enum definitions */ + +/* + * Speed Attribute is stored as an enum and separated into categories of + * speed: very_slow, slow, and normal.
These speeds are relative to + * other KUnit tests. + * + * Note: unset speed attribute acts as default of KUNIT_SPEED_NORMAL. + */ +enum kunit_speed { + KUNIT_SPEED_UNSET, + KUNIT_SPEED_VERY_SLOW, + KUNIT_SPEED_SLOW, + KUNIT_SPEED_NORMAL, + KUNIT_SPEED_MAX = KUNIT_SPEED_NORMAL, +}; + +/* Holds attributes for each test case and suite */ +struct kunit_attributes { + enum kunit_speed speed; +}; + /** * struct kunit_case - represents an individual test case. * * @run_case: the function representing the actual test case. * @name: the name of the test case. * @generate_params: the generator function for parameterized tests. + * @attr: the attributes associated with the test * * A test case is a function with the signature, * ``void (*)(struct kunit *)`` @@ -105,9 +128,11 @@ struct kunit_case { void (*run_case)(struct kunit *test); const char *name; const void* (*generate_params)(const void *prev, char *desc); + struct kunit_attributes attr; /* private: internal use only. */ enum kunit_status status; + char *module_name; char *log; }; @@ -132,7 +157,32 @@ static inline char *kunit_status_to_ok_not_ok(enum kunit_status status) * &struct kunit_case object from it. See the documentation for * &struct kunit_case for an example on how to use it. */ -#define KUNIT_CASE(test_name) { .run_case = test_name, .name = #test_name } +#define KUNIT_CASE(test_name) \ + { .run_case = test_name, .name = #test_name, \ + .module_name = KBUILD_MODNAME} + +/** + * KUNIT_CASE_ATTR - A helper for creating a &struct kunit_case + * with attributes + * + * @test_name: a reference to a test case function. + * @attributes: a reference to a struct kunit_attributes object containing + * test attributes + */ +#define KUNIT_CASE_ATTR(test_name, attributes) \ + { .run_case = test_name, .name = #test_name, \ + .attr = attributes, .module_name = KBUILD_MODNAME} + +/** + * KUNIT_CASE_SLOW - A helper for creating a &struct kunit_case + * with the slow attribute + * + * @test_name: a reference to a test case function. + */ + +#define KUNIT_CASE_SLOW(test_name) \ + { .run_case = test_name, .name = #test_name, \ + .attr.speed = KUNIT_SPEED_SLOW, .module_name = KBUILD_MODNAME} /** * KUNIT_CASE_PARAM - A helper for creation a parameterized &struct kunit_case @@ -153,7 +203,21 @@ static inline char *kunit_status_to_ok_not_ok(enum kunit_status status) */ #define KUNIT_CASE_PARAM(test_name, gen_params) \ { .run_case = test_name, .name = #test_name, \ - .generate_params = gen_params } + .generate_params = gen_params, .module_name = KBUILD_MODNAME} + +/** + * KUNIT_CASE_PARAM_ATTR - A helper for creating a parameterized &struct + * kunit_case with attributes + * + * @test_name: a reference to a test case function. + * @gen_params: a reference to a parameter generator function. + * @attributes: a reference to a struct kunit_attributes object containing + * test attributes + */ +#define KUNIT_CASE_PARAM_ATTR(test_name, gen_params, attributes) \ + { .run_case = test_name, .name = #test_name, \ + .generate_params = gen_params, \ + .attr = attributes, .module_name = KBUILD_MODNAME} /** * struct kunit_suite - describes a related collection of &struct kunit_case @@ -164,6 +228,7 @@ static inline char *kunit_status_to_ok_not_ok(enum kunit_status status) * @init: called before every test case. * @exit: called after every test case. * @test_cases: a null terminated array of test cases. 
+ * @attr: the attributes associated with the test suite * * A kunit_suite is a collection of related &struct kunit_case s, such that * @init is called before every test case and @exit is called after every @@ -183,6 +248,7 @@ struct kunit_suite { int (*init)(struct kunit *test); void (*exit)(struct kunit *test); struct kunit_case *test_cases; + struct kunit_attributes attr; /* private: internal use only */ char status_comment[KUNIT_STATUS_COMMENT_SIZE]; @@ -191,6 +257,12 @@ struct kunit_suite { int suite_init_err; }; +/* Stores an array of suites, end points one past the end */ +struct kunit_suite_set { + struct kunit_suite * const *start; + struct kunit_suite * const *end; +}; + /** * struct kunit - represents a running instance of a test. * @@ -238,6 +310,10 @@ static inline void kunit_set_failure(struct kunit *test) } bool kunit_enabled(void); +const char *kunit_action(void); +const char *kunit_filter_glob(void); +char *kunit_filter(void); +char *kunit_filter_action(void); void kunit_init_test(struct kunit *test, const char *name, char *log); @@ -248,10 +324,21 @@ size_t kunit_suite_num_test_cases(struct kunit_suite *suite); unsigned int kunit_test_case_num(struct kunit_suite *suite, struct kunit_case *test_case); +struct kunit_suite_set +kunit_filter_suites(const struct kunit_suite_set *suite_set, + const char *filter_glob, + char *filters, + char *filter_action, + int *err); +void kunit_free_suite_set(struct kunit_suite_set suite_set); + int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites); void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites); +void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin); +void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr); + #if IS_BUILTIN(CONFIG_KUNIT) int kunit_run_all_tests(void); #else diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 641dc4843987..a73246c3c35e 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -477,8 +477,6 @@ static inline int acpi_get_node(acpi_handle handle) return 0; } #endif -extern int acpi_paddr_to_node(u64 start_addr, u64 size); - extern int pnpacpi_disabled; #define PXM_INVAL (-1) @@ -1100,7 +1098,7 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b); -#ifdef CONFIG_X86 +#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86) struct acpi_s2idle_dev_ops { struct list_head list_node; void (*prepare)(void); @@ -1109,7 +1107,13 @@ struct acpi_s2idle_dev_ops { }; int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg); void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg); -#endif /* CONFIG_X86 */ +int acpi_get_lps0_constraint(struct acpi_device *adev); +#else /* CONFIG_SUSPEND && CONFIG_X86 */ +static inline int acpi_get_lps0_constraint(struct device *dev) +{ + return ACPI_STATE_UNKNOWN; +} +#endif /* CONFIG_SUSPEND && CONFIG_X86 */ #ifndef CONFIG_IA64 void arch_reserve_mem_area(acpi_physical_address addr, size_t size); #else diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index ee7cb6aaff71..1cb65592c95d 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -21,6 +21,7 @@ */ #define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */ #define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */ +#define IORT_SMMU_V3_PMCG_HISI_HIP09 0x00000002 /* HiSilicon HIP09 PMCG */ int iort_register_domain_token(int trans_id, phys_addr_t 
base, struct fwnode_handle *fw_node); diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index 14dc461b0e82..255701e1251b 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h @@ -47,10 +47,12 @@ int sdei_unregister_ghes(struct ghes *ghes); int sdei_mask_local_cpu(void); int sdei_unmask_local_cpu(void); void __init sdei_init(void); +void sdei_handler_abort(void); #else static inline int sdei_mask_local_cpu(void) { return 0; } static inline int sdei_unmask_local_cpu(void) { return 0; } static inline void sdei_init(void) { } +static inline void sdei_handler_abort(void) { } #endif /* CONFIG_ARM_SDE_INTERFACE */ diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h deleted file mode 100644 index 1491af38cc6e..000000000000 --- a/include/linux/atmel-mci.h +++ /dev/null @@ -1,46 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_ATMEL_MCI_H -#define __LINUX_ATMEL_MCI_H - -#include <linux/types.h> -#include <linux/dmaengine.h> - -#define ATMCI_MAX_NR_SLOTS 2 - -/** - * struct mci_slot_pdata - board-specific per-slot configuration - * @bus_width: Number of data lines wired up the slot - * @detect_pin: GPIO pin wired to the card detect switch - * @wp_pin: GPIO pin wired to the write protect sensor - * @detect_is_active_high: The state of the detect pin when it is active - * @non_removable: The slot is not removable, only detect once - * - * If a given slot is not present on the board, @bus_width should be - * set to 0. The other fields are ignored in this case. - * - * Any pins that aren't available should be set to a negative value. - * - * Note that support for multiple slots is experimental -- some cards - * might get upset if we don't get the clock management exactly right. - * But in most cases, it should work just fine. - */ -struct mci_slot_pdata { - unsigned int bus_width; - int detect_pin; - int wp_pin; - bool detect_is_active_high; - bool non_removable; -}; - -/** - * struct mci_platform_data - board-specific MMC/SDcard configuration - * @dma_slave: DMA slave interface to use in data transfers. - * @slot: Per-slot configuration data. - */ -struct mci_platform_data { - void *dma_slave; - dma_filter_fn dma_filter; - struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS]; -}; - -#endif /* __LINUX_ATMEL_MCI_H */ diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index c15221dcb75e..d0807ad43f93 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -268,10 +268,11 @@ struct virtchnl_vf_resource { u32 rss_key_size; u32 rss_lut_size; - struct virtchnl_vsi_resource vsi_res[1]; + struct virtchnl_vsi_resource vsi_res[]; }; -VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource); +VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_vf_resource); +#define virtchnl_vf_resource_LEGACY_SIZEOF 36 /* VIRTCHNL_OP_CONFIG_TX_QUEUE * VF sends this message to set up parameters for one TX queue. 
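As above for virtchnl_vf_resource, the [1]-sized trailing arrays in this file become C99 flexible array members, so sizeof() no longer covers any payload; wire-format lengths move to the virtchnl_struct_size() helper added a few hunks further down, which folds the recorded LEGACY_SIZEOF back in. A sketch of sizing a GET_VF_RESOURCES buffer under that scheme (my_vf_res_len and num_vsis are illustrative names):

#include <linux/avf/virtchnl.h>

/* Bytes needed for a virtchnl_vf_resource message carrying "num_vsis"
 * VSI entries.  Only the pointer's type is consumed by the macro. */
static u32 my_vf_res_len(struct virtchnl_vf_resource *res,
			 unsigned int num_vsis)
{
	/* expands via _Generic to the __vss_full() rule: the 36-byte
	 * legacy size plus num_vsis full vsi_res entries */
	return virtchnl_struct_size(res, vsi_res, num_vsis);
}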
@@ -340,10 +341,11 @@ struct virtchnl_vsi_queue_config_info { u16 vsi_id; u16 num_queue_pairs; u32 pad; - struct virtchnl_queue_pair_info qpair[1]; + struct virtchnl_queue_pair_info qpair[]; }; -VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vsi_queue_config_info); +#define virtchnl_vsi_queue_config_info_LEGACY_SIZEOF 72 /* VIRTCHNL_OP_REQUEST_QUEUES * VF sends this message to request the PF to allocate additional queues to @@ -385,10 +387,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map); struct virtchnl_irq_map_info { u16 num_vectors; - struct virtchnl_vector_map vecmap[1]; + struct virtchnl_vector_map vecmap[]; }; -VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info); +VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_irq_map_info); +#define virtchnl_irq_map_info_LEGACY_SIZEOF 14 /* VIRTCHNL_OP_ENABLE_QUEUES * VIRTCHNL_OP_DISABLE_QUEUES @@ -459,10 +462,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); struct virtchnl_ether_addr_list { u16 vsi_id; u16 num_elements; - struct virtchnl_ether_addr list[1]; + struct virtchnl_ether_addr list[]; }; -VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list); +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_ether_addr_list); +#define virtchnl_ether_addr_list_LEGACY_SIZEOF 12 /* VIRTCHNL_OP_ADD_VLAN * VF sends this message to add one or more VLAN tag filters for receives. @@ -481,10 +485,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list); struct virtchnl_vlan_filter_list { u16 vsi_id; u16 num_elements; - u16 vlan_id[1]; + u16 vlan_id[]; }; -VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list); +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vlan_filter_list); +#define virtchnl_vlan_filter_list_LEGACY_SIZEOF 6 /* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related * structures and opcodes. 
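The __vss_*() rules defined in the next hunk exist because the legacy length formulas differed per message, and each rule reproduces its old formula byte-for-byte. Worked through for the VLAN filter list converted above, with c = num_elements and the sizes taken from the CHECK_STRUCT_LEN/LEGACY_SIZEOF pairs:

	old:  valid_len = 6 + c * sizeof(u16)          = 6 + 2c
	new:  virtchnl_struct_size(vfl, vlan_id, c)
	        = struct_size(vfl, vlan_id, c) + (6 - struct_size(vfl, vlan_id, 0))
	        = (4 + 2c) + 2                         = 6 + 2c

__vss_byelem (the v2 filter list and the TC/RDMA lists) likewise preserves formulas of the shape old + (c - 1) * sizeof(elem), and __vss_byone (the RSS key and LUT) preserves old + (c - 1) bytes.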
@@ -711,10 +716,11 @@ struct virtchnl_vlan_filter_list_v2 { u16 vport_id; u16 num_elements; u8 pad[4]; - struct virtchnl_vlan_filter filters[1]; + struct virtchnl_vlan_filter filters[]; }; -VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2); +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan_filter_list_v2); +#define virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF 40 /* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 @@ -866,18 +872,20 @@ VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info); struct virtchnl_rss_key { u16 vsi_id; u16 key_len; - u8 key[1]; /* RSS hash key, packed bytes */ + u8 key[]; /* RSS hash key, packed bytes */ }; -VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_key); +#define virtchnl_rss_key_LEGACY_SIZEOF 6 struct virtchnl_rss_lut { u16 vsi_id; u16 lut_entries; - u8 lut[1]; /* RSS lookup table */ + u8 lut[]; /* RSS lookup table */ }; -VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut); +#define virtchnl_rss_lut_LEGACY_SIZEOF 6 /* VIRTCHNL_OP_GET_RSS_HENA_CAPS * VIRTCHNL_OP_SET_RSS_HENA @@ -911,10 +919,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info); struct virtchnl_tc_info { u32 num_tc; u32 pad; - struct virtchnl_channel_info list[1]; + struct virtchnl_channel_info list[]; }; -VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info); +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_tc_info); +#define virtchnl_tc_info_LEGACY_SIZEOF 24 /* VIRTCHNL_ADD_CLOUD_FILTER * VIRTCHNL_DEL_CLOUD_FILTER @@ -1052,10 +1061,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info); struct virtchnl_rdma_qvlist_info { u32 num_vectors; - struct virtchnl_rdma_qv_info qv_info[1]; + struct virtchnl_rdma_qv_info qv_info[]; }; -VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_rdma_qvlist_info); +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rdma_qvlist_info); +#define virtchnl_rdma_qvlist_info_LEGACY_SIZEOF 16 /* VF reset states - these are written into the RSTAT register: * VFGEN_RSTAT on the VF @@ -1367,6 +1377,31 @@ struct virtchnl_fdir_del { VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del); +#define __vss_byone(p, member, count, old) \ + (struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0))) + +#define __vss_byelem(p, member, count, old) \ + (struct_size(p, member, count - 1) + (old - struct_size(p, member, 0))) + +#define __vss_full(p, member, count, old) \ + (struct_size(p, member, count) + (old - struct_size(p, member, 0))) + +#define __vss(type, func, p, member, count) \ + struct type: func(p, member, count, type##_LEGACY_SIZEOF) + +#define virtchnl_struct_size(p, m, c) \ + _Generic(*p, \ + __vss(virtchnl_vf_resource, __vss_full, p, m, c), \ + __vss(virtchnl_vsi_queue_config_info, __vss_full, p, m, c), \ + __vss(virtchnl_irq_map_info, __vss_full, p, m, c), \ + __vss(virtchnl_ether_addr_list, __vss_full, p, m, c), \ + __vss(virtchnl_vlan_filter_list, __vss_full, p, m, c), \ + __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \ + __vss(virtchnl_tc_info, __vss_byelem, p, m, c), \ + __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \ + __vss(virtchnl_rss_key, __vss_byone, p, m, c), \ + __vss(virtchnl_rss_lut, __vss_byone, p, m, c)) + /** * virtchnl_vc_validate_vf_msg * @ver: Virtchnl version info @@ -1401,24 +1436,23 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, valid_len = sizeof(struct virtchnl_rxq_info); break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: - valid_len = sizeof(struct virtchnl_vsi_queue_config_info); + valid_len = 
virtchnl_vsi_queue_config_info_LEGACY_SIZEOF; if (msglen >= valid_len) { struct virtchnl_vsi_queue_config_info *vqc = (struct virtchnl_vsi_queue_config_info *)msg; - valid_len += (vqc->num_queue_pairs * - sizeof(struct - virtchnl_queue_pair_info)); + valid_len = virtchnl_struct_size(vqc, qpair, + vqc->num_queue_pairs); if (vqc->num_queue_pairs == 0) err_msg_format = true; } break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: - valid_len = sizeof(struct virtchnl_irq_map_info); + valid_len = virtchnl_irq_map_info_LEGACY_SIZEOF; if (msglen >= valid_len) { struct virtchnl_irq_map_info *vimi = (struct virtchnl_irq_map_info *)msg; - valid_len += (vimi->num_vectors * - sizeof(struct virtchnl_vector_map)); + valid_len = virtchnl_struct_size(vimi, vecmap, + vimi->num_vectors); if (vimi->num_vectors == 0) err_msg_format = true; } @@ -1429,23 +1463,24 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, break; case VIRTCHNL_OP_ADD_ETH_ADDR: case VIRTCHNL_OP_DEL_ETH_ADDR: - valid_len = sizeof(struct virtchnl_ether_addr_list); + valid_len = virtchnl_ether_addr_list_LEGACY_SIZEOF; if (msglen >= valid_len) { struct virtchnl_ether_addr_list *veal = (struct virtchnl_ether_addr_list *)msg; - valid_len += veal->num_elements * - sizeof(struct virtchnl_ether_addr); + valid_len = virtchnl_struct_size(veal, list, + veal->num_elements); if (veal->num_elements == 0) err_msg_format = true; } break; case VIRTCHNL_OP_ADD_VLAN: case VIRTCHNL_OP_DEL_VLAN: - valid_len = sizeof(struct virtchnl_vlan_filter_list); + valid_len = virtchnl_vlan_filter_list_LEGACY_SIZEOF; if (msglen >= valid_len) { struct virtchnl_vlan_filter_list *vfl = (struct virtchnl_vlan_filter_list *)msg; - valid_len += vfl->num_elements * sizeof(u16); + valid_len = virtchnl_struct_size(vfl, vlan_id, + vfl->num_elements); if (vfl->num_elements == 0) err_msg_format = true; } @@ -1469,29 +1504,31 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP: break; case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: - valid_len = sizeof(struct virtchnl_rdma_qvlist_info); + valid_len = virtchnl_rdma_qvlist_info_LEGACY_SIZEOF; if (msglen >= valid_len) { struct virtchnl_rdma_qvlist_info *qv = (struct virtchnl_rdma_qvlist_info *)msg; - valid_len += ((qv->num_vectors - 1) * - sizeof(struct virtchnl_rdma_qv_info)); + valid_len = virtchnl_struct_size(qv, qv_info, + qv->num_vectors); } break; case VIRTCHNL_OP_CONFIG_RSS_KEY: - valid_len = sizeof(struct virtchnl_rss_key); + valid_len = virtchnl_rss_key_LEGACY_SIZEOF; if (msglen >= valid_len) { struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; - valid_len += vrk->key_len - 1; + valid_len = virtchnl_struct_size(vrk, key, + vrk->key_len); } break; case VIRTCHNL_OP_CONFIG_RSS_LUT: - valid_len = sizeof(struct virtchnl_rss_lut); + valid_len = virtchnl_rss_lut_LEGACY_SIZEOF; if (msglen >= valid_len) { struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; - valid_len += vrl->lut_entries - 1; + valid_len = virtchnl_struct_size(vrl, lut, + vrl->lut_entries); } break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: @@ -1506,12 +1543,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, valid_len = sizeof(struct virtchnl_vf_res_request); break; case VIRTCHNL_OP_ENABLE_CHANNELS: - valid_len = sizeof(struct virtchnl_tc_info); + valid_len = virtchnl_tc_info_LEGACY_SIZEOF; if (msglen >= valid_len) { struct virtchnl_tc_info *vti = (struct virtchnl_tc_info *)msg; - valid_len += (vti->num_tc - 1) * - sizeof(struct virtchnl_channel_info); + 
valid_len = virtchnl_struct_size(vti, list, + vti->num_tc); if (vti->num_tc == 0) err_msg_format = true; } @@ -1538,13 +1575,13 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, break; case VIRTCHNL_OP_ADD_VLAN_V2: case VIRTCHNL_OP_DEL_VLAN_V2: - valid_len = sizeof(struct virtchnl_vlan_filter_list_v2); + valid_len = virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF; if (msglen >= valid_len) { struct virtchnl_vlan_filter_list_v2 *vfl = (struct virtchnl_vlan_filter_list_v2 *)msg; - valid_len += (vfl->num_elements - 1) * - sizeof(struct virtchnl_vlan_filter); + valid_len = virtchnl_struct_size(vfl, filters, + vfl->num_elements); if (vfl->num_elements == 0) { err_msg_format = true; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index fbad4fcd408e..1a97277f99b1 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -46,7 +46,6 @@ extern spinlock_t bdi_lock; extern struct list_head bdi_list; extern struct workqueue_struct *bdi_wq; -extern struct workqueue_struct *bdi_async_bio_wq; static inline bool wb_has_dirty_io(struct bdi_writeback *wb) { diff --git a/include/linux/bio.h b/include/linux/bio.h index c4f5b5228105..e8767f165bad 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -253,6 +253,11 @@ static inline struct page *bio_first_page_all(struct bio *bio) return bio_first_bvec_all(bio)->bv_page; } +static inline struct folio *bio_first_folio_all(struct bio *bio) +{ + return page_folio(bio_first_page_all(bio)); +} + static inline struct bio_vec *bio_last_bvec_all(struct bio *bio) { WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); @@ -791,7 +796,7 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page, static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) { bio->bi_opf |= REQ_POLLED; - if (!is_sync_kiocb(kiocb)) + if (kiocb->ki_flags & IOCB_NOWAIT) bio->bi_opf |= REQ_NOWAIT; } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 0bad62cca3d0..d5c5e59ddbd2 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -52,7 +52,6 @@ struct block_device { atomic_t bd_openers; spinlock_t bd_size_lock; /* for bd_inode->i_size updates */ struct inode * bd_inode; /* will die */ - struct super_block * bd_super; void * bd_claiming; void * bd_holder; const struct blk_holder_ops *bd_holder_ops; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ed44a997f629..83ce87354e9a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -750,7 +750,8 @@ static inline int bdev_read_only(struct block_device *bdev) } bool set_capacity_and_notify(struct gendisk *disk, sector_t size); -bool disk_force_media_change(struct gendisk *disk, unsigned int events); +void disk_force_media_change(struct gendisk *disk); +void bdev_mark_dead(struct block_device *bdev, bool surprise); void add_disk_randomness(struct gendisk *disk) __latent_entropy; void rand_initialize_disk(struct gendisk *disk); @@ -809,7 +810,6 @@ int __register_blkdev(unsigned int major, const char *name, void unregister_blkdev(unsigned int major, const char *name); bool disk_check_media_change(struct gendisk *disk); -int __invalidate_device(struct block_device *bdev, bool kill_dirty); void set_capacity(struct gendisk *disk, sector_t size); #ifdef CONFIG_BLOCK_HOLDER_DEPRECATED @@ -969,7 +969,6 @@ struct blk_plug { bool multiple_queues; bool has_elevator; - bool nowait; struct list_head cb_list; /* md requires an unplug callback */ }; @@ -1461,9 +1460,16 @@ void blkdev_show(struct seq_file 
*seqf, off_t offset); #endif struct blk_holder_ops { - void (*mark_dead)(struct block_device *bdev); + void (*mark_dead)(struct block_device *bdev, bool surprise); + + /* + * Sync the file system mounted on the block device. + */ + void (*sync)(struct block_device *bdev); }; +extern const struct blk_holder_ops fs_holder_ops; + /* * Return the correct open flags for blkdev_get_by_* for super block flags * as stored in sb->s_flags. @@ -1522,8 +1528,6 @@ static inline int early_lookup_bdev(const char *pathname, dev_t *dev) } #endif /* CONFIG_BLOCK */ -int fsync_bdev(struct block_device *bdev); - int freeze_bdev(struct block_device *bdev); int thaw_bdev(struct block_device *bdev); diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 57e9e109257e..8506690dbb9c 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -199,9 +199,9 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk, #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \ + if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) { \ typeof(sk) __sk = sk_to_full_sk(sk); \ - if (sk_fullsock(__sk) && \ + if (sk_fullsock(__sk) && __sk == skb_to_full_sk(skb) && \ cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ CGROUP_INET_EGRESS); \ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f58895830ada..12596af59c00 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -228,6 +228,18 @@ struct btf_record { struct btf_field fields[]; }; +/* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */ +struct bpf_rb_node_kern { + struct rb_node rb_node; + void *owner; +} __attribute__((aligned(8))); + +/* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */ +struct bpf_list_node_kern { + struct list_head list_head; + void *owner; +} __attribute__((aligned(8))); + struct bpf_map { /* The first two cachelines with read-mostly members of which some * are also accessed in fast-path (e.g. ops, max_entries). @@ -275,6 +287,7 @@ struct bpf_map { } owner; bool bypass_spec_v1; bool frozen; /* write-once; write-protected by freeze_mutex */ + s64 __percpu *elem_count; }; static inline const char *btf_field_type_name(enum btf_field_type type) @@ -640,7 +653,8 @@ enum bpf_type_flag { MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS), /* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning. - * Currently only valid for linked-list and rbtree nodes. + * Currently only valid for linked-list and rbtree nodes. If the nodes + * have a bpf_refcount_field, they must be tagged MEM_RCU as well. */ NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS), @@ -1537,6 +1551,53 @@ struct bpf_struct_ops_value; struct btf_member; #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64 +/** + * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to + * define a BPF_MAP_TYPE_STRUCT_OPS map type composed + * of BPF_PROG_TYPE_STRUCT_OPS progs. + * @verifier_ops: A structure of callbacks that are invoked by the verifier + * when determining whether the struct_ops progs in the + * struct_ops map are valid. + * @init: A callback that is invoked a single time, and before any other + * callback, to initialize the structure. A nonzero return value means + * the subsystem could not be initialized. + * @check_member: When defined, a callback invoked by the verifier to allow + * the subsystem to determine if an entry in the struct_ops map + * is valid. 
A nonzero return value means that the map is + * invalid and should be rejected by the verifier. + * @init_member: A callback that is invoked for each member of the struct_ops + * map to allow the subsystem to initialize the member. A nonzero + * value means the member could not be initialized. This callback + * is exclusive with the @type, @type_id, @value_type, and + * @value_id fields. + * @reg: A callback that is invoked when the struct_ops map has been + * initialized and is being attached to. Zero means the struct_ops map + * has been successfully registered and is live. A nonzero return value + * means the struct_ops map could not be registered. + * @unreg: A callback that is invoked when the struct_ops map should be + * unregistered. + * @update: A callback that is invoked when the live struct_ops map is being + * updated to contain new values. This callback is only invoked when + * the struct_ops map is loaded with BPF_F_LINK. If not defined, + * it is assumed that the struct_ops map cannot be updated. + * @validate: A callback that is invoked after all of the members have been + * initialized. This callback should perform static checks on the + * map, meaning that it should either fail or succeed + * deterministically. A struct_ops map that has been validated may + * not necessarily succeed in being registered if the call to @reg + * fails. For example, a valid struct_ops map may be loaded, but + * then fail to be registered because there is already another + * active struct_ops map in the subsystem. For this + * reason, if this callback is not defined, the check is skipped as + * the struct_ops map will have final verification performed in + * @reg. + * @type: BTF type. + * @value_type: Value type. + * @name: The name of the struct bpf_struct_ops object. + * @func_models: Func models + * @type_id: BTF type id. + * @value_id: BTF value id. + */ struct bpf_struct_ops { const struct bpf_verifier_ops *verifier_ops; int (*init)(struct btf *btf); @@ -1806,6 +1867,7 @@ struct bpf_cg_run_ctx { struct bpf_trace_run_ctx { struct bpf_run_ctx run_ctx; u64 bpf_cookie; + bool is_uprobe; }; struct bpf_tramp_run_ctx { @@ -1854,6 +1916,8 @@ bpf_prog_run_array(const struct bpf_prog_array *array, if (unlikely(!array)) return ret; + run_ctx.is_uprobe = false; + migrate_disable(); old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); item = &array->items[0]; @@ -1878,8 +1942,8 @@ bpf_prog_run_array(const struct bpf_prog_array *array, * rcu-protected dynamically sized maps.
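 *
 * A minimal call-site sketch; the consumer-side names here are
 * illustrative only, not part of this header:
 *
 *	u32 ret;
 *
 *	ret = bpf_prog_run_array_uprobe(uc->prog_array, regs_ctx,
 *					bpf_prog_run);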
*/ static __always_inline u32 -bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu, - const void *ctx, bpf_prog_run_fn run_prog) +bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu, + const void *ctx, bpf_prog_run_fn run_prog) { const struct bpf_prog_array_item *item; const struct bpf_prog *prog; @@ -1893,6 +1957,8 @@ bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu, rcu_read_lock_trace(); migrate_disable(); + run_ctx.is_uprobe = true; + array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held()); if (unlikely(!array)) goto out; @@ -2040,6 +2106,35 @@ bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, } #endif +static inline int +bpf_map_init_elem_count(struct bpf_map *map) +{ + size_t size = sizeof(*map->elem_count), align = size; + gfp_t flags = GFP_USER | __GFP_NOWARN; + + map->elem_count = bpf_map_alloc_percpu(map, size, align, flags); + if (!map->elem_count) + return -ENOMEM; + + return 0; +} + +static inline void +bpf_map_free_elem_count(struct bpf_map *map) +{ + free_percpu(map->elem_count); +} + +static inline void bpf_map_inc_elem_count(struct bpf_map *map) +{ + this_cpu_inc(*map->elem_count); +} + +static inline void bpf_map_dec_elem_count(struct bpf_map *map) +{ + this_cpu_dec(*map->elem_count); +} + extern int sysctl_unprivileged_bpf_disabled; static inline bool bpf_allow_ptr_leaks(void) @@ -2073,7 +2168,6 @@ void bpf_link_cleanup(struct bpf_link_primer *primer); void bpf_link_inc(struct bpf_link *link); void bpf_link_put(struct bpf_link *link); int bpf_link_new_fd(struct bpf_link *link); -struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd); struct bpf_link *bpf_link_get_from_fd(u32 ufd); struct bpf_link *bpf_link_get_curr_or_next(u32 *id); @@ -2619,6 +2713,18 @@ static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr) } #endif /* CONFIG_BPF_SYSCALL */ +static __always_inline int +bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) +{ + int ret = -EFAULT; + + if (IS_ENABLED(CONFIG_BPF_EVENTS)) + ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); + if (unlikely(ret < 0)) + memset(dst, 0, size); + return ret; +} + void __bpf_free_used_btfs(struct bpf_prog_aux *aux, struct btf_mod_pair *used_btfs, u32 len); diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h index 3929be5743f4..d644bbb298af 100644 --- a/include/linux/bpf_mem_alloc.h +++ b/include/linux/bpf_mem_alloc.h @@ -27,10 +27,12 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma); /* kmalloc/kfree equivalent: */ void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size); void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr); +void bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr); /* kmem_cache_alloc/free equivalent: */ void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma); void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr); +void bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr); void bpf_mem_cache_raw_free(void *ptr); void *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags); diff --git a/include/linux/bpf_mprog.h b/include/linux/bpf_mprog.h new file mode 100644 index 000000000000..929225f7b095 --- /dev/null +++ b/include/linux/bpf_mprog.h @@ -0,0 +1,343 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2023 Isovalent */ +#ifndef __BPF_MPROG_H +#define __BPF_MPROG_H + +#include <linux/bpf.h> + +/* bpf_mprog framework: + * + * bpf_mprog is a generic layer for multi-program attachment. 
In-kernel users + * of the bpf_mprog don't need to care about the dependency resolution + * internals; they can just consume it with a few API calls. Currently available + * dependency directives are BPF_F_{BEFORE,AFTER} which enable insertion of + * a BPF program or BPF link relative to an existing BPF program or BPF link + * inside the multi-program array as well as prepend and append behavior if + * no relative object was specified; see the corresponding selftests for concrete + * examples (e.g. tc_links and tc_opts test cases of test_progs). + * + * Usage of bpf_mprog_{attach,detach,query}() core APIs with pseudo code: + * + * Attach case: + * + * struct bpf_mprog_entry *entry, *entry_new; + * int ret; + * + * // bpf_mprog user-side lock + * // fetch active @entry from attach location + * [...] + * ret = bpf_mprog_attach(entry, &entry_new, [...]); + * if (!ret) { + * if (entry != entry_new) { + * // swap @entry to @entry_new at attach location + * // ensure there are no inflight users of @entry: + * synchronize_rcu(); + * } + * bpf_mprog_commit(entry); + * } else { + * // error path, bail out, propagate @ret + * } + * // bpf_mprog user-side unlock + * + * Detach case: + * + * struct bpf_mprog_entry *entry, *entry_new; + * int ret; + * + * // bpf_mprog user-side lock + * // fetch active @entry from attach location + * [...] + * ret = bpf_mprog_detach(entry, &entry_new, [...]); + * if (!ret) { + * // everything marked (*) is optional and depends on the use case + * // whether bpf_mprog_bundle should be freed or not + * if (!bpf_mprog_total(entry_new)) (*) + * entry_new = NULL (*) + * // swap @entry to @entry_new at attach location + * // ensure there are no inflight users of @entry: + * synchronize_rcu(); + * bpf_mprog_commit(entry); + * if (!entry_new) (*) + * // free bpf_mprog_bundle (*) + * } else { + * // error path, bail out, propagate @ret + * } + * // bpf_mprog user-side unlock + * + * Query case: + * + * struct bpf_mprog_entry *entry; + * int ret; + * + * // bpf_mprog user-side lock + * // fetch active @entry from attach location + * [...] + * ret = bpf_mprog_query(attr, uattr, entry); + * // bpf_mprog user-side unlock + * + * Data/fast path: + * + * struct bpf_mprog_entry *entry; + * struct bpf_mprog_fp *fp; + * struct bpf_prog *prog; + * int ret = [...]; + * + * rcu_read_lock(); + * // fetch active @entry from attach location + * [...] + * bpf_mprog_foreach_prog(entry, fp, prog) { + * ret = bpf_prog_run(prog, [...]); + * // process @ret from program + * } + * [...] + * rcu_read_unlock(); + * + * bpf_mprog locking considerations: + * + * bpf_mprog_{attach,detach,query}() must be protected by an external lock + * (like RTNL in case of tcx). + * + * The bpf_mprog_entry pointer can be an __rcu annotated pointer (in case of tcx + * the netdevice has tcx_ingress and tcx_egress __rcu pointer) which gets + * updated via rcu_assign_pointer() pointing to the active bpf_mprog_entry of + * the bpf_mprog_bundle. + * + * The fast path accesses the active bpf_mprog_entry within an RCU critical + * section (in case of tcx it runs in NAPI which provides RCU protection there, + * other users might need explicit rcu_read_lock()). The bpf_mprog_commit() + * assumes that for the old bpf_mprog_entry there are no inflight users + * anymore. + * + * The READ_ONCE()/WRITE_ONCE() pairing for bpf_mprog_fp's prog access is for + * the replacement case where we don't swap the bpf_mprog_entry.
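 *
 * As a concrete sketch of that replacement case (with @idx and @tuple
 * assumed to come from the attach logic), the bpf_mprog_read() and
 * bpf_mprog_write() helpers below pair up as:
 *
 *	struct bpf_mprog_fp *fp;
 *	struct bpf_mprog_cp *cp;
 *
 *	bpf_mprog_read(entry, idx, &fp, &cp);
 *	bpf_mprog_write(fp, cp, &tuple);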
+ */ + +#define bpf_mprog_foreach_tuple(entry, fp, cp, t) \ + for (fp = &entry->fp_items[0], cp = &entry->parent->cp_items[0];\ + ({ \ + t.prog = READ_ONCE(fp->prog); \ + t.link = cp->link; \ + t.prog; \ + }); \ + fp++, cp++) + +#define bpf_mprog_foreach_prog(entry, fp, p) \ + for (fp = &entry->fp_items[0]; \ + (p = READ_ONCE(fp->prog)); \ + fp++) + +#define BPF_MPROG_MAX 64 + +struct bpf_mprog_fp { + struct bpf_prog *prog; +}; + +struct bpf_mprog_cp { + struct bpf_link *link; +}; + +struct bpf_mprog_entry { + struct bpf_mprog_fp fp_items[BPF_MPROG_MAX]; + struct bpf_mprog_bundle *parent; +}; + +struct bpf_mprog_bundle { + struct bpf_mprog_entry a; + struct bpf_mprog_entry b; + struct bpf_mprog_cp cp_items[BPF_MPROG_MAX]; + struct bpf_prog *ref; + atomic64_t revision; + u32 count; +}; + +struct bpf_tuple { + struct bpf_prog *prog; + struct bpf_link *link; +}; + +static inline struct bpf_mprog_entry * +bpf_mprog_peer(const struct bpf_mprog_entry *entry) +{ + if (entry == &entry->parent->a) + return &entry->parent->b; + else + return &entry->parent->a; +} + +static inline void bpf_mprog_bundle_init(struct bpf_mprog_bundle *bundle) +{ + BUILD_BUG_ON(sizeof(bundle->a.fp_items[0]) > sizeof(u64)); + BUILD_BUG_ON(ARRAY_SIZE(bundle->a.fp_items) != + ARRAY_SIZE(bundle->cp_items)); + + memset(bundle, 0, sizeof(*bundle)); + atomic64_set(&bundle->revision, 1); + bundle->a.parent = bundle; + bundle->b.parent = bundle; +} + +static inline void bpf_mprog_inc(struct bpf_mprog_entry *entry) +{ + entry->parent->count++; +} + +static inline void bpf_mprog_dec(struct bpf_mprog_entry *entry) +{ + entry->parent->count--; +} + +static inline int bpf_mprog_max(void) +{ + return ARRAY_SIZE(((struct bpf_mprog_entry *)NULL)->fp_items) - 1; +} + +static inline int bpf_mprog_total(struct bpf_mprog_entry *entry) +{ + int total = entry->parent->count; + + WARN_ON_ONCE(total > bpf_mprog_max()); + return total; +} + +static inline bool bpf_mprog_exists(struct bpf_mprog_entry *entry, + struct bpf_prog *prog) +{ + const struct bpf_mprog_fp *fp; + const struct bpf_prog *tmp; + + bpf_mprog_foreach_prog(entry, fp, tmp) { + if (tmp == prog) + return true; + } + return false; +} + +static inline void bpf_mprog_mark_for_release(struct bpf_mprog_entry *entry, + struct bpf_tuple *tuple) +{ + WARN_ON_ONCE(entry->parent->ref); + if (!tuple->link) + entry->parent->ref = tuple->prog; +} + +static inline void bpf_mprog_complete_release(struct bpf_mprog_entry *entry) +{ + /* In the non-link case prog deletions can only drop the reference + * to the prog after the bpf_mprog_entry got swapped and the + * bpf_mprog ensured that there are no inflight users anymore. + * + * Paired with bpf_mprog_mark_for_release(). 
+ */ + if (entry->parent->ref) { + bpf_prog_put(entry->parent->ref); + entry->parent->ref = NULL; + } +} + +static inline void bpf_mprog_revision_new(struct bpf_mprog_entry *entry) +{ + atomic64_inc(&entry->parent->revision); +} + +static inline void bpf_mprog_commit(struct bpf_mprog_entry *entry) +{ + bpf_mprog_complete_release(entry); + bpf_mprog_revision_new(entry); +} + +static inline u64 bpf_mprog_revision(struct bpf_mprog_entry *entry) +{ + return atomic64_read(&entry->parent->revision); +} + +static inline void bpf_mprog_entry_copy(struct bpf_mprog_entry *dst, + struct bpf_mprog_entry *src) +{ + memcpy(dst->fp_items, src->fp_items, sizeof(src->fp_items)); +} + +static inline void bpf_mprog_entry_clear(struct bpf_mprog_entry *dst) +{ + memset(dst->fp_items, 0, sizeof(dst->fp_items)); +} + +static inline void bpf_mprog_clear_all(struct bpf_mprog_entry *entry, + struct bpf_mprog_entry **entry_new) +{ + struct bpf_mprog_entry *peer; + + peer = bpf_mprog_peer(entry); + bpf_mprog_entry_clear(peer); + peer->parent->count = 0; + *entry_new = peer; +} + +static inline void bpf_mprog_entry_grow(struct bpf_mprog_entry *entry, int idx) +{ + int total = bpf_mprog_total(entry); + + memmove(entry->fp_items + idx + 1, + entry->fp_items + idx, + (total - idx) * sizeof(struct bpf_mprog_fp)); + + memmove(entry->parent->cp_items + idx + 1, + entry->parent->cp_items + idx, + (total - idx) * sizeof(struct bpf_mprog_cp)); +} + +static inline void bpf_mprog_entry_shrink(struct bpf_mprog_entry *entry, int idx) +{ + /* Total array size is needed in this case to ensure the NULL + * entry is copied at the end. + */ + int total = ARRAY_SIZE(entry->fp_items); + + memmove(entry->fp_items + idx, + entry->fp_items + idx + 1, + (total - idx - 1) * sizeof(struct bpf_mprog_fp)); + + memmove(entry->parent->cp_items + idx, + entry->parent->cp_items + idx + 1, + (total - idx - 1) * sizeof(struct bpf_mprog_cp)); +} + +static inline void bpf_mprog_read(struct bpf_mprog_entry *entry, u32 idx, + struct bpf_mprog_fp **fp, + struct bpf_mprog_cp **cp) +{ + *fp = &entry->fp_items[idx]; + *cp = &entry->parent->cp_items[idx]; +} + +static inline void bpf_mprog_write(struct bpf_mprog_fp *fp, + struct bpf_mprog_cp *cp, + struct bpf_tuple *tuple) +{ + WRITE_ONCE(fp->prog, tuple->prog); + cp->link = tuple->link; +} + +int bpf_mprog_attach(struct bpf_mprog_entry *entry, + struct bpf_mprog_entry **entry_new, + struct bpf_prog *prog_new, struct bpf_link *link, + struct bpf_prog *prog_old, + u32 flags, u32 id_or_fd, u64 revision); + +int bpf_mprog_detach(struct bpf_mprog_entry *entry, + struct bpf_mprog_entry **entry_new, + struct bpf_prog *prog, struct bpf_link *link, + u32 flags, u32 id_or_fd, u64 revision); + +int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr, + struct bpf_mprog_entry *entry); + +static inline bool bpf_mprog_supported(enum bpf_prog_type type) +{ + switch (type) { + case BPF_PROG_TYPE_SCHED_CLS: + return true; + default: + return false; + } +} +#endif /* __BPF_MPROG_H */ diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index f70f9ac884d2..b6e58dab8e27 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -745,7 +745,7 @@ static inline bool bpf_prog_check_recur(const struct bpf_prog *prog) } } -#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED) +#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF) static inline bool bpf_type_has_unsafe_modifiers(u32 type) { diff --git a/include/linux/brcmphy.h 
b/include/linux/brcmphy.h index 5d732f48f787..c55810a43541 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -44,6 +44,7 @@ #define PHY_ID_BCM7366 0x600d8490 #define PHY_ID_BCM7346 0x600d8650 #define PHY_ID_BCM7362 0x600d84b0 +#define PHY_ID_BCM74165 0x359052c0 #define PHY_ID_BCM7425 0x600d86b0 #define PHY_ID_BCM7429 0x600d8730 #define PHY_ID_BCM7435 0x600d8750 diff --git a/include/linux/btf.h b/include/linux/btf.h index cac9f304e27a..df64cc642074 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -204,8 +204,6 @@ u32 btf_nr_types(const struct btf *btf); bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, const struct btf_member *m, u32 expected_offset, u32 expected_size); -int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t); -int btf_find_timer(const struct btf *btf, const struct btf_type *t); struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t, u32 field_mask, u32 value_size); int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec); diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index 00950cc03bff..a3462a9b8e18 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -267,5 +267,6 @@ MAX_BTF_TRACING_TYPE, extern u32 btf_tracing_ids[]; extern u32 bpf_cgroup_btf_id[]; extern u32 bpf_local_storage_map_btf_id[]; +extern u32 btf_bpf_map_id[]; #endif diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 6cb3e9af78c9..06566aee94ca 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -194,8 +194,6 @@ void buffer_check_dirty_writeback(struct folio *folio, void mark_buffer_dirty(struct buffer_head *bh); void mark_buffer_write_io_error(struct buffer_head *bh); void touch_buffer(struct buffer_head *bh); -void set_bh_page(struct buffer_head *bh, - struct page *page, unsigned long offset); void folio_set_bh(struct buffer_head *bh, struct folio *folio, unsigned long offset); bool try_to_free_buffers(struct folio *); @@ -288,7 +286,7 @@ int cont_write_begin(struct file *, struct address_space *, loff_t, unsigned, struct page **, void **, get_block_t *, loff_t *); int generic_cont_expand_simple(struct inode *inode, loff_t size); -int block_commit_write(struct page *page, unsigned from, unsigned to); +void block_commit_write(struct page *page, unsigned int from, unsigned int to); int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, get_block_t get_block); /* Convert errno to return value from ->page_mkwrite() call */ diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h index a6189d21f2ba..55f297b2c23f 100644 --- a/include/linux/cacheflush.h +++ b/include/linux/cacheflush.h @@ -7,14 +7,23 @@ struct folio; #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE -#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO +#ifndef flush_dcache_folio void flush_dcache_folio(struct folio *folio); #endif #else static inline void flush_dcache_folio(struct folio *folio) { } -#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 0 +#define flush_dcache_folio flush_dcache_folio #endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */ +#ifndef flush_icache_pages +static inline void flush_icache_pages(struct vm_area_struct *vma, + struct page *page, unsigned int nr) +{ +} +#endif + +#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1) + #endif /* _LINUX_CACHEFLUSH_H */ diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h index c205c51d79c9..d29bb4521947 100644 --- 
a/include/linux/can/rx-offload.h +++ b/include/linux/can/rx-offload.h @@ -3,7 +3,7 @@ * linux/can/rx-offload.h * * Copyright (c) 2014 David Jander, Protonic Holland - * Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> + * Copyright (c) 2014-2017, 2023 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> */ #ifndef _CAN_RX_OFFLOAD_H @@ -44,11 +44,14 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); int can_rx_offload_queue_timestamp(struct can_rx_offload *offload, struct sk_buff *skb, u32 timestamp); -unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, - unsigned int idx, u32 timestamp, - unsigned int *frame_len_ptr); +unsigned int can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload, + unsigned int idx, u32 timestamp, + unsigned int *frame_len_ptr); int can_rx_offload_queue_tail(struct can_rx_offload *offload, struct sk_buff *skb); +unsigned int can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload, + unsigned int idx, + unsigned int *frame_len_ptr); void can_rx_offload_irq_finish(struct can_rx_offload *offload); void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload); void can_rx_offload_del(struct can_rx_offload *offload); diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 8a0d5466c7be..ae20dbb885d6 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -661,6 +661,8 @@ struct cgroup_subsys { void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu); int (*css_extra_stat_show)(struct seq_file *seq, struct cgroup_subsys_state *css); + int (*css_local_stat_show)(struct seq_file *seq, + struct cgroup_subsys_state *css); int (*can_attach)(struct cgroup_taskset *tset); void (*cancel_attach)(struct cgroup_taskset *tset); diff --git a/include/linux/clk.h b/include/linux/clk.h index 1ef013324237..06f1b292f8a0 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -183,6 +183,39 @@ int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale); */ bool clk_is_match(const struct clk *p, const struct clk *q); +/** + * clk_rate_exclusive_get - get exclusivity over the rate control of a + * producer + * @clk: clock source + * + * This function allows drivers to get exclusive control over the rate of a + * provider. It prevents any other consumer from executing, even indirectly, + * an operation which could alter the rate of the provider or cause glitches. + * + * If exclusivity is claimed more than once on a clock, even by the same driver, + * the rate effectively gets locked as exclusivity can't be preempted. + * + * Must not be called from within atomic context. + * + * Returns success (0) or negative errno. + */ +int clk_rate_exclusive_get(struct clk *clk); + +/** + * clk_rate_exclusive_put - release exclusivity over the rate control of a + * producer + * @clk: clock source + * + * This function allows drivers to release the exclusivity they previously got + * from clk_rate_exclusive_get(). + * + * The caller must balance the number of clk_rate_exclusive_get() and + * clk_rate_exclusive_put() calls. + * + * Must not be called from within atomic context.
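+ *
+ * A minimal balanced-usage sketch (error handling and the rate value
+ * are illustrative only):
+ *
+ *	ret = clk_rate_exclusive_get(clk);
+ *	if (ret)
+ *		return ret;
+ *	ret = clk_set_rate(clk, rate);
+ *	...
+ *	clk_rate_exclusive_put(clk);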
+ */ +void clk_rate_exclusive_put(struct clk *clk); + #else static inline int clk_notifier_register(struct clk *clk, @@ -236,6 +269,13 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q) return p == q; } +static inline int clk_rate_exclusive_get(struct clk *clk) +{ + return 0; +} + +static inline void clk_rate_exclusive_put(struct clk *clk) {} + #endif #ifdef CONFIG_HAVE_CLK_PREPARE @@ -583,38 +623,6 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id); */ struct clk *devm_get_clk_from_child(struct device *dev, struct device_node *np, const char *con_id); -/** - * clk_rate_exclusive_get - get exclusivity over the rate control of a - * producer - * @clk: clock source - * - * This function allows drivers to get exclusive control over the rate of a - * provider. It prevents any other consumer to execute, even indirectly, - * opereation which could alter the rate of the provider or cause glitches - * - * If exlusivity is claimed more than once on clock, even by the same driver, - * the rate effectively gets locked as exclusivity can't be preempted. - * - * Must not be called from within atomic context. - * - * Returns success (0) or negative errno. - */ -int clk_rate_exclusive_get(struct clk *clk); - -/** - * clk_rate_exclusive_put - release exclusivity over the rate control of a - * producer - * @clk: clock source - * - * This function allows drivers to release the exclusivity it previously got - * from clk_rate_exclusive_get() - * - * The caller must balance the number of clk_rate_exclusive_get() and - * clk_rate_exclusive_put() calls. - * - * Must not be called from within atomic context. - */ -void clk_rate_exclusive_put(struct clk *clk); /** * clk_enable - inform the system when the clock source should be running. 
@@ -974,14 +982,6 @@ static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {} static inline void devm_clk_put(struct device *dev, struct clk *clk) {} - -static inline int clk_rate_exclusive_get(struct clk *clk) -{ - return 0; -} - -static inline void clk_rate_exclusive_put(struct clk *clk) {} - static inline int clk_enable(struct clk *clk) { return 0; diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 00efa35c350f..28566624f008 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -95,6 +95,19 @@ #endif /* + * Optional: only supported since gcc >= 14 + * Optional: only supported since clang >= 18 + * + * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896 + * clang: https://reviews.llvm.org/D148381 + */ +#if __has_attribute(__counted_by__) +# define __counted_by(member) __attribute__((__counted_by__(member))) +#else +# define __counted_by(member) +#endif + +/* * Optional: not supported by gcc * Optional: only supported since clang >= 14.0 * @@ -130,19 +143,6 @@ #endif /* - * Optional: only supported since gcc >= 14 - * Optional: only supported since clang >= 17 - * - * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896 - * clang: https://reviews.llvm.org/D148381 - */ -#if __has_attribute(__element_count__) -# define __counted_by(member) __attribute__((__element_count__(#member))) -#else -# define __counted_by(member) -#endif - -/* * Optional: only supported since clang >= 14.0 * * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-error-function-attribute diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 547ea1ff806e..c523c6683789 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -106,6 +106,34 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } #define __cold #endif +/* + * On x86-64 and arm64 targets, __preserve_most changes the calling convention + * of a function to make the code in the caller as unintrusive as possible. This + * convention behaves identically to the C calling convention on how arguments + * and return values are passed, but uses a different set of caller- and callee- + * saved registers. + * + * The purpose is to alleviate the burden of saving and recovering a large + * register set before and after the call in the caller. This is beneficial for + * rarely taken slow paths, such as error-reporting functions that may be called + * from hot paths. + * + * Note: This may conflict with instrumentation inserted on function entry which + * does not use __preserve_most or equivalent convention (if in assembly). Since + * function tracing assumes the normal C calling convention, where the attribute + * is supported, __preserve_most implies notrace. It is recommended to restrict + * use of the attribute to functions that should or already disable tracing. + * + * Optional: not supported by gcc.
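+ *
+ * A hypothetical slow-path helper using the attribute might look like:
+ *
+ *	static __preserve_most void report_rare_error(int err)
+ *	{
+ *		pr_err("rare condition: %d\n", err);
+ *	}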
+ * + * clang: https://clang.llvm.org/docs/AttributeReference.html#preserve-most + */ +#if __has_attribute(__preserve_most__) && (defined(CONFIG_X86_64) || defined(CONFIG_ARM64)) +# define __preserve_most notrace __attribute__((__preserve_most__)) +#else +# define __preserve_most +#endif + /* Builtins */ /* diff --git a/include/linux/completion.h b/include/linux/completion.h index 62b32b19e0a8..fb2915676574 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -116,6 +116,7 @@ extern bool try_wait_for_completion(struct completion *x); extern bool completion_done(struct completion *x); extern void complete(struct completion *); +extern void complete_on_current_cpu(struct completion *x); extern void complete_all(struct completion *); #endif diff --git a/include/linux/connector.h b/include/linux/connector.h index 487350bb19c3..cec2d99ae902 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -90,13 +90,19 @@ void cn_del_callback(const struct cb_id *id); * If @group is not zero, then message will be delivered * to the specified group. * @gfp_mask: GFP mask. + * @filter: Filter function to be used at netlink layer. + * @filter_data: Filter data to be supplied to the filter function. * * It can be safely called from softirq context, but may silently * fail under strong memory pressure. * * If there are no listeners for given group %-ESRCH can be returned. */ -int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask); +int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, + u32 group, gfp_t gfp_mask, + int (*filter)(struct sock *dsk, struct sk_buff *skb, + void *data), + void *filter_data); /** * cn_netlink_send - Sends message to the specified groups. diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 6e6e57ec69e8..0abd60a7987b 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -18,6 +18,7 @@ #include <linux/compiler.h> #include <linux/cpumask.h> #include <linux/cpuhotplug.h> +#include <linux/cpu_smt.h> struct device; struct device_node; @@ -70,6 +71,10 @@ extern ssize_t cpu_show_mmio_stale_data(struct device *dev, char *buf); extern ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_gds(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, @@ -190,7 +195,6 @@ void arch_cpu_finalize_init(void); static inline void arch_cpu_finalize_init(void) { } #endif -void cpu_set_state_online(int cpu); void play_idle_precise(u64 duration_ns, u64 latency_ns); static inline void play_idle(unsigned long duration_us) @@ -204,30 +208,6 @@ void cpuhp_report_idle_dead(void); static inline void cpuhp_report_idle_dead(void) { } #endif /* #ifdef CONFIG_HOTPLUG_CPU */ -enum cpuhp_smt_control { - CPU_SMT_ENABLED, - CPU_SMT_DISABLED, - CPU_SMT_FORCE_DISABLED, - CPU_SMT_NOT_SUPPORTED, - CPU_SMT_NOT_IMPLEMENTED, -}; - -#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) -extern enum cpuhp_smt_control cpu_smt_control; -extern void cpu_smt_disable(bool force); -extern void cpu_smt_check_topology(void); -extern bool cpu_smt_possible(void); -extern int cpuhp_smt_enable(void); -extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval); -#else -# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED) -static inline void cpu_smt_disable(bool force) { 
} -static inline void cpu_smt_check_topology(void) { } -static inline bool cpu_smt_possible(void) { return false; } -static inline int cpuhp_smt_enable(void) { return 0; } -static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; } -#endif - extern bool cpu_mitigations_off(void); extern bool cpu_mitigations_auto_nosmt(void); diff --git a/include/linux/cpu_smt.h b/include/linux/cpu_smt.h new file mode 100644 index 000000000000..0c1664294b57 --- /dev/null +++ b/include/linux/cpu_smt.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CPU_SMT_H_ +#define _LINUX_CPU_SMT_H_ + +enum cpuhp_smt_control { + CPU_SMT_ENABLED, + CPU_SMT_DISABLED, + CPU_SMT_FORCE_DISABLED, + CPU_SMT_NOT_SUPPORTED, + CPU_SMT_NOT_IMPLEMENTED, +}; + +#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) +extern enum cpuhp_smt_control cpu_smt_control; +extern unsigned int cpu_smt_num_threads; +extern void cpu_smt_disable(bool force); +extern void cpu_smt_set_num_threads(unsigned int num_threads, + unsigned int max_threads); +extern bool cpu_smt_possible(void); +extern int cpuhp_smt_enable(void); +extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval); +#else +# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED) +# define cpu_smt_num_threads 1 +static inline void cpu_smt_disable(bool force) { } +static inline void cpu_smt_set_num_threads(unsigned int num_threads, + unsigned int max_threads) { } +static inline bool cpu_smt_possible(void) { return false; } +static inline int cpuhp_smt_enable(void) { return 0; } +static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; } +#endif + +#endif /* _LINUX_CPU_SMT_H_ */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 172ff51c1b2a..43b363a99215 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -19,6 +19,7 @@ #include <linux/pm_qos.h> #include <linux/spinlock.h> #include <linux/sysfs.h> +#include <linux/minmax.h> /********************************************************************* * CPUFREQ INTERFACE * @@ -370,7 +371,7 @@ struct cpufreq_driver { int (*target_intermediate)(struct cpufreq_policy *policy, unsigned int index); - /* should be defined, if possible */ + /* should be defined, if possible, return 0 on error */ unsigned int (*get)(unsigned int cpu); /* Called to update policy limits on firmware notifications. */ @@ -467,17 +468,8 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *poli unsigned int min, unsigned int max) { - if (policy->min < min) - policy->min = min; - if (policy->max < min) - policy->max = min; - if (policy->min > max) - policy->min = max; - if (policy->max > max) - policy->max = max; - if (policy->min > policy->max) - policy->min = policy->max; - return; + policy->max = clamp(policy->max, min, max); + policy->min = clamp(policy->min, min, policy->max); } static inline void diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 25b6e6e6ba6b..06dda85f0424 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -48,7 +48,7 @@ * same section. * * If neither #1 nor #2 apply, please use the dynamic state space when - * setting up a state by using CPUHP_PREPARE_DYN or CPUHP_PREPARE_ONLINE + * setting up a state by using CPUHP_BP_PREPARE_DYN or CPUHP_AP_ONLINE_DYN * for the @state argument of the setup function. 
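 *
 * For example, a driver without ordering constraints can register its
 * callbacks on the dynamic online state (callback names illustrative):
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "drv:online",
 *				drv_cpu_online, drv_cpu_offline);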
* * See Documentation/core-api/cpu_hotplug.rst for further information and diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 0d2e2a38b92d..f10fb87d49db 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -175,8 +175,8 @@ static inline unsigned int cpumask_first_zero(const struct cpumask *srcp) /** * cpumask_first_and - return the first cpu from *srcp1 & *srcp2 - * @src1p: the first input - * @src2p: the second input + * @srcp1: the first input + * @srcp2: the second input * * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). */ @@ -1197,6 +1197,10 @@ cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask, /** * cpumap_print_list_to_buf - copies the cpumask into the buffer as * comma-separated list of cpus + * @buf: the buffer to copy into + * @mask: the cpumask to copy + * @off: offset in the string from which we are copying; we copy to @buf + * @count: the maximum number of bytes to print * * Everything is same with the above cpumap_print_bitmask_to_buf() * except the print format. diff --git a/include/linux/damon.h b/include/linux/damon.h index d5d4d19928e0..ae2664d1d5f1 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -226,16 +226,26 @@ struct damos_stat { * enum damos_filter_type - Type of memory for &struct damos_filter * @DAMOS_FILTER_TYPE_ANON: Anonymous pages. * @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages. + * @DAMOS_FILTER_TYPE_ADDR: Address range. + * @DAMOS_FILTER_TYPE_TARGET: Data Access Monitoring target. * @NR_DAMOS_FILTER_TYPES: Number of filter types. * - * The support of each filter type is up to running &struct damon_operations. - * &enum DAMON_OPS_PADDR is supporting all filter types, while - * &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR are not supporting any - * filter types. + * The anon pages type and memcg type filters are handled by the underlying + * &struct damon_operations as part of trying the scheme action, and are + * therefore accounted as 'tried'. In contrast, the other types are handled + * by the core layer before the action is tried, and are therefore not + * accounted as 'tried'. + * + * Support for the filters handled by &struct damon_operations depends on the + * running &struct damon_operations. + * &enum DAMON_OPS_PADDR supports both anon pages type and memcg type filters, + * while &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR support neither of + * the two types. */ enum damos_filter_type { DAMOS_FILTER_TYPE_ANON, DAMOS_FILTER_TYPE_MEMCG, + DAMOS_FILTER_TYPE_ADDR, + DAMOS_FILTER_TYPE_TARGET, NR_DAMOS_FILTER_TYPES, }; @@ -244,18 +254,24 @@ enum damos_filter_type { * @type: Type of the page. * @matching: If the matching page should filtered out or in. * @memcg_id: Memcg id of the question if @type is DAMOS_FILTER_MEMCG. + * @addr_range: Address range if @type is DAMOS_FILTER_TYPE_ADDR. + * @target_idx: Index of the &struct damon_target of + * &damon_ctx->adaptive_targets if @type is + * DAMOS_FILTER_TYPE_TARGET. * @list: List head for siblings. * * Before applying the &damos->action to a memory region, DAMOS checks if each * page of the region matches to this and avoid applying the action if so. - * Note that the check support is up to &struct damon_operations - * implementation. + * Support of each filter type depends on the running &struct damon_operations + * and the type. Refer to &enum damos_filter_type for more detail.
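+ *
+ * For instance, an address-range filter that filters matching pages out
+ * could be set up roughly as follows (range values illustrative,
+ * allocation via damos_new_filter() assumed):
+ *
+ *	filter = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true);
+ *	filter->addr_range.start = start;
+ *	filter->addr_range.end = end;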
*/ struct damos_filter { enum damos_filter_type type; bool matching; union { unsigned short memcg_id; + struct damon_addr_range addr_range; + int target_idx; }; struct list_head list; }; diff --git a/include/linux/dax.h b/include/linux/dax.h index 261944ec0887..22cd9902345d 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -241,10 +241,10 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops); -vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, +vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order, pfn_t *pfnp, int *errp, const struct iomap_ops *ops); vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, - enum page_entry_size pe_size, pfn_t pfn); + unsigned int order, pfn_t pfn); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry_sync(struct address_space *mapping, pgoff_t index); diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h index 9192986b1a73..ac862422df15 100644 --- a/include/linux/decompress/mm.h +++ b/include/linux/decompress/mm.h @@ -48,7 +48,7 @@ MALLOC_VISIBLE void *malloc(int size) if (!malloc_ptr) malloc_ptr = free_mem_ptr; - malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */ + malloc_ptr = (malloc_ptr + 7) & ~7; /* Align */ p = (void *)malloc_ptr; malloc_ptr += size; diff --git a/include/linux/dm-verity-loadpin.h b/include/linux/dm-verity-loadpin.h index 552b817ab102..3ac6dbaeaa37 100644 --- a/include/linux/dm-verity-loadpin.h +++ b/include/linux/dm-verity-loadpin.h @@ -12,7 +12,7 @@ extern struct list_head dm_verity_loadpin_trusted_root_digests; struct dm_verity_loadpin_trusted_root_digest { struct list_head node; unsigned int len; - u8 data[]; + u8 data[] __counted_by(len); }; #if IS_ENABLED(CONFIG_SECURITY_LOADPIN_VERITY) diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h index b1d26f9f1c9f..9f183a679277 100644 --- a/include/linux/dnotify.h +++ b/include/linux/dnotify.h @@ -30,7 +30,7 @@ struct dnotify_struct { FS_MOVED_FROM | FS_MOVED_TO) extern void dnotify_flush(struct file *, fl_owner_t); -extern int fcntl_dirnotify(int, struct file *, unsigned long); +extern int fcntl_dirnotify(int, struct file *, unsigned int); #else @@ -38,7 +38,7 @@ static inline void dnotify_flush(struct file *filp, fl_owner_t id) { } -static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) +static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg) { return -EINVAL; } diff --git a/include/linux/efi.h b/include/linux/efi.h index ab088c662e88..5a1e39df8b26 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -726,7 +726,6 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, return EFI_SUCCESS; } #endif -extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr); extern int __init __efi_memmap_init(struct efi_memory_map_data *data); extern int __init efi_memmap_init_early(struct efi_memory_map_data *data); @@ -1130,7 +1129,7 @@ extern bool efi_runtime_disabled(void); static inline bool efi_runtime_disabled(void) { return true; } #endif -extern void efi_call_virt_check_flags(unsigned long flags, const char *call); +extern void efi_call_virt_check_flags(unsigned long flags, const void *caller); extern unsigned long efi_call_virt_save_flags(void); enum efi_secureboot_mode { @@ -1171,8 +1170,7 @@ static inline void efi_check_for_embedded_firmwares(void) { } #define 
arch_efi_call_virt(p, f, args...) ((p)->f(args)) /* - * Arch code can implement the following three template macros, avoiding - * reptition for the void/non-void return cases of {__,}efi_call_virt(): + * Arch code must implement the following three routines: * * * arch_efi_call_virt_setup() * @@ -1181,9 +1179,8 @@ static inline void efi_check_for_embedded_firmwares(void) { } * * * arch_efi_call_virt() * - * Performs the call. The last expression in the macro must be the call - * itself, allowing the logic to be shared by the void and non-void - * cases. + * Performs the call. This routine takes a variable number of arguments so + * it must be implemented as a variadic preprocessor macro. * * * arch_efi_call_virt_teardown() * @@ -1192,33 +1189,20 @@ static inline void efi_check_for_embedded_firmwares(void) { } #define efi_call_virt_pointer(p, f, args...) \ ({ \ - efi_status_t __s; \ + typeof((p)->f(args)) __s; \ unsigned long __flags; \ \ arch_efi_call_virt_setup(); \ \ __flags = efi_call_virt_save_flags(); \ __s = arch_efi_call_virt(p, f, args); \ - efi_call_virt_check_flags(__flags, __stringify(f)); \ + efi_call_virt_check_flags(__flags, NULL); \ \ arch_efi_call_virt_teardown(); \ \ __s; \ }) -#define __efi_call_virt_pointer(p, f, args...) \ -({ \ - unsigned long __flags; \ - \ - arch_efi_call_virt_setup(); \ - \ - __flags = efi_call_virt_save_flags(); \ - arch_efi_call_virt(p, f, args); \ - efi_call_virt_check_flags(__flags, __stringify(f)); \ - \ - arch_efi_call_virt_teardown(); \ -}) - #define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE struct linux_efi_random_seed { @@ -1244,6 +1228,10 @@ extern int efi_tpm_final_log_size; extern unsigned long rci2_table_phys; +efi_status_t +efi_call_acpi_prm_handler(efi_status_t (__efiapi *handler_addr)(u64, void *), + u64 param_buffer_addr, void *context); + /* * efi_runtime_service() function identifiers. 
* "NONE" is used by efi_recover_from_page_fault() to check if the page @@ -1263,25 +1251,26 @@ enum efi_rts_ids { EFI_RESET_SYSTEM, EFI_UPDATE_CAPSULE, EFI_QUERY_CAPSULE_CAPS, + EFI_ACPI_PRM_HANDLER, }; +union efi_rts_args; + /* * efi_runtime_work: Details of EFI Runtime Service work - * @arg<1-5>: EFI Runtime Service function arguments + * @args: Pointer to union describing the arguments * @status: Status of executing EFI Runtime Service * @efi_rts_id: EFI Runtime Service function identifier * @efi_rts_comp: Struct used for handling completions + * @caller: The caller of the runtime service */ struct efi_runtime_work { - void *arg1; - void *arg2; - void *arg3; - void *arg4; - void *arg5; - efi_status_t status; - struct work_struct work; - enum efi_rts_ids efi_rts_id; - struct completion efi_rts_comp; + union efi_rts_args *args; + efi_status_t status; + struct work_struct work; + enum efi_rts_ids efi_rts_id; + struct completion efi_rts_comp; + const void *caller; }; extern struct efi_runtime_work efi_rts_work; diff --git a/include/linux/filelock.h b/include/linux/filelock.h index efcdd1631d9b..95e868e09e29 100644 --- a/include/linux/filelock.h +++ b/include/linux/filelock.h @@ -144,7 +144,7 @@ int fcntl_setlk64(unsigned int, struct file *, unsigned int, struct flock64 *); #endif -int fcntl_setlease(unsigned int fd, struct file *filp, long arg); +int fcntl_setlease(unsigned int fd, struct file *filp, int arg); int fcntl_getlease(struct file *filp); /* fs/locks.c */ @@ -167,8 +167,8 @@ bool vfs_inode_has_locks(struct inode *inode); int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl); int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); void lease_get_mtime(struct inode *, struct timespec64 *time); -int generic_setlease(struct file *, long, struct file_lock **, void **priv); -int vfs_setlease(struct file *, long, struct file_lock **, void **); +int generic_setlease(struct file *, int, struct file_lock **, void **priv); +int vfs_setlease(struct file *, int, struct file_lock **, void **); int lease_modify(struct file_lock *, int, struct list_head *); struct notifier_block; @@ -213,7 +213,7 @@ static inline int fcntl_setlk64(unsigned int fd, struct file *file, return -EACCES; } #endif -static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg) +static inline int fcntl_setlease(unsigned int fd, struct file *filp, int arg) { return -EINVAL; } @@ -306,13 +306,13 @@ static inline void lease_get_mtime(struct inode *inode, return; } -static inline int generic_setlease(struct file *filp, long arg, +static inline int generic_setlease(struct file *filp, int arg, struct file_lock **flp, void **priv) { return -EINVAL; } -static inline int vfs_setlease(struct file *filp, long arg, +static inline int vfs_setlease(struct file *filp, int arg, struct file_lock **lease, void **priv) { return -EINVAL; diff --git a/include/linux/filter.h b/include/linux/filter.h index f69114083ec7..761af6b3cf2b 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -69,6 +69,9 @@ struct ctl_table_header; /* unused opcode to mark special load instruction. Same as BPF_ABS */ #define BPF_PROBE_MEM 0x20 +/* unused opcode to mark special ldsx instruction. 
Same as BPF_IND */ +#define BPF_PROBE_MEMSX 0x40 + /* unused opcode to mark call to interpreter with arguments */ #define BPF_CALL_ARGS 0xe0 @@ -90,22 +93,28 @@ struct ctl_table_header; /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ -#define BPF_ALU64_REG(OP, DST, SRC) \ +#define BPF_ALU64_REG_OFF(OP, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ - .off = 0, \ + .off = OFF, \ .imm = 0 }) -#define BPF_ALU32_REG(OP, DST, SRC) \ +#define BPF_ALU64_REG(OP, DST, SRC) \ + BPF_ALU64_REG_OFF(OP, DST, SRC, 0) + +#define BPF_ALU32_REG_OFF(OP, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_OP(OP) | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ - .off = 0, \ + .off = OFF, \ .imm = 0 }) +#define BPF_ALU32_REG(OP, DST, SRC) \ + BPF_ALU32_REG_OFF(OP, DST, SRC, 0) + /* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */ #define BPF_ALU64_IMM(OP, DST, IMM) \ @@ -765,23 +774,6 @@ DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key); u32 xdp_master_redirect(struct xdp_buff *xdp); -static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, - struct xdp_buff *xdp) -{ - /* Driver XDP hooks are invoked within a single NAPI poll cycle and thus - * under local_bh_disable(), which provides the needed RCU protection - * for accessing map entries. - */ - u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); - - if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) { - if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev)) - act = xdp_master_redirect(xdp); - } - - return act; -} - void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog); static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog) @@ -1580,10 +1572,9 @@ static inline void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len) return NULL; } -static inline void *bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf, - unsigned long len, bool flush) +static inline void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf, + unsigned long len, bool flush) { - return NULL; } #endif /* CONFIG_NET */ diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h deleted file mode 100644 index eaa0ac5f9003..000000000000 --- a/include/linux/frontswap.h +++ /dev/null @@ -1,91 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_FRONTSWAP_H -#define _LINUX_FRONTSWAP_H - -#include <linux/swap.h> -#include <linux/mm.h> -#include <linux/bitops.h> -#include <linux/jump_label.h> - -struct frontswap_ops { - void (*init)(unsigned); /* this swap type was just swapon'ed */ - int (*store)(unsigned, pgoff_t, struct page *); /* store a page */ - int (*load)(unsigned, pgoff_t, struct page *, bool *); /* load a page */ - void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */ - void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */ -}; - -int frontswap_register_ops(const struct frontswap_ops *ops); - -extern void frontswap_init(unsigned type, unsigned long *map); -extern int __frontswap_store(struct page *page); -extern int __frontswap_load(struct page *page); -extern void __frontswap_invalidate_page(unsigned, pgoff_t); -extern void __frontswap_invalidate_area(unsigned); - -#ifdef CONFIG_FRONTSWAP -extern struct static_key_false frontswap_enabled_key; - -static inline bool frontswap_enabled(void) -{ - return static_branch_unlikely(&frontswap_enabled_key); -} - -static inline void frontswap_map_set(struct swap_info_struct *p, - 
unsigned long *map) -{ - p->frontswap_map = map; -} - -static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) -{ - return p->frontswap_map; -} -#else -/* all inline routines become no-ops and all externs are ignored */ - -static inline bool frontswap_enabled(void) -{ - return false; -} - -static inline void frontswap_map_set(struct swap_info_struct *p, - unsigned long *map) -{ -} - -static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) -{ - return NULL; -} -#endif - -static inline int frontswap_store(struct page *page) -{ - if (frontswap_enabled()) - return __frontswap_store(page); - - return -1; -} - -static inline int frontswap_load(struct page *page) -{ - if (frontswap_enabled()) - return __frontswap_load(page); - - return -1; -} - -static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset) -{ - if (frontswap_enabled()) - __frontswap_invalidate_page(type, offset); -} - -static inline void frontswap_invalidate_area(unsigned type) -{ - if (frontswap_enabled()) - __frontswap_invalidate_area(type); -} - -#endif /* _LINUX_FRONTSWAP_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 6867512907d6..c8ff4156a0a1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -338,6 +338,20 @@ enum rw_hint { #define IOCB_NOIO (1 << 20) /* can use bio alloc cache */ #define IOCB_ALLOC_CACHE (1 << 21) +/* + * IOCB_DIO_CALLER_COMP can be set by the iocb owner, to indicate that the + * iocb completion can be passed back to the owner for execution from a safe + * context rather than needing to be punted through a workqueue. If this + * flag is set, the bio completion handling may set iocb->dio_complete to a + * handler function and iocb->private to context information for that handler. + * The issuer should call the handler with that context information from task + * context to complete the processing of the iocb. Note that while this + * provides a task context for the dio_complete() callback, it should only be + * used on the completion side for non-IO generating completions. It's fine to + * call blocking functions from this callback, but they should not wait for + * unrelated IO (like cache flushing, new IO generation, etc). + */ +#define IOCB_DIO_CALLER_COMP (1 << 22) /* for use in trace events */ #define TRACE_IOCB_STRINGS \ @@ -351,7 +365,8 @@ enum rw_hint { { IOCB_WRITE, "WRITE" }, \ { IOCB_WAITQ, "WAITQ" }, \ { IOCB_NOIO, "NOIO" }, \ - { IOCB_ALLOC_CACHE, "ALLOC_CACHE" } + { IOCB_ALLOC_CACHE, "ALLOC_CACHE" }, \ + { IOCB_DIO_CALLER_COMP, "CALLER_COMP" } struct kiocb { struct file *ki_filp; @@ -360,7 +375,23 @@ struct kiocb { void *private; int ki_flags; u16 ki_ioprio; /* See linux/ioprio.h */ - struct wait_page_queue *ki_waitq; /* for async buffered IO */ + union { + /* + * Only used for async buffered reads, where it denotes the + * page waitqueue associated with completing the read. Valid + * IFF IOCB_WAITQ is set. + */ + struct wait_page_queue *ki_waitq; + /* + * Can be used for O_DIRECT IO, where the completion handling + * is punted back to the issuer of the IO. May only be set + * if IOCB_DIO_CALLER_COMP is set by the issuer, and the issuer + * must then check for presence of this handler when ki_complete + * is invoked. The data passed in to this handler must be + * assigned to ->private when dio_complete is assigned. 
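
/*
 * Editor's sketch of the issuer side described above: if the bio completion
 * installed a dio_complete handler, the issuer invokes it from task context
 * with the context stashed in ->private, then completes the iocb. How the
 * issuer gets hold of the iocb again is issuer-specific and assumed here.
 */
static void example_run_deferred_completion(struct kiocb *iocb)
{
	if (iocb->dio_complete) {
		/* ->private was set together with dio_complete at bio completion */
		ssize_t ret = iocb->dio_complete(iocb->private);

		iocb->ki_complete(iocb, ret);
	}
}
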
+ */ + ssize_t (*dio_complete)(void *data); + }; }; static inline bool is_sync_kiocb(struct kiocb *kiocb) @@ -447,11 +478,11 @@ struct address_space { atomic_t nr_thps; #endif struct rb_root_cached i_mmap; - struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; pgoff_t writeback_index; const struct address_space_operations *a_ops; unsigned long flags; + struct rw_semaphore i_mmap_rwsem; errseq_t wb_err; spinlock_t private_lock; struct list_head private_list; @@ -642,7 +673,7 @@ struct inode { loff_t i_size; struct timespec64 i_atime; struct timespec64 i_mtime; - struct timespec64 i_ctime; + struct timespec64 __i_ctime; /* use inode_*_ctime accessors! */ spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ unsigned short i_bytes; u8 i_blkbits; @@ -1069,7 +1100,7 @@ extern void fasync_free(struct fasync_struct *); extern void kill_fasync(struct fasync_struct **, int, int); extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force); -extern int f_setown(struct file *filp, unsigned long arg, int force); +extern int f_setown(struct file *filp, int who, int force); extern void f_delown(struct file *filp); extern pid_t f_getown(struct file *filp); extern int send_sigurg(struct fown_struct *fown); @@ -1095,6 +1126,8 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_LAZYTIME BIT(25) /* Update the on-disk [acm]times lazily */ /* These sb flags are internal to the kernel */ +#define SB_DEAD BIT(21) +#define SB_DYING BIT(24) #define SB_SUBMOUNT BIT(26) #define SB_FORCE BIT(27) #define SB_NOSEC BIT(28) @@ -1147,7 +1180,8 @@ enum { #define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1) struct sb_writers { - int frozen; /* Is sb frozen? */ + unsigned short frozen; /* Is sb frozen? */ + unsigned short freeze_holders; /* Who froze fs? */ struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS]; }; @@ -1474,7 +1508,79 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb, kgid_has_mapping(fs_userns, kgid); } -extern struct timespec64 current_time(struct inode *inode); +struct timespec64 current_mgtime(struct inode *inode); +struct timespec64 current_time(struct inode *inode); +struct timespec64 inode_set_ctime_current(struct inode *inode); + +/* + * Multigrain timestamps + * + * Conditionally use fine-grained ctime and mtime timestamps when there + * are users actively observing them via getattr. The primary use-case + * for this is NFS clients that use the ctime to distinguish between + * different states of the file, and that are often fooled by multiple + * operations that occur in the same coarse-grained timer tick. + * + * The kernel always keeps normalized struct timespec64 values in the ctime, + * which means that only the first 30 bits of the value are used. Use the + * 31st bit of the ctime's tv_nsec field as a flag to indicate that the value + * has been queried since it was last updated. + */ +#define I_CTIME_QUERIED (1L<<30) + +/** + * inode_get_ctime - fetch the current ctime from the inode + * @inode: inode from which to fetch ctime + * + * Grab the current ctime tv_nsec field from the inode, mask off the + * I_CTIME_QUERIED flag and return it. This is mostly intended for use by + * internal consumers of the ctime that aren't concerned with ensuring a + * fine-grained update on the next change (e.g. when preparing to store + * the value in the backing store for later retrieval). + * + * This is safe to call regardless of whether the underlying filesystem + * is using multigrain timestamps. 
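
/*
 * Editor's sketch, not from the patch: with i_ctime now private (__i_ctime),
 * filesystem code updates the ctime via inode_set_ctime_current() and reads
 * it back, with I_CTIME_QUERIED masked off, via inode_get_ctime(). The
 * myfs_* names and on-disk layout are hypothetical.
 */
struct myfs_disk_inode {
	__le64 ctime_sec;
	__le32 ctime_nsec;
};

static void myfs_store_ctime(struct inode *inode, struct myfs_disk_inode *di)
{
	struct timespec64 ctime = inode_get_ctime(inode);	/* flag stripped */

	di->ctime_sec = cpu_to_le64(ctime.tv_sec);
	di->ctime_nsec = cpu_to_le32(ctime.tv_nsec);
}

static void myfs_touch_ctime(struct inode *inode)
{
	/* may hand out a fine-grained stamp if the ctime was queried */
	struct timespec64 now = inode_set_ctime_current(inode);

	inode->i_mtime = now;
}
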
+ */ +static inline struct timespec64 inode_get_ctime(const struct inode *inode) +{ + struct timespec64 ctime; + + ctime.tv_sec = inode->__i_ctime.tv_sec; + ctime.tv_nsec = inode->__i_ctime.tv_nsec & ~I_CTIME_QUERIED; + + return ctime; +} + +/** + * inode_set_ctime_to_ts - set the ctime in the inode + * @inode: inode in which to set the ctime + * @ts: value to set in the ctime field + * + * Set the ctime in @inode to @ts + */ +static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode, + struct timespec64 ts) +{ + inode->__i_ctime = ts; + return ts; +} + +/** + * inode_set_ctime - set the ctime in the inode + * @inode: inode in which to set the ctime + * @sec: tv_sec value to set + * @nsec: tv_nsec value to set + * + * Set the ctime in @inode to { @sec, @nsec } + */ +static inline struct timespec64 inode_set_ctime(struct inode *inode, + time64_t sec, long nsec) +{ + struct timespec64 ts = { .tv_sec = sec, + .tv_nsec = nsec }; + + return inode_set_ctime_to_ts(inode, ts); +} /* * Snapshotting support. @@ -1770,6 +1876,7 @@ struct dir_context { struct iov_iter; struct io_uring_cmd; +struct offset_ctx; struct file_operations { struct module *owner; @@ -1780,7 +1887,6 @@ struct file_operations { ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *, unsigned int flags); - int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); @@ -1799,7 +1905,7 @@ struct file_operations { ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); void (*splice_eof)(struct file *file); - int (*setlease)(struct file *, long, struct file_lock **, void **); + int (*setlease)(struct file *, int, struct file_lock **, void **); long (*fallocate)(struct file *file, int mode, loff_t offset, loff_t len); void (*show_fdinfo)(struct seq_file *m, struct file *f); @@ -1817,6 +1923,13 @@ struct file_operations { unsigned int poll_flags); } __randomize_layout; +/* Wrap a directory iterator that needs exclusive inode access */ +int wrap_directory_iterator(struct file *, struct dir_context *, + int (*) (struct file *, struct dir_context *)); +#define WRAP_DIR_ITER(x) \ + static int shared_##x(struct file *file , struct dir_context *ctx) \ + { return wrap_directory_iterator(file, ctx, x); } + struct inode_operations { struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); @@ -1844,7 +1957,7 @@ struct inode_operations { ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); - int (*update_time)(struct inode *, struct timespec64 *, int); + int (*update_time)(struct inode *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned open_flag, umode_t create_mode); @@ -1857,6 +1970,7 @@ struct inode_operations { int (*fileattr_set)(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa); int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa); + struct offset_ctx *(*get_offset_ctx)(struct inode *inode); } ____cacheline_aligned; static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, @@ -1902,6 
+2016,10 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos, struct file *dst_file, loff_t dst_pos, loff_t len, unsigned int remap_flags); +enum freeze_holder { + FREEZE_HOLDER_KERNEL = (1U << 0), + FREEZE_HOLDER_USERSPACE = (1U << 1), +}; struct super_operations { struct inode *(*alloc_inode)(struct super_block *sb); @@ -1914,9 +2032,9 @@ struct super_operations { void (*evict_inode) (struct inode *); void (*put_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); - int (*freeze_super) (struct super_block *); + int (*freeze_super) (struct super_block *, enum freeze_holder who); int (*freeze_fs) (struct super_block *); - int (*thaw_super) (struct super_block *); + int (*thaw_super) (struct super_block *, enum freeze_holder who); int (*unfreeze_fs) (struct super_block *); int (*statfs) (struct dentry *, struct kstatfs *); int (*remount_fs) (struct super_block *, int *, char *); @@ -2194,7 +2312,7 @@ enum file_time_flags { extern bool atime_needs_update(const struct path *, struct inode *); extern void touch_atime(const struct path *); -int inode_update_time(struct inode *inode, struct timespec64 *time, int flags); +int inode_update_time(struct inode *inode, int flags); static inline void file_accessed(struct file *file) { @@ -2216,6 +2334,7 @@ struct file_system_type { #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ #define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ #define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */ +#define FS_MGTIME 64 /* FS uses multigrain timestamps */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */ int (*init_fs_context)(struct fs_context *); const struct fs_parameter_spec *parameters; @@ -2239,6 +2358,17 @@ struct file_system_type { #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) +/** + * is_mgtime: is this inode using multigrain timestamps + * @inode: inode to test for multigrain timestamps + * + * Return true if the inode uses multigrain timestamps, false otherwise. 
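
/*
 * Editor's sketch for the enum freeze_holder plumbing introduced above: both
 * the generic freeze_super()/thaw_super() and a filesystem's ->freeze_super()
 * now identify who holds the freeze, so a filesystem that wraps the generic
 * helper simply forwards @who. "myfs_freeze_super" is hypothetical.
 */
static int myfs_freeze_super(struct super_block *sb, enum freeze_holder who)
{
	int err;

	err = freeze_super(sb, who);	/* generic helper now takes the holder */
	if (err)
		return err;
	/* filesystem-specific quiescing could follow here */
	return 0;
}

static const struct super_operations myfs_super_ops = {
	.freeze_super	= myfs_freeze_super,
	/* ... */
};
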
+ */ +static inline bool is_mgtime(const struct inode *inode) +{ + return inode->i_sb->s_type->fs_flags & FS_MGTIME; +} + extern struct dentry *mount_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int)); @@ -2290,8 +2420,8 @@ extern int unregister_filesystem(struct file_system_type *); extern int vfs_statfs(const struct path *, struct kstatfs *); extern int user_statfs(const char __user *, struct kstatfs *); extern int fd_statfs(int, struct kstatfs *); -extern int freeze_super(struct super_block *super); -extern int thaw_super(struct super_block *super); +int freeze_super(struct super_block *super, enum freeze_holder who); +int thaw_super(struct super_block *super, enum freeze_holder who); extern __printf(2, 3) int super_setup_bdi_name(struct super_block *sb, char *fmt, ...); extern int super_setup_bdi(struct super_block *sb); @@ -2300,7 +2430,8 @@ extern int current_umask(void); extern void ihold(struct inode * inode); extern void iput(struct inode *); -extern int generic_update_time(struct inode *, struct timespec64 *, int); +int inode_update_timestamps(struct inode *inode, int flags); +int generic_update_time(struct inode *, int); /* /sys/fs */ extern struct kobject *fs_kobj; @@ -2539,6 +2670,13 @@ static inline bool inode_wrong_type(const struct inode *inode, umode_t mode) return (inode->i_mode ^ mode) & S_IFMT; } +/** + * file_start_write - get write access to a superblock for regular file io + * @file: the file we want to write to + * + * This is a variant of sb_start_write() which is a noop on non-regular files. + * Should be matched with a call to file_end_write(). + */ static inline void file_start_write(struct file *file) { if (!S_ISREG(file_inode(file)->i_mode)) return; @@ -2553,11 +2691,53 @@ static inline bool file_start_write_trylock(struct file *file) return sb_start_write_trylock(file_inode(file)->i_sb); } +/** + * file_end_write - drop write access to a superblock of a regular file + * @file: the file we wrote to + * + * Should be matched with a call to file_start_write(). + */ static inline void file_end_write(struct file *file) { if (!S_ISREG(file_inode(file)->i_mode)) return; - __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE); + sb_end_write(file_inode(file)->i_sb); +} + +/** + * kiocb_start_write - get write access to a superblock for async file io + * @iocb: the io context we want to submit the write with + * + * This is a variant of sb_start_write() for async io submission. + * Should be matched with a call to kiocb_end_write(). + */ +static inline void kiocb_start_write(struct kiocb *iocb) +{ + struct inode *inode = file_inode(iocb->ki_filp); + + sb_start_write(inode->i_sb); + /* + * Fool lockdep by telling it the lock got released so that it + * doesn't complain about the held lock when we return to userspace. + */ + __sb_writers_release(inode->i_sb, SB_FREEZE_WRITE); +} + +/** + * kiocb_end_write - drop write access to a superblock after async file io + * @iocb: the io context we submitted the write with + * + * Should be matched with a call to kiocb_start_write(). + */ +static inline void kiocb_end_write(struct kiocb *iocb) +{ + struct inode *inode = file_inode(iocb->ki_filp); + + /* + * Tell lockdep we inherited freeze protection from submission thread.
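
/*
 * Editor's sketch of the intended pairing for the helpers above: freeze
 * protection is taken in the submitting task and dropped from whatever
 * context ends up completing the write. The example_* functions are
 * hypothetical.
 */
static void example_submit_async_write(struct kiocb *iocb)
{
	if (S_ISREG(file_inode(iocb->ki_filp)->i_mode))
		kiocb_start_write(iocb);
	/* hand the write to the device; completion may run in another task */
}

static void example_write_done(struct kiocb *iocb, long ret)
{
	if (S_ISREG(file_inode(iocb->ki_filp)->i_mode))
		kiocb_end_write(iocb);
	iocb->ki_complete(iocb, ret);
}
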
+ */ + __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE); + sb_end_write(inode->i_sb); } /* @@ -2607,8 +2787,7 @@ static inline bool inode_is_open_for_write(const struct inode *inode) #if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING) static inline void i_readcount_dec(struct inode *inode) { - BUG_ON(!atomic_read(&inode->i_readcount)); - atomic_dec(&inode->i_readcount); + BUG_ON(atomic_dec_return(&inode->i_readcount) < 0); } static inline void i_readcount_inc(struct inode *inode) { @@ -2874,7 +3053,8 @@ extern void page_put_link(void *); extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); -void generic_fillattr(struct mnt_idmap *, struct inode *, struct kstat *); +void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode); +void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *); void generic_fill_statx_attr(struct inode *inode, struct kstat *stat); extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int); @@ -2913,7 +3093,6 @@ extern int vfs_readlink(struct dentry *, char __user *, int); extern struct file_system_type *get_filesystem(struct file_system_type *fs); extern void put_filesystem(struct file_system_type *fs); extern struct file_system_type *get_fs_type(const char *name); -extern struct super_block *get_super(struct block_device *); extern struct super_block *get_active_super(struct block_device *bdev); extern void drop_super(struct super_block *sb); extern void drop_super_exclusive(struct super_block *sb); @@ -2934,6 +3113,8 @@ extern int simple_open(struct inode *inode, struct file *file); extern int simple_link(struct dentry *, struct inode *, struct dentry *); extern int simple_unlink(struct inode *, struct dentry *); extern int simple_rmdir(struct inode *, struct dentry *); +void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry); extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry); extern int simple_rename(struct mnt_idmap *, struct inode *, @@ -2950,7 +3131,7 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping, extern const struct address_space_operations ram_aops; extern int always_delete_dentry(const struct dentry *); extern struct inode *alloc_anon_inode(struct super_block *); -extern int simple_nosetlease(struct file *, long, struct file_lock **, void **); +extern int simple_nosetlease(struct file *, int, struct file_lock **, void **); extern const struct dentry_operations simple_dentry_operations; extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); @@ -2971,6 +3152,22 @@ extern ssize_t simple_read_from_buffer(void __user *to, size_t count, extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, const void __user *from, size_t count); +struct offset_ctx { + struct xarray xa; + u32 next_offset; +}; + +void simple_offset_init(struct offset_ctx *octx); +int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry); +void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry); +int simple_offset_rename_exchange(struct inode *old_dir, + struct dentry *old_dentry, + struct inode *new_dir, + struct dentry *new_dentry); +void 
simple_offset_destroy(struct offset_ctx *octx); + +extern const struct file_operations simple_offset_dir_operations; + extern int __generic_file_fsync(struct file *, loff_t, loff_t, int); extern int generic_file_fsync(struct file *, loff_t, loff_t, int); diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index ff6341e09925..96332db693d5 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -109,6 +109,7 @@ struct fs_context { bool need_free:1; /* Need to call ops->free() */ bool global:1; /* Goes into &init_user_ns */ bool oldapi:1; /* Coming from mount(2) */ + bool exclusive:1; /* create new superblock, reject existing one */ }; struct fs_context_operations { @@ -150,14 +151,13 @@ extern int get_tree_nodev(struct fs_context *fc, extern int get_tree_single(struct fs_context *fc, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); -extern int get_tree_single_reconf(struct fs_context *fc, - int (*fill_super)(struct super_block *sb, - struct fs_context *fc)); extern int get_tree_keyed(struct fs_context *fc, int (*fill_super)(struct super_block *sb, struct fs_context *fc), void *key); +int setup_bdev_super(struct super_block *sb, int sb_flags, + struct fs_context *fc); extern int get_tree_bdev(struct fs_context *fc, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h deleted file mode 100644 index 77d783f71527..000000000000 --- a/include/linux/fs_enet_pd.h +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Platform information definitions for the - * universal Freescale Ethernet driver. - * - * Copyright (c) 2003 Intracom S.A. - * by Pantelis Antoniou <panto@intracom.gr> - * - * 2005 (c) MontaVista Software, Inc. - * Vitaly Bordug <vbordug@ru.mvista.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
- */ - -#ifndef FS_ENET_PD_H -#define FS_ENET_PD_H - -#include <linux/clk.h> -#include <linux/string.h> -#include <linux/of_mdio.h> -#include <linux/if_ether.h> -#include <asm/types.h> - -#define FS_ENET_NAME "fs_enet" - -enum fs_id { - fsid_fec1, - fsid_fec2, - fsid_fcc1, - fsid_fcc2, - fsid_fcc3, - fsid_scc1, - fsid_scc2, - fsid_scc3, - fsid_scc4, -}; - -#define FS_MAX_INDEX 9 - -static inline int fs_get_fec_index(enum fs_id id) -{ - if (id >= fsid_fec1 && id <= fsid_fec2) - return id - fsid_fec1; - return -1; -} - -static inline int fs_get_fcc_index(enum fs_id id) -{ - if (id >= fsid_fcc1 && id <= fsid_fcc3) - return id - fsid_fcc1; - return -1; -} - -static inline int fs_get_scc_index(enum fs_id id) -{ - if (id >= fsid_scc1 && id <= fsid_scc4) - return id - fsid_scc1; - return -1; -} - -static inline int fs_fec_index2id(int index) -{ - int id = fsid_fec1 + index - 1; - if (id >= fsid_fec1 && id <= fsid_fec2) - return id; - return FS_MAX_INDEX; - } - -static inline int fs_fcc_index2id(int index) -{ - int id = fsid_fcc1 + index - 1; - if (id >= fsid_fcc1 && id <= fsid_fcc3) - return id; - return FS_MAX_INDEX; -} - -static inline int fs_scc_index2id(int index) -{ - int id = fsid_scc1 + index - 1; - if (id >= fsid_scc1 && id <= fsid_scc4) - return id; - return FS_MAX_INDEX; -} - -enum fs_mii_method { - fsmii_fixed, - fsmii_fec, - fsmii_bitbang, -}; - -enum fs_ioport { - fsiop_porta, - fsiop_portb, - fsiop_portc, - fsiop_portd, - fsiop_porte, -}; - -struct fs_mii_bit { - u32 offset; - u8 bit; - u8 polarity; -}; -struct fs_mii_bb_platform_info { - struct fs_mii_bit mdio_dir; - struct fs_mii_bit mdio_dat; - struct fs_mii_bit mdc_dat; - int delay; /* delay in us */ - int irq[32]; /* irqs per phy's */ -}; - -struct fs_platform_info { - - void(*init_ioports)(struct fs_platform_info *); - /* device specific information */ - int fs_no; /* controller index */ - char fs_type[4]; /* controller type */ - - u32 cp_page; /* CPM page */ - u32 cp_block; /* CPM sblock */ - u32 cp_command; /* CPM page/sblock/mcn */ - - u32 clk_trx; /* some stuff for pins & mux configuration*/ - u32 clk_rx; - u32 clk_tx; - u32 clk_route; - u32 clk_mask; - - u32 mem_offset; - u32 dpram_offset; - u32 fcc_regs_c; - - u32 device_flags; - - struct device_node *phy_node; - const struct fs_mii_bus_info *bus_info; - - int rx_ring, tx_ring; /* number of buffers on rx */ - __u8 macaddr[ETH_ALEN]; /* mac address */ - int rx_copybreak; /* limit we copy small frames */ - int napi_weight; /* NAPI weight */ - - int use_rmii; /* use RMII mode */ - int has_phy; /* if the network is phy container as well...*/ - - struct clk *clk_per; /* 'per' clock for register access */ -}; -struct fs_mii_fec_platform_info { - u32 irq[32]; - u32 mii_speed; -}; - -static inline int fs_get_id(struct fs_platform_info *fpi) -{ - if(strstr(fpi->fs_type, "SCC")) - return fs_scc_index2id(fpi->fs_no); - if(strstr(fpi->fs_type, "FCC")) - return fs_fcc_index2id(fpi->fs_no); - if(strstr(fpi->fs_type, "FEC")) - return fs_fec_index2id(fpi->fs_no); - return fpi->fs_no; -} - -#endif diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h index 54210a42c30d..010d39d0dc1c 100644 --- a/include/linux/fs_stack.h +++ b/include/linux/fs_stack.h @@ -24,7 +24,7 @@ static inline void fsstack_copy_attr_times(struct inode *dest, { dest->i_atime = src->i_atime; dest->i_mtime = src->i_mtime; - dest->i_ctime = src->i_ctime; + inode_set_ctime_to_ts(dest, inode_get_ctime(src)); } #endif /* _LINUX_FS_STACK_H */ diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 
68da30625a6c..99c474de800d 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -439,6 +439,50 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len) kunmap_local(addr); } +static inline void memcpy_from_folio(char *to, struct folio *folio, + size_t offset, size_t len) +{ + VM_BUG_ON(offset + len > folio_size(folio)); + + do { + const char *from = kmap_local_folio(folio, offset); + size_t chunk = len; + + if (folio_test_highmem(folio) && + chunk > PAGE_SIZE - offset_in_page(offset)) + chunk = PAGE_SIZE - offset_in_page(offset); + memcpy(to, from, chunk); + kunmap_local(from); + + from += chunk; + offset += chunk; + len -= chunk; + } while (len > 0); +} + +static inline void memcpy_to_folio(struct folio *folio, size_t offset, + const char *from, size_t len) +{ + VM_BUG_ON(offset + len > folio_size(folio)); + + do { + char *to = kmap_local_folio(folio, offset); + size_t chunk = len; + + if (folio_test_highmem(folio) && + chunk > PAGE_SIZE - offset_in_page(offset)) + chunk = PAGE_SIZE - offset_in_page(offset); + memcpy(to, from, chunk); + kunmap_local(to); + + from += chunk; + offset += chunk; + len -= chunk; + } while (len > 0); + + flush_dcache_folio(folio); +} + /** * memcpy_from_file_folio - Copy some bytes from a file folio. * @to: The destination buffer. diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index a7d54d4d41fd..39fbfb4be944 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -104,7 +104,7 @@ enum qm_stop_reason { QM_NORMAL, QM_SOFT_RESET, - QM_FLR, + QM_DOWN, }; enum qm_state { diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 20284387b841..fa0350b0812a 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -25,9 +25,6 @@ static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) #endif vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf); -struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, - unsigned long addr, pmd_t *pmd, - unsigned int flags); bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long next); int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, @@ -143,9 +140,7 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); -void prep_transhuge_page(struct page *page); -void free_transhuge_page(struct page *page); - +void folio_prep_large_rmappable(struct folio *folio); bool can_split_folio(struct folio *folio, int *pextra_pins); int split_huge_page_to_list(struct page *page, struct list_head *list); static inline int split_huge_page(struct page *page) @@ -285,7 +280,7 @@ static inline bool hugepage_vma_check(struct vm_area_struct *vma, return false; } -static inline void prep_transhuge_page(struct page *page) {} +static inline void folio_prep_large_rmappable(struct folio *folio) {} #define transparent_hugepage_flags 0UL diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index ca3c8e10f24a..5b2626063f4f 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -26,6 +26,8 @@ typedef struct { unsigned long pd; } hugepd_t; #define __hugepd(x) ((hugepd_t) { (x) }) #endif +void free_huge_folio(struct folio *folio); + #ifdef CONFIG_HUGETLB_PAGE #include <linux/mempolicy.h> @@ -131,10 +133,8 @@ int move_hugetlb_page_tables(struct vm_area_struct 
*vma, int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *, struct vm_area_struct *); struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, - unsigned long address, unsigned int flags); -long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, - struct page **, unsigned long *, unsigned long *, - long, unsigned int, int *); + unsigned long address, unsigned int flags, + unsigned int *page_mask); void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long, struct page *, zap_flags_t); @@ -167,7 +167,6 @@ int get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared); void folio_putback_active_hugetlb(struct folio *folio); void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason); -void free_huge_page(struct page *page); void hugetlb_fix_reserve_counts(struct inode *inode); extern struct mutex *hugetlb_fault_mutex_table; u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx); @@ -297,21 +296,13 @@ static inline void adjust_range_if_pmd_sharing_possible( { } -static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, - unsigned long address, unsigned int flags) +static inline struct page *hugetlb_follow_page_mask( + struct vm_area_struct *vma, unsigned long address, unsigned int flags, + unsigned int *page_mask) { BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE*/ } -static inline long follow_hugetlb_page(struct mm_struct *mm, - struct vm_area_struct *vma, struct page **pages, - unsigned long *position, unsigned long *nr_pages, - long i, unsigned int flags, int *nonblocking) -{ - BUG(); - return 0; -} - static inline int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *dst_vma, @@ -851,11 +842,6 @@ static inline struct hstate *folio_hstate(struct folio *folio) return size_to_hstate(folio_size(folio)); } -static inline struct hstate *page_hstate(struct page *page) -{ - return folio_hstate(page_folio(page)); -} - static inline unsigned hstate_index_to_shift(unsigned index) { return hstates[index].order + PAGE_SHIFT; @@ -1007,6 +993,11 @@ void hugetlb_register_node(struct node *node); void hugetlb_unregister_node(struct node *node); #endif +/* + * Check if a given raw @page in a hugepage is HWPOISON. 
+ */ +bool is_raw_hwpoison_page_in_hugepage(struct page *page); + #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; @@ -1067,11 +1058,6 @@ static inline struct hstate *folio_hstate(struct folio *folio) return NULL; } -static inline struct hstate *page_hstate(struct page *page) -{ - return NULL; -} - static inline struct hstate *size_to_hstate(unsigned long size) { return NULL; diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index bfbc37ce223b..3ac3974b3c78 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1239,9 +1239,6 @@ extern int vmbus_recvpacket_raw(struct vmbus_channel *channel, u32 *buffer_actual_len, u64 *requestid); - -extern void vmbus_ontimer(unsigned long data); - /* Base driver object */ struct hv_driver { const char *name; diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index db0f4fcfdaf4..e3b3b0fa2a8f 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -85,12 +85,10 @@ extern void icmpv6_param_prob_reason(struct sk_buff *skb, struct flowi6; struct in6_addr; -extern void icmpv6_flow_init(struct sock *sk, - struct flowi6 *fl6, - u8 type, - const struct in6_addr *saddr, - const struct in6_addr *daddr, - int oif); + +void icmpv6_flow_init(const struct sock *sk, struct flowi6 *fl6, u8 type, + const struct in6_addr *saddr, + const struct in6_addr *daddr, int oif); static inline void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) { diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 4b998090898e..bd2f6e19c357 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -836,9 +836,14 @@ enum ieee80211_preq_target_flags { }; /** - * struct ieee80211_quiet_ie + * struct ieee80211_quiet_ie - Quiet element + * @count: Quiet Count + * @period: Quiet Period + * @duration: Quiet Duration + * @offset: Quiet Offset * - * This structure refers to "Quiet information element" + * This structure represents the payload of the "Quiet element" as + * described in IEEE Std 802.11-2020 section 9.4.2.22. */ struct ieee80211_quiet_ie { u8 count; @@ -848,9 +853,15 @@ struct ieee80211_quiet_ie { } __packed; /** - * struct ieee80211_msrment_ie + * struct ieee80211_msrment_ie - Measurement element + * @token: Measurement Token + * @mode: Measurement Report Mode + * @type: Measurement Type + * @request: Measurement Request or Measurement Report * - * This structure refers to "Measurement Request/Report information element" + * This structure represents the payload of both the "Measurement + * Request element" and the "Measurement Report element" as described + * in IEEE Std 802.11-2020 sections 9.4.2.20 and 9.4.2.21. */ struct ieee80211_msrment_ie { u8 token; @@ -860,9 +871,14 @@ struct ieee80211_msrment_ie { } __packed; /** - * struct ieee80211_channel_sw_ie + * struct ieee80211_channel_sw_ie - Channel Switch Announcement element + * @mode: Channel Switch Mode + * @new_ch_num: New Channel Number + * @count: Channel Switch Count * - * This structure refers to "Channel Switch Announcement information element" + * This structure represents the payload of the "Channel Switch + * Announcement element" as described in IEEE Std 802.11-2020 section + * 9.4.2.18. 
*/ struct ieee80211_channel_sw_ie { u8 mode; @@ -871,9 +887,14 @@ struct ieee80211_channel_sw_ie { } __packed; /** - * struct ieee80211_ext_chansw_ie + * struct ieee80211_ext_chansw_ie - Extended Channel Switch Announcement element + * @mode: Channel Switch Mode + * @new_operating_class: New Operating Class + * @new_ch_num: New Channel Number + * @count: Channel Switch Count * - * This structure represents the "Extended Channel Switch Announcement element" + * This structure represents the "Extended Channel Switch Announcement + * element" as described in IEEE Std 802.11-2020 section 9.4.2.52. */ struct ieee80211_ext_chansw_ie { u8 mode; @@ -894,8 +915,14 @@ struct ieee80211_sec_chan_offs_ie { /** * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE + * @mesh_ttl: Time To Live + * @mesh_flags: Flags + * @mesh_reason: Reason Code + * @mesh_pre_value: Precedence Value * - * This structure represents the "Mesh Channel Switch Paramters element" + * This structure represents the payload of the "Mesh Channel Switch + * Parameters element" as described in IEEE Std 802.11-2020 section + * 9.4.2.102. */ struct ieee80211_mesh_chansw_params_ie { u8 mesh_ttl; @@ -906,6 +933,13 @@ struct ieee80211_mesh_chansw_params_ie { /** * struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE + * @new_channel_width: New Channel Width + * @new_center_freq_seg0: New Channel Center Frequency Segment 0 + * @new_center_freq_seg1: New Channel Center Frequency Segment 1 + * + * This structure represents the payload of the "Wide Bandwidth + * Channel Switch element" as described in IEEE Std 802.11-2020 + * section 9.4.2.160. */ struct ieee80211_wide_bw_chansw_ie { u8 new_channel_width; @@ -913,9 +947,14 @@ struct ieee80211_wide_bw_chansw_ie { } __packed; /** - * struct ieee80211_tim + * struct ieee80211_tim_ie - Traffic Indication Map information element + * @dtim_count: DTIM Count + * @dtim_period: DTIM Period + * @bitmap_ctrl: Bitmap Control + * @virtual_map: Partial Virtual Bitmap * - * This structure refers to "Traffic Indication Map information element" + * This structure represents the payload of the "TIM element" as + * described in IEEE Std 802.11-2020 section 9.4.2.5. */ struct ieee80211_tim_ie { u8 dtim_count; @@ -926,9 +965,17 @@ struct ieee80211_tim_ie { } __packed; /** - * struct ieee80211_meshconf_ie + * struct ieee80211_meshconf_ie - Mesh Configuration element + * @meshconf_psel: Active Path Selection Protocol Identifier + * @meshconf_pmetric: Active Path Selection Metric Identifier + * @meshconf_congest: Congestion Control Mode Identifier + * @meshconf_synch: Synchronization Method Identifier + * @meshconf_auth: Authentication Protocol Identifier + * @meshconf_form: Mesh Formation Info + * @meshconf_cap: Mesh Capability (see &enum mesh_config_capab_flags) * - * This structure refers to "Mesh Configuration information element" + * This structure represents the payload of the "Mesh Configuration + * element" as described in IEEE Std 802.11-2020 section 9.4.2.97. */ struct ieee80211_meshconf_ie { u8 meshconf_psel; @@ -950,6 +997,9 @@ struct ieee80211_meshconf_ie { * is ongoing * @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has * neighbors in deep sleep mode + * + * Enumerates the "Mesh Capability" as described in IEEE Std + * 802.11-2020 section 9.4.2.97.7. 
*/ enum mesh_config_capab_flags { IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS = 0x01, @@ -960,7 +1010,7 @@ enum mesh_config_capab_flags { #define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1 -/** +/* * mesh channel switch parameters element's flag indicator * */ @@ -969,9 +1019,17 @@ enum mesh_config_capab_flags { #define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2) /** - * struct ieee80211_rann_ie + * struct ieee80211_rann_ie - RANN (root announcement) element + * @rann_flags: Flags + * @rann_hopcount: Hop Count + * @rann_ttl: Element TTL + * @rann_addr: Root Mesh STA Address + * @rann_seq: HWMP Sequence Number + * @rann_interval: Interval + * @rann_metric: Metric * - * This structure refers to "Root Announcement information element" + * This structure represents the payload of the "RANN element" as + * described in IEEE Std 802.11-2020 section 9.4.2.111. */ struct ieee80211_rann_ie { u8 rann_flags; @@ -993,7 +1051,7 @@ enum ieee80211_ht_chanwidth_values { }; /** - * enum ieee80211_opmode_bits - VHT operating mode field bits + * enum ieee80211_vht_opmode_bits - VHT operating mode field bits * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width @@ -1042,9 +1100,12 @@ enum ieee80211_s1g_chanwidth { #define WLAN_USER_POSITION_LEN 16 /** - * struct ieee80211_tpc_report_ie + * struct ieee80211_tpc_report_ie - TPC Report element + * @tx_power: Transmit Power + * @link_margin: Link Margin * - * This structure refers to "TPC Report element" + * This structure represents the payload of the "TPC Report element" as + * described in IEEE Std 802.11-2020 section 9.4.2.16. */ struct ieee80211_tpc_report_ie { u8 tx_power; @@ -1062,9 +1123,14 @@ struct ieee80211_addba_ext_ie { } __packed; /** - * struct ieee80211_s1g_bcn_compat_ie + * struct ieee80211_s1g_bcn_compat_ie - S1G Beacon Compatibility element + * @compat_info: Compatibility Information + * @beacon_int: Beacon Interval + * @tsf_completion: TSF Completion * - * S1G Beacon Compatibility element + * This structure represents the payload of the "S1G Beacon + * Compatibility element" as described in IEEE Std 802.11-2020 section + * 9.4.2.196. */ struct ieee80211_s1g_bcn_compat_ie { __le16 compat_info; @@ -1073,9 +1139,15 @@ struct ieee80211_s1g_bcn_compat_ie { } __packed; /** - * struct ieee80211_s1g_oper_ie + * struct ieee80211_s1g_oper_ie - S1G Operation element + * @ch_width: S1G Operation Information Channel Width + * @oper_class: S1G Operation Information Operating Class + * @primary_ch: S1G Operation Information Primary Channel Number + * @oper_ch: S1G Operation Information Channel Center Frequency + * @basic_mcs_nss: Basic S1G-MCS and NSS Set * - * S1G Operation element + * This structure represents the payload of the "S1G Operation + * element" as described in IEEE Std 802.11-2020 section 9.4.2.212. */ struct ieee80211_s1g_oper_ie { u8 ch_width; @@ -1086,9 +1158,13 @@ struct ieee80211_s1g_oper_ie { } __packed; /** - * struct ieee80211_aid_response_ie + * struct ieee80211_aid_response_ie - AID Response element + * @aid: AID/Group AID + * @switch_count: AID Switch Count + * @response_int: AID Response Interval * - * AID Response element + * This structure represents the payload of the "AID Response element" + * as described in IEEE Std 802.11-2020 section 9.4.2.194. */ struct ieee80211_aid_response_ie { __le16 aid; @@ -1489,7 +1565,7 @@ struct ieee80211_tdls_data { /* * Peer-to-Peer IE attribute related definitions. 
*/ -/** +/* * enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute. */ enum ieee80211_p2p_attr_id { @@ -1539,11 +1615,17 @@ struct ieee80211_p2p_noa_attr { #define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7F /** - * struct ieee80211_bar - HT Block Ack Request + * struct ieee80211_bar - Block Ack Request frame format + * @frame_control: Frame Control + * @duration: Duration + * @ra: RA + * @ta: TA + * @control: BAR Control + * @start_seq_num: Starting Sequence Number (see Figure 9-37) * - * This structure refers to "HT BlockAckReq" as - * described in 802.11n draft section 7.2.1.7.1 - */ + * This structure represents the "BlockAckReq frame format" + * as described in IEEE Std 802.11-2020 section 9.3.1.7. +*/ struct ieee80211_bar { __le16 frame_control; __le16 duration; @@ -1563,13 +1645,17 @@ struct ieee80211_bar { #define IEEE80211_HT_MCS_MASK_LEN 10 /** - * struct ieee80211_mcs_info - MCS information + * struct ieee80211_mcs_info - Supported MCS Set field * @rx_mask: RX mask * @rx_highest: highest supported RX rate. If set represents * the highest supported RX data rate in units of 1 Mbps. * If this field is 0 this value should not be used to * consider the highest RX data rate supported. * @tx_params: TX parameters + * @reserved: Reserved bits + * + * This structure represents the "Supported MCS Set field" as + * described in IEEE Std 802.11-2020 section 9.4.2.55.4. */ struct ieee80211_mcs_info { u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN]; @@ -1600,10 +1686,16 @@ struct ieee80211_mcs_info { (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8) /** - * struct ieee80211_ht_cap - HT capabilities + * struct ieee80211_ht_cap - HT capabilities element + * @cap_info: HT Capability Information + * @ampdu_params_info: A-MPDU Parameters + * @mcs: Supported MCS Set + * @extended_ht_cap_info: HT Extended Capabilities + * @tx_BF_cap_info: Transmit Beamforming Capabilities + * @antenna_selection_info: ASEL Capability * - * This structure is the "HT capabilities element" as - * described in 802.11n D5.0 7.3.2.57 + * This structure represents the payload of the "HT Capabilities + * element" as described in IEEE Std 802.11-2020 section 9.4.2.55. */ struct ieee80211_ht_cap { __le16 cap_info; @@ -1691,9 +1783,14 @@ enum ieee80211_min_mpdu_spacing { /** * struct ieee80211_ht_operation - HT operation IE + * @primary_chan: Primary Channel + * @ht_param: HT Operation Information parameters + * @operation_mode: HT Operation Information operation mode + * @stbc_param: HT Operation Information STBC params + * @basic_set: Basic HT-MCS Set * - * This structure is the "HT operation element" as - * described in 802.11n-2009 7.3.2.57 + * This structure represents the payload of the "HT Operation + * element" as described in IEEE Std 802.11-2020 section 9.4.2.56. */ struct ieee80211_ht_operation { u8 primary_chan; @@ -1862,9 +1959,12 @@ struct ieee80211_vht_operation { /** * struct ieee80211_he_cap_elem - HE capabilities element + * @mac_cap_info: HE MAC Capabilities Information + * @phy_cap_info: HE PHY Capabilities Information * - * This structure is the "HE capabilities element" fixed fields as - * described in P802.11ax_D4.0 section 9.4.2.242.2 and 9.4.2.242.3 + * This structure represents the fixed fields of the payload of the + * "HE capabilities element" as described in IEEE Std 802.11ax-2021 + * sections 9.4.2.248.2 and 9.4.2.248.3. 
*/ struct ieee80211_he_cap_elem { u8 mac_cap_info[6]; @@ -1923,35 +2023,45 @@ struct ieee80211_he_mcs_nss_supp { } __packed; /** - * struct ieee80211_he_operation - HE capabilities element + * struct ieee80211_he_operation - HE Operation element + * @he_oper_params: HE Operation Parameters + BSS Color Information + * @he_mcs_nss_set: Basic HE-MCS And NSS Set + * @optional: Optional fields VHT Operation Information, Max Co-Hosted + * BSSID Indicator, and 6 GHz Operation Information * - * This structure is the "HE operation element" fields as - * described in P802.11ax_D4.0 section 9.4.2.243 + * This structure represents the payload of the "HE Operation + * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.249. */ struct ieee80211_he_operation { __le32 he_oper_params; __le16 he_mcs_nss_set; - /* Optional 0,1,3,4,5,7 or 8 bytes: depends on @he_oper_params */ u8 optional[]; } __packed; /** - * struct ieee80211_he_spr - HE spatial reuse element + * struct ieee80211_he_spr - Spatial Reuse Parameter Set element + * @he_sr_control: SR Control + * @optional: Optional fields Non-SRG OBSS PD Max Offset, SRG OBSS PD + * Min Offset, SRG OBSS PD Max Offset, SRG BSS Color + * Bitmap, and SRG Partial BSSID Bitmap * - * This structure is the "HE spatial reuse element" element as - * described in P802.11ax_D4.0 section 9.4.2.241 + * This structure represents the payload of the "Spatial Reuse + * Parameter Set element" as described in IEEE Std 802.11ax-2021 + * section 9.4.2.252. */ struct ieee80211_he_spr { u8 he_sr_control; - /* Optional 0 to 19 bytes: depends on @he_sr_control */ u8 optional[]; } __packed; /** * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field + * @aifsn: ACI/AIFSN + * @ecw_min_max: ECWmin/ECWmax + * @mu_edca_timer: MU EDCA Timer * - * This structure is the "MU AC Parameter Record" fields as - * described in P802.11ax_D4.0 section 9.4.2.245 + * This structure represents the "MU AC Parameter Record" as described + * in IEEE Std 802.11ax-2021 section 9.4.2.251, Figure 9-788p. */ struct ieee80211_he_mu_edca_param_ac_rec { u8 aifsn; @@ -1961,9 +2071,14 @@ struct ieee80211_he_mu_edca_param_ac_rec { /** * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element + * @mu_qos_info: QoS Info + * @ac_be: MU AC_BE Parameter Record + * @ac_bk: MU AC_BK Parameter Record + * @ac_vi: MU AC_VI Parameter Record + * @ac_vo: MU AC_VO Parameter Record * - * This structure is the "MU EDCA Parameter Set element" fields as - * described in P802.11ax_D4.0 section 9.4.2.245 + * This structure represents the payload of the "MU EDCA Parameter Set + * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.251. 
*/ struct ieee80211_mu_edca_param_set { u8 mu_qos_info; @@ -2177,9 +2292,9 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, * enum ieee80211_ap_reg_power - regulatory power for a Access Point * * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode - * @IEEE80211_REG_LPI: Indoor Access Point - * @IEEE80211_REG_SP: Standard power Access Point - * @IEEE80211_REG_VLP: Very low power Access Point + * @IEEE80211_REG_LPI_AP: Indoor Access Point + * @IEEE80211_REG_SP_AP: Standard power Access Point + * @IEEE80211_REG_VLP_AP: Very low power Access Point * @IEEE80211_REG_AP_POWER_AFTER_LAST: internal * @IEEE80211_REG_AP_POWER_MAX: maximum value */ @@ -2567,7 +2682,7 @@ static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len) #define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 /** - * ieee80211_he_6ghz_oper - HE 6 GHz operation Information field + * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field * @primary: primary channel * @control: control flags * @ccfs0: channel center frequency segment 0 @@ -2614,9 +2729,13 @@ enum ieee80211_tx_power_intrpt_type { }; /** - * struct ieee80211_tx_pwr_env + * struct ieee80211_tx_pwr_env - Transmit Power Envelope + * @tx_power_info: Transmit Power Information field + * @tx_power: Maximum Transmit Power field * - * This structure represents the "Transmit Power Envelope element" + * This structure represents the payload of the "Transmit Power + * Envelope element" as described in IEEE Std 802.11ax-2021 section + * 9.4.2.161 */ struct ieee80211_tx_pwr_env { u8 tx_power_info; @@ -4478,7 +4597,7 @@ static inline bool for_each_element_completed(const struct element *element, return (const u8 *)element == (const u8 *)data + datalen; } -/** +/* * RSNX Capabilities: * bits 0-3: Field length (n-1) */ @@ -4721,7 +4840,7 @@ ieee80211_mle_get_bss_param_ch_cnt(const struct ieee80211_multi_link_elem *mle) } /** - * ieee80211_mle_get_eml_sync_delay - returns the medium sync delay + * ieee80211_mle_get_eml_med_sync_delay - returns the medium sync delay * @data: pointer to the multi link EHT IE * * The element is assumed to be of the correct type (BASIC) and big enough, diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index 1ed52441972f..10a1e81434cb 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -53,6 +53,10 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev) case ARPHRD_NONE: case ARPHRD_RAWIP: case ARPHRD_PIMREG: + /* PPP adds its l2 header automatically in ppp_start_xmit(). + * This makes it look like an l3 device to __bpf_redirect() and tcf_mirred_init(). 
+ */ + case ARPHRD_PPP: return false; default: return true; diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 8de6b6e67829..1b9b15a492fa 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -162,8 +162,8 @@ struct team_option { bool per_port; unsigned int array_size; /* != 0 means the option is array */ enum team_option_type type; - int (*init)(struct team *team, struct team_option_inst_info *info); - int (*getter)(struct team *team, struct team_gsetter_ctx *ctx); + void (*init)(struct team *team, struct team_option_inst_info *info); + void (*getter)(struct team *team, struct team_gsetter_ctx *ctx); int (*setter)(struct team *team, struct team_gsetter_ctx *ctx); }; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 6ba71957851e..3028af87716e 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -408,7 +408,7 @@ static inline int __vlan_insert_tag(struct sk_buff *skb, * @mac_len: MAC header length including outer vlan headers * * Inserts the VLAN tag into @skb as part of the payload at offset mac_len - * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * Returns a VLAN tagged skb. This might change skb->head. * * Following the skb_unshare() example, in case of error, the calling function * doesn't have to worry about freeing the original skb. @@ -437,7 +437,7 @@ static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb, * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload - * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * Returns a VLAN tagged skb. This might change skb->head. * * Following the skb_unshare() example, in case of error, the calling function * doesn't have to worry about freeing the original skb. @@ -457,7 +457,7 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload - * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * Returns a VLAN tagged skb. This might change skb->head. * * Following the skb_unshare() example, in case of error, the calling function * doesn't have to worry about freeing the original skb. 
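
/*
 * Editor's sketch following the if_vlan comment fixes above: the insert
 * helpers may reallocate (changing skb->head), and on failure the original
 * skb has already been freed, so callers only check for NULL and re-derive
 * any cached header pointers. "example_push_vlan" is hypothetical.
 */
#include <linux/if_vlan.h>

static struct sk_buff *example_push_vlan(struct sk_buff *skb, u16 vlan_tci)
{
	skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vlan_tci);
	if (!skb)
		return NULL;	/* original skb was consumed on error */
	/* pointers into the old skb->head must be recomputed from skb */
	return skb;
}
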
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h index e6936cb25047..33f21bd85dbf 100644 --- a/include/linux/intel_rapl.h +++ b/include/linux/intel_rapl.h @@ -100,10 +100,16 @@ struct rapl_package; #define RAPL_DOMAIN_NAME_LENGTH 16 +union rapl_reg { + void __iomem *mmio; + u32 msr; + u64 val; +}; + struct rapl_domain { char name[RAPL_DOMAIN_NAME_LENGTH]; enum rapl_domain_type id; - u64 regs[RAPL_DOMAIN_REG_MAX]; + union rapl_reg regs[RAPL_DOMAIN_REG_MAX]; struct powercap_zone power_zone; struct rapl_domain_data rdd; struct rapl_power_limit rpl[NR_POWER_LIMITS]; @@ -116,7 +122,7 @@ struct rapl_domain { }; struct reg_action { - u64 reg; + union rapl_reg reg; u64 mask; u64 value; int err; @@ -143,8 +149,8 @@ struct rapl_if_priv { enum rapl_if_type type; struct powercap_control_type *control_type; enum cpuhp_state pcap_rapl_online; - u64 reg_unit; - u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX]; + union rapl_reg reg_unit; + union rapl_reg regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX]; int limits[RAPL_DOMAIN_MAX]; int (*read_raw)(int id, struct reg_action *ra); int (*write_raw)(int id, struct reg_action *ra); diff --git a/include/linux/iomap.h b/include/linux/iomap.h index e2b836c2e119..fdc6e64f49d6 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -261,9 +261,10 @@ int iomap_file_buffered_write_punch_delalloc(struct inode *inode, int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops); void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count); -struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos); +struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len); bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len); +bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio); int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, const struct iomap_ops *ops); int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, diff --git a/include/linux/ioremap.h b/include/linux/ioremap.h new file mode 100644 index 000000000000..f0e99fc7dd8b --- /dev/null +++ b/include/linux/ioremap.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IOREMAP_H +#define _LINUX_IOREMAP_H + +#include <linux/kasan.h> +#include <asm/pgtable.h> + +#if defined(CONFIG_HAS_IOMEM) || defined(CONFIG_GENERIC_IOREMAP) +/* + * Ioremap often, but not always uses the generic vmalloc area. E.g on + * Power ARCH, it could have different ioremap space. 
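
/*
 * Editor's sketch for the intel_rapl change above: with registers held in a
 * union rapl_reg, an interface driver's read_raw() callback picks the member
 * matching its transport. This MMIO-flavoured example is hypothetical.
 */
static int example_rapl_read_raw(int id, struct reg_action *ra)
{
	if (!ra->reg.mmio)
		return -EINVAL;

	ra->value = readq(ra->reg.mmio);	/* MMIO variant; MSR users read ra->reg.msr */
	ra->value &= ra->mask;
	return 0;
}
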
+ */ +#ifndef IOREMAP_START +#define IOREMAP_START VMALLOC_START +#define IOREMAP_END VMALLOC_END +#endif +static inline bool is_ioremap_addr(const void *x) +{ + unsigned long addr = (unsigned long)kasan_reset_tag(x); + + return addr >= IOREMAP_START && addr < IOREMAP_END; +} +#else +static inline bool is_ioremap_addr(const void *x) +{ + return false; +} +#endif + +#endif /* _LINUX_IOREMAP_H */ diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 839247a4f48e..5883551b1ee8 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -33,6 +33,7 @@ struct ipv6_devconf { __s32 accept_ra_defrtr; __u32 ra_defrtr_metric; __s32 accept_ra_min_hop_limit; + __s32 accept_ra_min_lft; __s32 accept_ra_pinfo; __s32 ignore_routes_with_linkdown; #ifdef CONFIG_IPV6_ROUTER_PREF @@ -199,14 +200,7 @@ struct inet6_cork { u8 tclass; }; -/** - * struct ipv6_pinfo - ipv6 private area - * - * In the struct sock hierarchy (tcp6_sock, upd6_sock, etc) - * this _must_ be the last member, so that inet6_sk_generic - * is able to calculate its offset from the base struct sock - * by using the struct proto->slab_obj_size member. -acme - */ +/* struct ipv6_pinfo - ipv6 private area */ struct ipv6_pinfo { struct in6_addr saddr; struct in6_pktinfo sticky_pktinfo; @@ -306,19 +300,19 @@ struct raw6_sock { __u32 offset; /* checksum offset */ struct icmp6_filter filter; __u32 ip6mr_table; - /* ipv6_pinfo has to be the last member of raw6_sock, see inet6_sk_generic */ + struct ipv6_pinfo inet6; }; struct udp6_sock { struct udp_sock udp; - /* ipv6_pinfo has to be the last member of udp6_sock, see inet6_sk_generic */ + struct ipv6_pinfo inet6; }; struct tcp6_sock { struct tcp_sock tcp; - /* ipv6_pinfo has to be the last member of tcp6_sock, see inet6_sk_generic */ + struct ipv6_pinfo inet6; }; diff --git a/include/linux/kfence.h b/include/linux/kfence.h index 726857a4b680..401af4757514 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -59,15 +59,16 @@ static __always_inline bool is_kfence_address(const void *addr) } /** - * kfence_alloc_pool() - allocate the KFENCE pool via memblock + * kfence_alloc_pool_and_metadata() - allocate the KFENCE pool and KFENCE + * metadata via memblock */ -void __init kfence_alloc_pool(void); +void __init kfence_alloc_pool_and_metadata(void); /** * kfence_init() - perform KFENCE initialization at boot time * - * Requires that kfence_alloc_pool() was called before. This sets up the - * allocation gate timer, and requires that workqueues are available. + * Requires that kfence_alloc_pool_and_metadata() was called before. This sets + * up the allocation gate timer, and requires that workqueues are available. 
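
/*
 * Editor's sketch of the boot ordering the kfence kdoc above requires: the
 * pool and metadata come from memblock early, and kfence_init() runs later
 * once workqueues exist. The example_* placement is illustrative only.
 */
void __init example_early_mm_init(void)
{
	/* memblock is still the page allocator at this point */
	kfence_alloc_pool_and_metadata();
}

void __init example_late_boot(void)
{
	/* workqueues are available now */
	kfence_init();
}
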
*/ void __init kfence_init(void); @@ -223,7 +224,7 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla #else /* CONFIG_KFENCE */ static inline bool is_kfence_address(const void *addr) { return false; } -static inline void kfence_alloc_pool(void) { } +static inline void kfence_alloc_pool_and_metadata(void) { } static inline void kfence_init(void) { } static inline void kfence_shutdown_cache(struct kmem_cache *s) { } static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; } diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 899a314bc487..c2dd786a30e1 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -26,6 +26,22 @@ int ksm_disable(struct mm_struct *mm); int __ksm_enter(struct mm_struct *mm); void __ksm_exit(struct mm_struct *mm); +/* + * To identify zeropages that were mapped by KSM, we reuse the dirty bit + * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when + * deduplicating memory. + */ +#define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte)) + +extern unsigned long ksm_zero_pages; + +static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte) +{ + if (is_ksm_zero_pte(pte)) { + ksm_zero_pages--; + mm->ksm_zero_pages--; + } +} static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) { @@ -95,6 +111,10 @@ static inline void ksm_exit(struct mm_struct *mm) { } +static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte) +{ +} + #ifdef CONFIG_MEMORY_FAILURE static inline void collect_procs_ksm(struct page *page, struct list_head *to_kill, int force_early) diff --git a/include/linux/list.h b/include/linux/list.h index f10344dbad4d..164b4d0e9d2a 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -38,11 +38,92 @@ static inline void INIT_LIST_HEAD(struct list_head *list) WRITE_ONCE(list->prev, list); } +#ifdef CONFIG_LIST_HARDENED + #ifdef CONFIG_DEBUG_LIST -extern bool __list_add_valid(struct list_head *new, - struct list_head *prev, - struct list_head *next); -extern bool __list_del_entry_valid(struct list_head *entry); +# define __list_valid_slowpath +#else +# define __list_valid_slowpath __cold __preserve_most +#endif + +/* + * Performs the full set of list corruption checks before __list_add(). + * On list corruption reports a warning, and returns false. + */ +extern bool __list_valid_slowpath __list_add_valid_or_report(struct list_head *new, + struct list_head *prev, + struct list_head *next); + +/* + * Performs list corruption checks before __list_add(). Returns false if a + * corruption is detected, true otherwise. + * + * With CONFIG_LIST_HARDENED only, performs minimal list integrity checking + * inline to catch non-faulting corruptions, and only if a corruption is + * detected calls the reporting function __list_add_valid_or_report(). + */ +static __always_inline bool __list_add_valid(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + bool ret = true; + + if (!IS_ENABLED(CONFIG_DEBUG_LIST)) { + /* + * With the hardening version, elide checking if next and prev + * are NULL, since the immediate dereference of them below would + * result in a fault if NULL. + * + * With the reduced set of checks, we can afford to inline the + * checks, which also gives the compiler a chance to elide some + * of them completely if they can be proven at compile-time. If + * one of the pre-conditions does not hold, the slow-path will + * show a report which pre-condition failed. 
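+		 *
+		 * ([Editor's illustration, not part of the patch] A stale but
+		 *  still-mapped pointer, e.g. after
+		 *
+		 *	prev->next = stale_node;
+		 *
+		 *  is a non-faulting corruption: the prev->next == next test
+		 *  below catches it and drops to the __cold
+		 *  __list_add_valid_or_report() slow path. A NULL prev, by
+		 *  contrast, faults on the dereference itself, which is why
+		 *  the NULL checks can be elided in the hardened fast path.)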
+ */ + if (likely(next->prev == prev && prev->next == next && new != prev && new != next)) + return true; + ret = false; + } + + ret &= __list_add_valid_or_report(new, prev, next); + return ret; +} + +/* + * Performs the full set of list corruption checks before __list_del_entry(). + * On list corruption reports a warning, and returns false. + */ +extern bool __list_valid_slowpath __list_del_entry_valid_or_report(struct list_head *entry); + +/* + * Performs list corruption checks before __list_del_entry(). Returns false if a + * corruption is detected, true otherwise. + * + * With CONFIG_LIST_HARDENED only, performs minimal list integrity checking + * inline to catch non-faulting corruptions, and only if a corruption is + * detected calls the reporting function __list_del_entry_valid_or_report(). + */ +static __always_inline bool __list_del_entry_valid(struct list_head *entry) +{ + bool ret = true; + + if (!IS_ENABLED(CONFIG_DEBUG_LIST)) { + struct list_head *prev = entry->prev; + struct list_head *next = entry->next; + + /* + * With the hardening version, elide checking if next and prev + * are NULL, LIST_POISON1 or LIST_POISON2, since the immediate + * dereference of them below would result in a fault. + */ + if (likely(prev->next == entry && next->prev == entry)) + return true; + ret = false; + } + + ret &= __list_del_entry_valid_or_report(entry); + return ret; +} #else static inline bool __list_add_valid(struct list_head *new, struct list_head *prev, diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 310f85903c91..dc2844b071c2 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -625,6 +625,12 @@ do { \ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \ } while (0) +#define lockdep_assert_no_hardirq() \ +do { \ + WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \ + !this_cpu_read(hardirqs_enabled))); \ +} while (0) + #define lockdep_assert_preemption_enabled() \ do { \ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ @@ -659,6 +665,7 @@ do { \ # define lockdep_assert_irqs_enabled() do { } while (0) # define lockdep_assert_irqs_disabled() do { } while (0) # define lockdep_assert_in_irq() do { } while (0) +# define lockdep_assert_no_hardirq() do { } while (0) # define lockdep_assert_preemption_enabled() do { } while (0) # define lockdep_assert_preemption_disabled() do { } while (0) diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 7308a1a7599b..f5b7352afaac 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -54,6 +54,7 @@ LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *f LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm) LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm) LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm) +LSM_HOOK(int, 0, fs_context_submount, struct fs_context *fc, struct super_block *reference) LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc, struct fs_context *src_sc) LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc, @@ -316,7 +317,7 @@ LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority) LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk) LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk, struct sock *newsk) -LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, struct sock *sk, u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, const struct sock *sk, u32 
*secid) LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent) LSM_HOOK(int, 0, inet_conn_request, const struct sock *sk, struct sk_buff *skb, struct request_sock *req) diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h index 295548cca8b3..e41c70ac7744 100644 --- a/include/linux/maple_tree.h +++ b/include/linux/maple_tree.h @@ -29,14 +29,12 @@ #define MAPLE_NODE_SLOTS 31 /* 256 bytes including ->parent */ #define MAPLE_RANGE64_SLOTS 16 /* 256 bytes */ #define MAPLE_ARANGE64_SLOTS 10 /* 240 bytes */ -#define MAPLE_ARANGE64_META_MAX 15 /* Out of range for metadata */ #define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 1) #else /* 32bit sizes */ #define MAPLE_NODE_SLOTS 63 /* 256 bytes including ->parent */ #define MAPLE_RANGE64_SLOTS 32 /* 256 bytes */ #define MAPLE_ARANGE64_SLOTS 21 /* 240 bytes */ -#define MAPLE_ARANGE64_META_MAX 31 /* Out of range for metadata */ #define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 2) #endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */ @@ -184,13 +182,23 @@ enum maple_type { #ifdef CONFIG_LOCKDEP typedef struct lockdep_map *lockdep_map_p; -#define mt_lock_is_held(mt) lock_is_held(mt->ma_external_lock) +#define mt_lock_is_held(mt) \ + (!(mt)->ma_external_lock || lock_is_held((mt)->ma_external_lock)) + +#define mt_write_lock_is_held(mt) \ + (!(mt)->ma_external_lock || \ + lock_is_held_type((mt)->ma_external_lock, 0)) + #define mt_set_external_lock(mt, lock) \ (mt)->ma_external_lock = &(lock)->dep_map + +#define mt_on_stack(mt) (mt).ma_external_lock = NULL #else typedef struct { /* nothing */ } lockdep_map_p; -#define mt_lock_is_held(mt) 1 +#define mt_lock_is_held(mt) 1 +#define mt_write_lock_is_held(mt) 1 #define mt_set_external_lock(mt, lock) do { } while (0) +#define mt_on_stack(mt) do { } while (0) #endif /* @@ -212,8 +220,8 @@ struct maple_tree { spinlock_t ma_lock; lockdep_map_p ma_external_lock; }; - void __rcu *ma_root; unsigned int ma_flags; + void __rcu *ma_root; }; /** @@ -458,7 +466,7 @@ void *mas_find(struct ma_state *mas, unsigned long max); void *mas_find_range(struct ma_state *mas, unsigned long max); void *mas_find_rev(struct ma_state *mas, unsigned long min); void *mas_find_range_rev(struct ma_state *mas, unsigned long max); -int mas_preallocate(struct ma_state *mas, gfp_t gfp); +int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp); bool mas_is_err(struct ma_state *mas); bool mas_nomem(struct ma_state *mas, gfp_t gfp); @@ -531,6 +539,22 @@ static inline void mas_reset(struct ma_state *mas) */ #define mas_for_each(__mas, __entry, __max) \ while (((__entry) = mas_find((__mas), (__max))) != NULL) +/** + * __mas_set_range() - Set up Maple Tree operation state to a sub-range of the + * current location. + * @mas: Maple Tree operation state. + * @start: New start of range in the Maple Tree. + * @last: New end of range in the Maple Tree. + * + * set the internal maple state values to a sub-range. + * Please use mas_set_range() if you do not know where you are in the tree. + */ +static inline void __mas_set_range(struct ma_state *mas, unsigned long start, + unsigned long last) +{ + mas->index = start; + mas->last = last; +} /** * mas_set_range() - Set up Maple Tree operation state for a different index. 
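[Editor's sketch -- not part of the patch] The __mas_set_range() helper
added above pairs with the reworked mas_preallocate(), which now sizes its
allocation for the entry being stored. A hypothetical caller that is
already positioned in the tree (names first/last/entry are placeholders):

	__mas_set_range(&mas, first, last);		/* narrow in place */
	if (mas_preallocate(&mas, entry, GFP_KERNEL))	/* now takes @entry */
		return -ENOMEM;
	mas_store_prealloc(&mas, entry);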
@@ -545,9 +569,8 @@ static inline void mas_reset(struct ma_state *mas)
 static inline void mas_set_range(struct ma_state *mas, unsigned long start,
 		unsigned long last)
 {
-	mas->index = start;
-	mas->last = last;
-	mas->node = MAS_START;
+	__mas_set_range(mas, start, last);
+	mas->node = MAS_START;
 }
 
 /**
@@ -662,10 +685,11 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
 * mt_for_each - Iterate over each entry starting at index until max.
 * @__tree: The Maple Tree
 * @__entry: The current entry
- * @__index: The index to update to track the location in the tree
+ * @__index: The index to start the search from. Subsequently used as iterator.
 * @__max: The maximum limit for @index
 *
- * Note: Will not return the zero entry.
+ * This iterator skips all entries that resolve to a NULL pointer,
+ * e.g. entries that have been reserved with XA_ZERO_ENTRY.
 */
 #define mt_for_each(__tree, __entry, __index, __max) \
	for (__entry = mt_find(__tree, &(__index), __max); \
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 0f06c2287b52..9b54c4f0677f 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -25,6 +25,7 @@
 #define MARVELL_PHY_ID_88X3310		0x002b09a0
 #define MARVELL_PHY_ID_88E2110		0x002b09b0
 #define MARVELL_PHY_ID_88X2222		0x01410f10
+#define MARVELL_PHY_ID_88Q2110		0x002b0980
 
 /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */
 #define MARVELL_PHY_ID_88E1111_FINISAR	0x01ff0cc0
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index c1b7008826e5..8fa23bdcedbf 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -537,6 +537,8 @@ static inline void mii_c73_mod_linkmode(unsigned long *adv, u16 *lpa)
 
 int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
 int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int __mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
+		     u16 set);
 int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
			     u16 mask, u16 set);
 
@@ -564,6 +566,30 @@ int mdiobus_c45_modify(struct mii_bus *bus, int addr, int devad, u32 regnum,
 int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad,
			       u32 regnum, u16 mask, u16 set);
 
+static inline int __mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
+{
+	return __mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
+}
+
+static inline int __mdiodev_write(struct mdio_device *mdiodev, u32 regnum,
+				  u16 val)
+{
+	return __mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val);
+}
+
+static inline int __mdiodev_modify(struct mdio_device *mdiodev, u32 regnum,
+				   u16 mask, u16 set)
+{
+	return __mdiobus_modify(mdiodev->bus, mdiodev->addr, regnum, mask, set);
+}
+
+static inline int __mdiodev_modify_changed(struct mdio_device *mdiodev,
+					   u32 regnum, u16 mask, u16 set)
+{
+	return __mdiobus_modify_changed(mdiodev->bus, mdiodev->addr, regnum,
+					mask, set);
+}
+
 static inline int mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
 {
	return mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
 }
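[Editor's sketch -- not part of the patch] The new __mdiodev_* wrappers
mirror the existing mdiodev_* ones for callers that already hold the MDIO
bus lock (bus->mdio_lock); a hypothetical locked helper:

	/* caller context: mdiodev->bus->mdio_lock is already held */
	static int example_locked_powerup(struct mdio_device *mdiodev)
	{
		/* clear BMCR_PDOWN without dropping/retaking the bus lock */
		return __mdiodev_modify(mdiodev, MII_BMCR, BMCR_PDOWN, 0);
	}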
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f71ff9f0ec81..1c1072e3ca06 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -581,9 +581,7 @@ extern void *alloc_large_system_hash(const char *tablename,
					     unsigned long high_limit);
 #define HASH_EARLY	0x00000001	/* Allocating during early boot? */
-#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
-					 * shift passed via *_hash_shift */
-#define HASH_ZERO	0x00000004	/* Zero allocated hash table */
+#define HASH_ZERO	0x00000002	/* Zero allocated hash table */
 
 /* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
@@ -596,13 +594,11 @@ extern int hashdist;		/* Distribute hashes across NUMA nodes? */
 #endif
 
 #ifdef CONFIG_MEMTEST
-extern phys_addr_t early_memtest_bad_size;	/* Size of faulty ram found by memtest */
-extern bool early_memtest_done;		/* Was early memtest done? */
-extern void early_memtest(phys_addr_t start, phys_addr_t end);
+void early_memtest(phys_addr_t start, phys_addr_t end);
+void memtest_report_meminfo(struct seq_file *m);
 #else
-static inline void early_memtest(phys_addr_t start, phys_addr_t end)
-{
-}
+static inline void early_memtest(phys_addr_t start, phys_addr_t end) { }
+static inline void memtest_report_meminfo(struct seq_file *m) { }
 #endif
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5818af8eca5a..ab94ad4597d0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -61,7 +61,6 @@ struct mem_cgroup_reclaim_cookie {
 #ifdef CONFIG_MEMCG
 
 #define MEM_CGROUP_ID_SHIFT	16
-#define MEM_CGROUP_ID_MAX	USHRT_MAX
 
 struct mem_cgroup_id {
	int id;
@@ -112,6 +111,9 @@ struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];
 
+	/* Non-hierarchical (CPU aggregated) state */
+	long state_local[NR_VM_NODE_STAT_ITEMS];
+
	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
 };
@@ -284,6 +286,11 @@ struct mem_cgroup {
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];
 
+	/*
+	 * Hint of reclaim pressure for socket memory management. Note
+	 * that this indicator should NOT be used in legacy cgroup mode
+	 * where socket memory is accounted/charged separately.
+	 */
	unsigned long		socket_pressure;
 
	/* Legacy tcp memory accounting */
@@ -583,7 +590,7 @@ static inline void mem_cgroup_protection(struct mem_cgroup *root,
	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
-	 * mem_cgroup_protected calculation is not robust enough to keep
+	 * mem_cgroup_calculate_protection is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim target. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
@@ -861,8 +868,7 @@ static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
- * Returns the parent memcg, or NULL if this is the root or the memory
- * controller is in legacy no-hierarchy mode.
+ * Returns the parent memcg, or NULL if this is the root.
*/ static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) { @@ -1020,14 +1026,12 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, { struct mem_cgroup_per_node *pn; long x = 0; - int cpu; if (mem_cgroup_disabled()) return node_page_state(lruvec_pgdat(lruvec), idx); pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - for_each_possible_cpu(cpu) - x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu); + x = READ_ONCE(pn->lruvec_stats.state_local[idx]); #ifdef CONFIG_SMP if (x < 0) x = 0; @@ -1158,7 +1162,6 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 -#define MEM_CGROUP_ID_MAX 0 static inline struct mem_cgroup *folio_memcg(struct folio *folio) { @@ -1727,8 +1730,8 @@ void mem_cgroup_sk_alloc(struct sock *sk); void mem_cgroup_sk_free(struct sock *sk); static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) { - if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure) - return true; + if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) + return !!memcg->tcpmem_pressure; do { if (time_before(jiffies, READ_ONCE(memcg->socket_pressure))) return true; @@ -1761,7 +1764,7 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); void __memcg_kmem_uncharge_page(struct page *page, int order); struct obj_cgroup *get_obj_cgroup_from_current(void); -struct obj_cgroup *get_obj_cgroup_from_page(struct page *page); +struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio); int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size); void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); @@ -1845,7 +1848,7 @@ static inline void __memcg_kmem_uncharge_page(struct page *page, int order) { } -static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page) +static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio) { return NULL; } diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h index fc9647b1b4f9..437441cdf78f 100644 --- a/include/linux/memory-tiers.h +++ b/include/linux/memory-tiers.h @@ -33,7 +33,7 @@ struct memory_dev_type { #ifdef CONFIG_NUMA extern bool numa_demotion_enabled; struct memory_dev_type *alloc_memory_type(int adistance); -void destroy_memory_type(struct memory_dev_type *memtype); +void put_memory_type(struct memory_dev_type *memtype); void init_node_memory_type(int node, struct memory_dev_type *default_type); void clear_node_memory_type(int node, struct memory_dev_type *memtype); #ifdef CONFIG_MIGRATION @@ -68,7 +68,7 @@ static inline struct memory_dev_type *alloc_memory_type(int adistance) return NULL; } -static inline void destroy_memory_type(struct memory_dev_type *memtype) +static inline void put_memory_type(struct memory_dev_type *memtype) { } diff --git a/include/linux/memory.h b/include/linux/memory.h index 31343566c221..f53cfdaaaa41 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -77,11 +77,7 @@ struct memory_block { */ struct zone *zone; struct device dev; - /* - * Number of vmemmap pages. These pages - * lay at the beginning of the memory block. 
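[Editor's sketch -- not part of the patch] The destroy_memory_type() ->
put_memory_type() rename in the memory-tiers.h hunk above reflects that
memory types are reference counted rather than destroyed unconditionally;
a hypothetical driver pairing (adistance and nid are placeholders):

	struct memory_dev_type *mtype = alloc_memory_type(adistance);

	init_node_memory_type(nid, mtype);
	/* ... node is in use ... */
	clear_node_memory_type(nid, mtype);
	put_memory_type(mtype);		/* drop our reference */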
- */ - unsigned long nr_vmemmap_pages; + struct vmem_altmap *altmap; struct memory_group *group; /* group (if any) for this block */ struct list_head group_next; /* next block inside memory group */ #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG) @@ -147,7 +143,7 @@ static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri) extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); int create_memory_block_devices(unsigned long start, unsigned long size, - unsigned long vmemmap_pages, + struct vmem_altmap *altmap, struct memory_group *group); void remove_memory_block_devices(unsigned long start, unsigned long size); extern void memory_dev_init(void); diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 013c69753c91..7d2076583494 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -97,6 +97,8 @@ typedef int __bitwise mhp_t; * To do so, we will use the beginning of the hot-added range to build * the page tables for the memmap array that describes the entire range. * Only selected architectures support it with SPARSE_VMEMMAP. + * This is only a hint, the core kernel can decide to not do this based on + * different alignment checks. */ #define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1)) /* @@ -354,7 +356,6 @@ extern struct zone *zone_for_pfn_range(int online_type, int nid, extern int arch_create_linear_mapping(int nid, u64 start, u64 size, struct mhp_params *params); void arch_remove_linear_mapping(u64 start, u64 size); -extern bool mhp_supports_memmap_on_memory(unsigned long size); #endif /* CONFIG_MEMORY_HOTPLUG */ #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/mfd/cs42l43-regs.h b/include/linux/mfd/cs42l43-regs.h new file mode 100644 index 000000000000..c39a49269cb7 --- /dev/null +++ b/include/linux/mfd/cs42l43-regs.h @@ -0,0 +1,1184 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * cs42l43 register definitions + * + * Copyright (c) 2022-2023 Cirrus Logic, Inc. and + * Cirrus Logic International Semiconductor Ltd. 
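[Editor's sketch -- not part of the patch] MHP_MEMMAP_ON_MEMORY being "only
a hint" (memory_hotplug.h hunk above) means a caller requests it but must
not assume it took effect; a hypothetical dax/kmem-style caller, with an
illustrative resource name:

	rc = add_memory_driver_managed(nid, range.start, range_len(&range),
				       "System RAM (kmem)",
				       MHP_MEMMAP_ON_MEMORY);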
+ */ + +#ifndef CS42L43_CORE_REGS_H +#define CS42L43_CORE_REGS_H + +/* Registers */ +#define CS42L43_GEN_INT_STAT_1 0x000000C0 +#define CS42L43_GEN_INT_MASK_1 0x000000C1 +#define CS42L43_DEVID 0x00003000 +#define CS42L43_REVID 0x00003004 +#define CS42L43_RELID 0x0000300C +#define CS42L43_SFT_RESET 0x00003020 +#define CS42L43_DRV_CTRL1 0x00006004 +#define CS42L43_DRV_CTRL3 0x0000600C +#define CS42L43_DRV_CTRL4 0x00006010 +#define CS42L43_DRV_CTRL_5 0x00006014 +#define CS42L43_GPIO_CTRL1 0x00006034 +#define CS42L43_GPIO_CTRL2 0x00006038 +#define CS42L43_GPIO_STS 0x0000603C +#define CS42L43_GPIO_FN_SEL 0x00006040 +#define CS42L43_MCLK_SRC_SEL 0x00007004 +#define CS42L43_CCM_BLK_CLK_CONTROL 0x00007010 +#define CS42L43_SAMPLE_RATE1 0x00007014 +#define CS42L43_SAMPLE_RATE2 0x00007018 +#define CS42L43_SAMPLE_RATE3 0x0000701C +#define CS42L43_SAMPLE_RATE4 0x00007020 +#define CS42L43_PLL_CONTROL 0x00007034 +#define CS42L43_FS_SELECT1 0x00007038 +#define CS42L43_FS_SELECT2 0x0000703C +#define CS42L43_FS_SELECT3 0x00007040 +#define CS42L43_FS_SELECT4 0x00007044 +#define CS42L43_PDM_CONTROL 0x0000704C +#define CS42L43_ASP_CLK_CONFIG1 0x00007058 +#define CS42L43_ASP_CLK_CONFIG2 0x0000705C +#define CS42L43_OSC_DIV_SEL 0x00007068 +#define CS42L43_ADC_B_CTRL1 0x00008000 +#define CS42L43_ADC_B_CTRL2 0x00008004 +#define CS42L43_DECIM_HPF_WNF_CTRL1 0x0000803C +#define CS42L43_DECIM_HPF_WNF_CTRL2 0x00008040 +#define CS42L43_DECIM_HPF_WNF_CTRL3 0x00008044 +#define CS42L43_DECIM_HPF_WNF_CTRL4 0x00008048 +#define CS42L43_DMIC_PDM_CTRL 0x0000804C +#define CS42L43_DECIM_VOL_CTRL_CH1_CH2 0x00008050 +#define CS42L43_DECIM_VOL_CTRL_CH3_CH4 0x00008054 +#define CS42L43_DECIM_VOL_CTRL_UPDATE 0x00008058 +#define CS42L43_INTP_VOLUME_CTRL1 0x00009008 +#define CS42L43_INTP_VOLUME_CTRL2 0x0000900C +#define CS42L43_AMP1_2_VOL_RAMP 0x00009010 +#define CS42L43_ASP_CTRL 0x0000A000 +#define CS42L43_ASP_FSYNC_CTRL1 0x0000A004 +#define CS42L43_ASP_FSYNC_CTRL2 0x0000A008 +#define CS42L43_ASP_FSYNC_CTRL3 0x0000A00C +#define CS42L43_ASP_FSYNC_CTRL4 0x0000A010 +#define CS42L43_ASP_DATA_CTRL 0x0000A018 +#define CS42L43_ASP_RX_EN 0x0000A020 +#define CS42L43_ASP_TX_EN 0x0000A024 +#define CS42L43_ASP_RX_CH1_CTRL 0x0000A028 +#define CS42L43_ASP_RX_CH2_CTRL 0x0000A02C +#define CS42L43_ASP_RX_CH3_CTRL 0x0000A030 +#define CS42L43_ASP_RX_CH4_CTRL 0x0000A034 +#define CS42L43_ASP_RX_CH5_CTRL 0x0000A038 +#define CS42L43_ASP_RX_CH6_CTRL 0x0000A03C +#define CS42L43_ASP_TX_CH1_CTRL 0x0000A068 +#define CS42L43_ASP_TX_CH2_CTRL 0x0000A06C +#define CS42L43_ASP_TX_CH3_CTRL 0x0000A070 +#define CS42L43_ASP_TX_CH4_CTRL 0x0000A074 +#define CS42L43_ASP_TX_CH5_CTRL 0x0000A078 +#define CS42L43_ASP_TX_CH6_CTRL 0x0000A07C +#define CS42L43_OTP_REVISION_ID 0x0000B02C +#define CS42L43_ASPTX1_INPUT 0x0000C200 +#define CS42L43_ASPTX2_INPUT 0x0000C210 +#define CS42L43_ASPTX3_INPUT 0x0000C220 +#define CS42L43_ASPTX4_INPUT 0x0000C230 +#define CS42L43_ASPTX5_INPUT 0x0000C240 +#define CS42L43_ASPTX6_INPUT 0x0000C250 +#define CS42L43_SWIRE_DP1_CH1_INPUT 0x0000C280 +#define CS42L43_SWIRE_DP1_CH2_INPUT 0x0000C290 +#define CS42L43_SWIRE_DP1_CH3_INPUT 0x0000C2A0 +#define CS42L43_SWIRE_DP1_CH4_INPUT 0x0000C2B0 +#define CS42L43_SWIRE_DP2_CH1_INPUT 0x0000C2C0 +#define CS42L43_SWIRE_DP2_CH2_INPUT 0x0000C2D0 +#define CS42L43_SWIRE_DP3_CH1_INPUT 0x0000C2E0 +#define CS42L43_SWIRE_DP3_CH2_INPUT 0x0000C2F0 +#define CS42L43_SWIRE_DP4_CH1_INPUT 0x0000C300 +#define CS42L43_SWIRE_DP4_CH2_INPUT 0x0000C310 +#define CS42L43_ASRC_INT1_INPUT1 0x0000C400 +#define CS42L43_ASRC_INT2_INPUT1 
0x0000C410 +#define CS42L43_ASRC_INT3_INPUT1 0x0000C420 +#define CS42L43_ASRC_INT4_INPUT1 0x0000C430 +#define CS42L43_ASRC_DEC1_INPUT1 0x0000C440 +#define CS42L43_ASRC_DEC2_INPUT1 0x0000C450 +#define CS42L43_ASRC_DEC3_INPUT1 0x0000C460 +#define CS42L43_ASRC_DEC4_INPUT1 0x0000C470 +#define CS42L43_ISRC1INT1_INPUT1 0x0000C500 +#define CS42L43_ISRC1INT2_INPUT1 0x0000C510 +#define CS42L43_ISRC1DEC1_INPUT1 0x0000C520 +#define CS42L43_ISRC1DEC2_INPUT1 0x0000C530 +#define CS42L43_ISRC2INT1_INPUT1 0x0000C540 +#define CS42L43_ISRC2INT2_INPUT1 0x0000C550 +#define CS42L43_ISRC2DEC1_INPUT1 0x0000C560 +#define CS42L43_ISRC2DEC2_INPUT1 0x0000C570 +#define CS42L43_EQ1MIX_INPUT1 0x0000C580 +#define CS42L43_EQ1MIX_INPUT2 0x0000C584 +#define CS42L43_EQ1MIX_INPUT3 0x0000C588 +#define CS42L43_EQ1MIX_INPUT4 0x0000C58C +#define CS42L43_EQ2MIX_INPUT1 0x0000C590 +#define CS42L43_EQ2MIX_INPUT2 0x0000C594 +#define CS42L43_EQ2MIX_INPUT3 0x0000C598 +#define CS42L43_EQ2MIX_INPUT4 0x0000C59C +#define CS42L43_SPDIF1_INPUT1 0x0000C600 +#define CS42L43_SPDIF2_INPUT1 0x0000C610 +#define CS42L43_AMP1MIX_INPUT1 0x0000C620 +#define CS42L43_AMP1MIX_INPUT2 0x0000C624 +#define CS42L43_AMP1MIX_INPUT3 0x0000C628 +#define CS42L43_AMP1MIX_INPUT4 0x0000C62C +#define CS42L43_AMP2MIX_INPUT1 0x0000C630 +#define CS42L43_AMP2MIX_INPUT2 0x0000C634 +#define CS42L43_AMP2MIX_INPUT3 0x0000C638 +#define CS42L43_AMP2MIX_INPUT4 0x0000C63C +#define CS42L43_AMP3MIX_INPUT1 0x0000C640 +#define CS42L43_AMP3MIX_INPUT2 0x0000C644 +#define CS42L43_AMP3MIX_INPUT3 0x0000C648 +#define CS42L43_AMP3MIX_INPUT4 0x0000C64C +#define CS42L43_AMP4MIX_INPUT1 0x0000C650 +#define CS42L43_AMP4MIX_INPUT2 0x0000C654 +#define CS42L43_AMP4MIX_INPUT3 0x0000C658 +#define CS42L43_AMP4MIX_INPUT4 0x0000C65C +#define CS42L43_ASRC_INT_ENABLES 0x0000E000 +#define CS42L43_ASRC_DEC_ENABLES 0x0000E004 +#define CS42L43_PDNCNTL 0x00010000 +#define CS42L43_RINGSENSE_DEB_CTRL 0x0001001C +#define CS42L43_TIPSENSE_DEB_CTRL 0x00010020 +#define CS42L43_TIP_RING_SENSE_INTERRUPT_STATUS 0x00010028 +#define CS42L43_HS2 0x00010040 +#define CS42L43_HS_STAT 0x00010048 +#define CS42L43_MCU_SW_INTERRUPT 0x00010094 +#define CS42L43_STEREO_MIC_CTRL 0x000100A4 +#define CS42L43_STEREO_MIC_CLAMP_CTRL 0x000100C4 +#define CS42L43_BLOCK_EN2 0x00010104 +#define CS42L43_BLOCK_EN3 0x00010108 +#define CS42L43_BLOCK_EN4 0x0001010C +#define CS42L43_BLOCK_EN5 0x00010110 +#define CS42L43_BLOCK_EN6 0x00010114 +#define CS42L43_BLOCK_EN7 0x00010118 +#define CS42L43_BLOCK_EN8 0x0001011C +#define CS42L43_BLOCK_EN9 0x00010120 +#define CS42L43_BLOCK_EN10 0x00010124 +#define CS42L43_BLOCK_EN11 0x00010128 +#define CS42L43_TONE_CH1_CTRL 0x00010134 +#define CS42L43_TONE_CH2_CTRL 0x00010138 +#define CS42L43_MIC_DETECT_CONTROL_1 0x00011074 +#define CS42L43_DETECT_STATUS_1 0x0001107C +#define CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL 0x00011090 +#define CS42L43_MIC_DETECT_CONTROL_ANDROID 0x000110B0 +#define CS42L43_ISRC1_CTRL 0x00012004 +#define CS42L43_ISRC2_CTRL 0x00013004 +#define CS42L43_CTRL_REG 0x00014000 +#define CS42L43_FDIV_FRAC 0x00014004 +#define CS42L43_CAL_RATIO 0x00014008 +#define CS42L43_SPI_CLK_CONFIG1 0x00016004 +#define CS42L43_SPI_CONFIG1 0x00016010 +#define CS42L43_SPI_CONFIG2 0x00016014 +#define CS42L43_SPI_CONFIG3 0x00016018 +#define CS42L43_SPI_CONFIG4 0x00016024 +#define CS42L43_SPI_STATUS1 0x00016100 +#define CS42L43_SPI_STATUS2 0x00016104 +#define CS42L43_TRAN_CONFIG1 0x00016200 +#define CS42L43_TRAN_CONFIG2 0x00016204 +#define CS42L43_TRAN_CONFIG3 0x00016208 +#define CS42L43_TRAN_CONFIG4 0x0001620C 
+#define CS42L43_TRAN_CONFIG5 0x00016220 +#define CS42L43_TRAN_CONFIG6 0x00016224 +#define CS42L43_TRAN_CONFIG7 0x00016228 +#define CS42L43_TRAN_CONFIG8 0x0001622C +#define CS42L43_TRAN_STATUS1 0x00016300 +#define CS42L43_TRAN_STATUS2 0x00016304 +#define CS42L43_TRAN_STATUS3 0x00016308 +#define CS42L43_TX_DATA 0x00016400 +#define CS42L43_RX_DATA 0x00016600 +#define CS42L43_DACCNFG1 0x00017000 +#define CS42L43_DACCNFG2 0x00017004 +#define CS42L43_HPPATHVOL 0x0001700C +#define CS42L43_PGAVOL 0x00017014 +#define CS42L43_LOADDETRESULTS 0x00017018 +#define CS42L43_LOADDETENA 0x00017024 +#define CS42L43_CTRL 0x00017028 +#define CS42L43_COEFF_DATA_IN0 0x00018000 +#define CS42L43_COEFF_RD_WR0 0x00018008 +#define CS42L43_INIT_DONE0 0x00018010 +#define CS42L43_START_EQZ0 0x00018014 +#define CS42L43_MUTE_EQ_IN0 0x0001801C +#define CS42L43_DECIM_INT 0x0001B000 +#define CS42L43_EQ_INT 0x0001B004 +#define CS42L43_ASP_INT 0x0001B008 +#define CS42L43_PLL_INT 0x0001B00C +#define CS42L43_SOFT_INT 0x0001B010 +#define CS42L43_SWIRE_INT 0x0001B014 +#define CS42L43_MSM_INT 0x0001B018 +#define CS42L43_ACC_DET_INT 0x0001B01C +#define CS42L43_I2C_TGT_INT 0x0001B020 +#define CS42L43_SPI_MSTR_INT 0x0001B024 +#define CS42L43_SW_TO_SPI_BRIDGE_INT 0x0001B028 +#define CS42L43_OTP_INT 0x0001B02C +#define CS42L43_CLASS_D_AMP_INT 0x0001B030 +#define CS42L43_GPIO_INT 0x0001B034 +#define CS42L43_ASRC_INT 0x0001B038 +#define CS42L43_HPOUT_INT 0x0001B03C +#define CS42L43_DECIM_MASK 0x0001B0A0 +#define CS42L43_EQ_MIX_MASK 0x0001B0A4 +#define CS42L43_ASP_MASK 0x0001B0A8 +#define CS42L43_PLL_MASK 0x0001B0AC +#define CS42L43_SOFT_MASK 0x0001B0B0 +#define CS42L43_SWIRE_MASK 0x0001B0B4 +#define CS42L43_MSM_MASK 0x0001B0B8 +#define CS42L43_ACC_DET_MASK 0x0001B0BC +#define CS42L43_I2C_TGT_MASK 0x0001B0C0 +#define CS42L43_SPI_MSTR_MASK 0x0001B0C4 +#define CS42L43_SW_TO_SPI_BRIDGE_MASK 0x0001B0C8 +#define CS42L43_OTP_MASK 0x0001B0CC +#define CS42L43_CLASS_D_AMP_MASK 0x0001B0D0 +#define CS42L43_GPIO_INT_MASK 0x0001B0D4 +#define CS42L43_ASRC_MASK 0x0001B0D8 +#define CS42L43_HPOUT_MASK 0x0001B0DC +#define CS42L43_DECIM_INT_SHADOW 0x0001B300 +#define CS42L43_EQ_MIX_INT_SHADOW 0x0001B304 +#define CS42L43_ASP_INT_SHADOW 0x0001B308 +#define CS42L43_PLL_INT_SHADOW 0x0001B30C +#define CS42L43_SOFT_INT_SHADOW 0x0001B310 +#define CS42L43_SWIRE_INT_SHADOW 0x0001B314 +#define CS42L43_MSM_INT_SHADOW 0x0001B318 +#define CS42L43_ACC_DET_INT_SHADOW 0x0001B31C +#define CS42L43_I2C_TGT_INT_SHADOW 0x0001B320 +#define CS42L43_SPI_MSTR_INT_SHADOW 0x0001B324 +#define CS42L43_SW_TO_SPI_BRIDGE_SHADOW 0x0001B328 +#define CS42L43_OTP_INT_SHADOW 0x0001B32C +#define CS42L43_CLASS_D_AMP_INT_SHADOW 0x0001B330 +#define CS42L43_GPIO_SHADOW 0x0001B334 +#define CS42L43_ASRC_SHADOW 0x0001B338 +#define CS42L43_HP_OUT_SHADOW 0x0001B33C +#define CS42L43_BOOT_CONTROL 0x00101000 +#define CS42L43_BLOCK_EN 0x00101008 +#define CS42L43_SHUTTER_CONTROL 0x0010100C +#define CS42L43_MCU_SW_REV 0x00114000 +#define CS42L43_PATCH_START_ADDR 0x00114004 +#define CS42L43_NEED_CONFIGS 0x0011400C +#define CS42L43_BOOT_STATUS 0x0011401C +#define CS42L43_FW_SH_BOOT_CFG_NEED_CONFIGS 0x0011F8F8 +#define CS42L43_FW_MISSION_CTRL_NEED_CONFIGS 0x0011FE00 +#define CS42L43_FW_MISSION_CTRL_HAVE_CONFIGS 0x0011FE04 +#define CS42L43_FW_MISSION_CTRL_MM_CTRL_SELECTION 0x0011FE0C +#define CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_REG 0x0011FE10 +#define CS42L43_MCU_RAM_MAX 0x0011FFFF + +/* CS42L43_DEVID */ +#define CS42L43_DEVID_VAL 0x00042A43 + +/* CS42L43_GEN_INT_STAT_1 */ +#define CS42L43_INT_STAT_GEN1_MASK 
0x00000001 +#define CS42L43_INT_STAT_GEN1_SHIFT 0 + +/* CS42L43_SFT_RESET */ +#define CS42L43_SFT_RESET_MASK 0xFF000000 +#define CS42L43_SFT_RESET_SHIFT 24 + +#define CS42L43_SFT_RESET_VAL 0x5A000000 + +/* CS42L43_DRV_CTRL1 */ +#define CS42L43_ASP_DOUT_DRV_MASK 0x00038000 +#define CS42L43_ASP_DOUT_DRV_SHIFT 15 +#define CS42L43_ASP_FSYNC_DRV_MASK 0x00000E00 +#define CS42L43_ASP_FSYNC_DRV_SHIFT 9 +#define CS42L43_ASP_BCLK_DRV_MASK 0x000001C0 +#define CS42L43_ASP_BCLK_DRV_SHIFT 6 + +/* CS42L43_DRV_CTRL3 */ +#define CS42L43_I2C_ADDR_DRV_MASK 0x30000000 +#define CS42L43_I2C_ADDR_DRV_SHIFT 28 +#define CS42L43_I2C_SDA_DRV_MASK 0x0C000000 +#define CS42L43_I2C_SDA_DRV_SHIFT 26 +#define CS42L43_PDMOUT2_CLK_DRV_MASK 0x00E00000 +#define CS42L43_PDMOUT2_CLK_DRV_SHIFT 21 +#define CS42L43_PDMOUT2_DATA_DRV_MASK 0x001C0000 +#define CS42L43_PDMOUT2_DATA_DRV_SHIFT 18 +#define CS42L43_PDMOUT1_CLK_DRV_MASK 0x00038000 +#define CS42L43_PDMOUT1_CLK_DRV_SHIFT 15 +#define CS42L43_PDMOUT1_DATA_DRV_MASK 0x00007000 +#define CS42L43_PDMOUT1_DATA_DRV_SHIFT 12 +#define CS42L43_SPI_MISO_DRV_MASK 0x00000038 +#define CS42L43_SPI_MISO_DRV_SHIFT 3 + +/* CS42L43_DRV_CTRL4 */ +#define CS42L43_GPIO3_DRV_MASK 0x00000E00 +#define CS42L43_GPIO3_DRV_SHIFT 9 +#define CS42L43_GPIO2_DRV_MASK 0x000001C0 +#define CS42L43_GPIO2_DRV_SHIFT 6 +#define CS42L43_GPIO1_DRV_MASK 0x00000038 +#define CS42L43_GPIO1_DRV_SHIFT 3 + +/* CS42L43_DRV_CTRL_5 */ +#define CS42L43_I2C_SCL_DRV_MASK 0x18000000 +#define CS42L43_I2C_SCL_DRV_SHIFT 27 +#define CS42L43_SPI_SCK_DRV_MASK 0x07000000 +#define CS42L43_SPI_SCK_DRV_SHIFT 24 +#define CS42L43_SPI_MOSI_DRV_MASK 0x00E00000 +#define CS42L43_SPI_MOSI_DRV_SHIFT 21 +#define CS42L43_SPI_SSB_DRV_MASK 0x001C0000 +#define CS42L43_SPI_SSB_DRV_SHIFT 18 +#define CS42L43_ASP_DIN_DRV_MASK 0x000001C0 +#define CS42L43_ASP_DIN_DRV_SHIFT 6 + +/* CS42L43_GPIO_CTRL1 */ +#define CS42L43_GPIO3_POL_MASK 0x00040000 +#define CS42L43_GPIO3_POL_SHIFT 18 +#define CS42L43_GPIO2_POL_MASK 0x00020000 +#define CS42L43_GPIO2_POL_SHIFT 17 +#define CS42L43_GPIO1_POL_MASK 0x00010000 +#define CS42L43_GPIO1_POL_SHIFT 16 +#define CS42L43_GPIO3_LVL_MASK 0x00000400 +#define CS42L43_GPIO3_LVL_SHIFT 10 +#define CS42L43_GPIO2_LVL_MASK 0x00000200 +#define CS42L43_GPIO2_LVL_SHIFT 9 +#define CS42L43_GPIO1_LVL_MASK 0x00000100 +#define CS42L43_GPIO1_LVL_SHIFT 8 +#define CS42L43_GPIO3_DIR_MASK 0x00000004 +#define CS42L43_GPIO3_DIR_SHIFT 2 +#define CS42L43_GPIO2_DIR_MASK 0x00000002 +#define CS42L43_GPIO2_DIR_SHIFT 1 +#define CS42L43_GPIO1_DIR_MASK 0x00000001 +#define CS42L43_GPIO1_DIR_SHIFT 0 + +/* CS42L43_GPIO_CTRL2 */ +#define CS42L43_GPIO3_DEGLITCH_BYP_MASK 0x00000004 +#define CS42L43_GPIO3_DEGLITCH_BYP_SHIFT 2 +#define CS42L43_GPIO2_DEGLITCH_BYP_MASK 0x00000002 +#define CS42L43_GPIO2_DEGLITCH_BYP_SHIFT 1 +#define CS42L43_GPIO1_DEGLITCH_BYP_MASK 0x00000001 +#define CS42L43_GPIO1_DEGLITCH_BYP_SHIFT 0 + +/* CS42L43_GPIO_STS */ +#define CS42L43_GPIO3_STS_MASK 0x00000004 +#define CS42L43_GPIO3_STS_SHIFT 2 +#define CS42L43_GPIO2_STS_MASK 0x00000002 +#define CS42L43_GPIO2_STS_SHIFT 1 +#define CS42L43_GPIO1_STS_MASK 0x00000001 +#define CS42L43_GPIO1_STS_SHIFT 0 + +/* CS42L43_GPIO_FN_SEL */ +#define CS42L43_GPIO3_FN_SEL_MASK 0x00000004 +#define CS42L43_GPIO3_FN_SEL_SHIFT 2 +#define CS42L43_GPIO1_FN_SEL_MASK 0x00000001 +#define CS42L43_GPIO1_FN_SEL_SHIFT 0 + +/* CS42L43_MCLK_SRC_SEL */ +#define CS42L43_OSC_PLL_MCLK_SEL_MASK 0x00000001 +#define CS42L43_OSC_PLL_MCLK_SEL_SHIFT 0 + +/* CS42L43_SAMPLE_RATE1..CS42L43_SAMPLE_RATE4 */ +#define CS42L43_SAMPLE_RATE_MASK 
0x0000001F +#define CS42L43_SAMPLE_RATE_SHIFT 0 + +/* CS42L43_PLL_CONTROL */ +#define CS42L43_PLL_REFCLK_EN_MASK 0x00000008 +#define CS42L43_PLL_REFCLK_EN_SHIFT 3 +#define CS42L43_PLL_REFCLK_DIV_MASK 0x00000006 +#define CS42L43_PLL_REFCLK_DIV_SHIFT 1 +#define CS42L43_PLL_REFCLK_SRC_MASK 0x00000001 +#define CS42L43_PLL_REFCLK_SRC_SHIFT 0 + +/* CS42L43_FS_SELECT1 */ +#define CS42L43_ASP_RATE_MASK 0x00000003 +#define CS42L43_ASP_RATE_SHIFT 0 + +/* CS42L43_FS_SELECT2 */ +#define CS42L43_ASRC_DEC_OUT_RATE_MASK 0x000000C0 +#define CS42L43_ASRC_DEC_OUT_RATE_SHIFT 6 +#define CS42L43_ASRC_INT_OUT_RATE_MASK 0x00000030 +#define CS42L43_ASRC_INT_OUT_RATE_SHIFT 4 +#define CS42L43_ASRC_DEC_IN_RATE_MASK 0x0000000C +#define CS42L43_ASRC_DEC_IN_RATE_SHIFT 2 +#define CS42L43_ASRC_INT_IN_RATE_MASK 0x00000003 +#define CS42L43_ASRC_INT_IN_RATE_SHIFT 0 + +/* CS42L43_FS_SELECT3 */ +#define CS42L43_HPOUT_RATE_MASK 0x0000C000 +#define CS42L43_HPOUT_RATE_SHIFT 14 +#define CS42L43_EQZ_RATE_MASK 0x00003000 +#define CS42L43_EQZ_RATE_SHIFT 12 +#define CS42L43_DIAGGEN_RATE_MASK 0x00000C00 +#define CS42L43_DIAGGEN_RATE_SHIFT 10 +#define CS42L43_DECIM_CH4_RATE_MASK 0x00000300 +#define CS42L43_DECIM_CH4_RATE_SHIFT 8 +#define CS42L43_DECIM_CH3_RATE_MASK 0x000000C0 +#define CS42L43_DECIM_CH3_RATE_SHIFT 6 +#define CS42L43_DECIM_CH2_RATE_MASK 0x00000030 +#define CS42L43_DECIM_CH2_RATE_SHIFT 4 +#define CS42L43_DECIM_CH1_RATE_MASK 0x0000000C +#define CS42L43_DECIM_CH1_RATE_SHIFT 2 +#define CS42L43_AMP1_2_RATE_MASK 0x00000003 +#define CS42L43_AMP1_2_RATE_SHIFT 0 + +/* CS42L43_FS_SELECT4 */ +#define CS42L43_SW_DP7_RATE_MASK 0x00C00000 +#define CS42L43_SW_DP7_RATE_SHIFT 22 +#define CS42L43_SW_DP6_RATE_MASK 0x00300000 +#define CS42L43_SW_DP6_RATE_SHIFT 20 +#define CS42L43_SPDIF_RATE_MASK 0x000C0000 +#define CS42L43_SPDIF_RATE_SHIFT 18 +#define CS42L43_SW_DP5_RATE_MASK 0x00030000 +#define CS42L43_SW_DP5_RATE_SHIFT 16 +#define CS42L43_SW_DP4_RATE_MASK 0x0000C000 +#define CS42L43_SW_DP4_RATE_SHIFT 14 +#define CS42L43_SW_DP3_RATE_MASK 0x00003000 +#define CS42L43_SW_DP3_RATE_SHIFT 12 +#define CS42L43_SW_DP2_RATE_MASK 0x00000C00 +#define CS42L43_SW_DP2_RATE_SHIFT 10 +#define CS42L43_SW_DP1_RATE_MASK 0x00000300 +#define CS42L43_SW_DP1_RATE_SHIFT 8 +#define CS42L43_ISRC2_LOW_RATE_MASK 0x000000C0 +#define CS42L43_ISRC2_LOW_RATE_SHIFT 6 +#define CS42L43_ISRC2_HIGH_RATE_MASK 0x00000030 +#define CS42L43_ISRC2_HIGH_RATE_SHIFT 4 +#define CS42L43_ISRC1_LOW_RATE_MASK 0x0000000C +#define CS42L43_ISRC1_LOW_RATE_SHIFT 2 +#define CS42L43_ISRC1_HIGH_RATE_MASK 0x00000003 +#define CS42L43_ISRC1_HIGH_RATE_SHIFT 0 + +/* CS42L43_PDM_CONTROL */ +#define CS42L43_PDM2_CLK_DIV_MASK 0x0000000C +#define CS42L43_PDM2_CLK_DIV_SHIFT 2 +#define CS42L43_PDM1_CLK_DIV_MASK 0x00000003 +#define CS42L43_PDM1_CLK_DIV_SHIFT 0 + +/* CS42L43_ASP_CLK_CONFIG1 */ +#define CS42L43_ASP_BCLK_N_MASK 0x03FF0000 +#define CS42L43_ASP_BCLK_N_SHIFT 16 +#define CS42L43_ASP_BCLK_M_MASK 0x000003FF +#define CS42L43_ASP_BCLK_M_SHIFT 0 + +/* CS42L43_ASP_CLK_CONFIG2 */ +#define CS42L43_ASP_MASTER_MODE_MASK 0x00000002 +#define CS42L43_ASP_MASTER_MODE_SHIFT 1 +#define CS42L43_ASP_BCLK_INV_MASK 0x00000001 +#define CS42L43_ASP_BCLK_INV_SHIFT 0 + +/* CS42L43_OSC_DIV_SEL */ +#define CS42L43_OSC_DIV2_EN_MASK 0x00000001 +#define CS42L43_OSC_DIV2_EN_SHIFT 0 + +/* CS42L43_ADC_B_CTRL1..CS42L43_ADC_B_CTRL1 */ +#define CS42L43_PGA_WIDESWING_MODE_EN_MASK 0x00000080 +#define CS42L43_PGA_WIDESWING_MODE_EN_SHIFT 7 +#define CS42L43_ADC_AIN_SEL_MASK 0x00000010 +#define CS42L43_ADC_AIN_SEL_SHIFT 4 +#define 
CS42L43_ADC_PGA_GAIN_MASK 0x0000000F +#define CS42L43_ADC_PGA_GAIN_SHIFT 0 + +/* CS42L43_DECIM_HPF_WNF_CTRL1..CS42L43_DECIM_HPF_WNF_CTRL4 */ +#define CS42L43_DECIM_WNF_CF_MASK 0x00000070 +#define CS42L43_DECIM_WNF_CF_SHIFT 4 +#define CS42L43_DECIM_WNF_EN_MASK 0x00000008 +#define CS42L43_DECIM_WNF_EN_SHIFT 3 +#define CS42L43_DECIM_HPF_CF_MASK 0x00000006 +#define CS42L43_DECIM_HPF_CF_SHIFT 1 +#define CS42L43_DECIM_HPF_EN_MASK 0x00000001 +#define CS42L43_DECIM_HPF_EN_SHIFT 0 + +/* CS42L43_DMIC_PDM_CTRL */ +#define CS42L43_PDM2R_INV_MASK 0x00000020 +#define CS42L43_PDM2R_INV_SHIFT 5 +#define CS42L43_PDM2L_INV_MASK 0x00000010 +#define CS42L43_PDM2L_INV_SHIFT 4 +#define CS42L43_PDM1R_INV_MASK 0x00000008 +#define CS42L43_PDM1R_INV_SHIFT 3 +#define CS42L43_PDM1L_INV_MASK 0x00000004 +#define CS42L43_PDM1L_INV_SHIFT 2 + +/* CS42L43_DECIM_VOL_CTRL_CH1_CH2 */ +#define CS42L43_DECIM2_MUTE_MASK 0x80000000 +#define CS42L43_DECIM2_MUTE_SHIFT 31 +#define CS42L43_DECIM2_VOL_MASK 0x3FC00000 +#define CS42L43_DECIM2_VOL_SHIFT 22 +#define CS42L43_DECIM2_VD_RAMP_MASK 0x00380000 +#define CS42L43_DECIM2_VD_RAMP_SHIFT 19 +#define CS42L43_DECIM2_VI_RAMP_MASK 0x00070000 +#define CS42L43_DECIM2_VI_RAMP_SHIFT 16 +#define CS42L43_DECIM1_MUTE_MASK 0x00008000 +#define CS42L43_DECIM1_MUTE_SHIFT 15 +#define CS42L43_DECIM1_VOL_MASK 0x00003FC0 +#define CS42L43_DECIM1_VOL_SHIFT 6 +#define CS42L43_DECIM1_VD_RAMP_MASK 0x00000038 +#define CS42L43_DECIM1_VD_RAMP_SHIFT 3 +#define CS42L43_DECIM1_VI_RAMP_MASK 0x00000007 +#define CS42L43_DECIM1_VI_RAMP_SHIFT 0 + +/* CS42L43_DECIM_VOL_CTRL_CH3_CH4 */ +#define CS42L43_DECIM4_MUTE_MASK 0x80000000 +#define CS42L43_DECIM4_MUTE_SHIFT 31 +#define CS42L43_DECIM4_VOL_MASK 0x3FC00000 +#define CS42L43_DECIM4_VOL_SHIFT 22 +#define CS42L43_DECIM4_VD_RAMP_MASK 0x00380000 +#define CS42L43_DECIM4_VD_RAMP_SHIFT 19 +#define CS42L43_DECIM4_VI_RAMP_MASK 0x00070000 +#define CS42L43_DECIM4_VI_RAMP_SHIFT 16 +#define CS42L43_DECIM3_MUTE_MASK 0x00008000 +#define CS42L43_DECIM3_MUTE_SHIFT 15 +#define CS42L43_DECIM3_VOL_MASK 0x00003FC0 +#define CS42L43_DECIM3_VOL_SHIFT 6 +#define CS42L43_DECIM3_VD_RAMP_MASK 0x00000038 +#define CS42L43_DECIM3_VD_RAMP_SHIFT 3 +#define CS42L43_DECIM3_VI_RAMP_MASK 0x00000007 +#define CS42L43_DECIM3_VI_RAMP_SHIFT 0 + +/* CS42L43_DECIM_VOL_CTRL_UPDATE */ +#define CS42L43_DECIM4_VOL_UPDATE_MASK 0x00000008 +#define CS42L43_DECIM4_VOL_UPDATE_SHIFT 3 +#define CS42L43_DECIM3_VOL_UPDATE_MASK 0x00000004 +#define CS42L43_DECIM3_VOL_UPDATE_SHIFT 2 +#define CS42L43_DECIM2_VOL_UPDATE_MASK 0x00000002 +#define CS42L43_DECIM2_VOL_UPDATE_SHIFT 1 +#define CS42L43_DECIM1_VOL_UPDATE_MASK 0x00000001 +#define CS42L43_DECIM1_VOL_UPDATE_SHIFT 0 + +/* CS42L43_INTP_VOLUME_CTRL1..CS42L43_INTP_VOLUME_CTRL2 */ +#define CS42L43_AMP1_2_VU_MASK 0x00000200 +#define CS42L43_AMP1_2_VU_SHIFT 9 +#define CS42L43_AMP_MUTE_MASK 0x00000100 +#define CS42L43_AMP_MUTE_SHIFT 8 +#define CS42L43_AMP_VOL_MASK 0x000000FF +#define CS42L43_AMP_VOL_SHIFT 0 + +/* CS42L43_AMP1_2_VOL_RAMP */ +#define CS42L43_AMP1_2_VD_RAMP_MASK 0x00000070 +#define CS42L43_AMP1_2_VD_RAMP_SHIFT 4 +#define CS42L43_AMP1_2_VI_RAMP_MASK 0x00000007 +#define CS42L43_AMP1_2_VI_RAMP_SHIFT 0 + +/* CS42L43_ASP_CTRL */ +#define CS42L43_ASP_FSYNC_MODE_MASK 0x00000004 +#define CS42L43_ASP_FSYNC_MODE_SHIFT 2 +#define CS42L43_ASP_BCLK_EN_MASK 0x00000002 +#define CS42L43_ASP_BCLK_EN_SHIFT 1 +#define CS42L43_ASP_FSYNC_EN_MASK 0x00000001 +#define CS42L43_ASP_FSYNC_EN_SHIFT 0 + +/* CS42L43_ASP_FSYNC_CTRL1 */ +#define CS42L43_ASP_FSYNC_M_MASK 0x0007FFFF +#define 
CS42L43_ASP_FSYNC_M_SHIFT 0 + +/* CS42L43_ASP_FSYNC_CTRL3 */ +#define CS42L43_ASP_FSYNC_IN_INV_MASK 0x00000002 +#define CS42L43_ASP_FSYNC_IN_INV_SHIFT 1 +#define CS42L43_ASP_FSYNC_OUT_INV_MASK 0x00000001 +#define CS42L43_ASP_FSYNC_OUT_INV_SHIFT 0 + +/* CS42L43_ASP_FSYNC_CTRL4 */ +#define CS42L43_ASP_NUM_BCLKS_PER_FSYNC_MASK 0x00001FFE +#define CS42L43_ASP_NUM_BCLKS_PER_FSYNC_SHIFT 1 + +/* CS42L43_ASP_DATA_CTRL */ +#define CS42L43_ASP_FSYNC_FRAME_START_PHASE_MASK 0x00000008 +#define CS42L43_ASP_FSYNC_FRAME_START_PHASE_SHIFT 3 +#define CS42L43_ASP_FSYNC_FRAME_START_DLY_MASK 0x00000007 +#define CS42L43_ASP_FSYNC_FRAME_START_DLY_SHIFT 0 + +/* CS42L43_ASP_RX_EN */ +#define CS42L43_ASP_RX_CH6_EN_MASK 0x00000020 +#define CS42L43_ASP_RX_CH6_EN_SHIFT 5 +#define CS42L43_ASP_RX_CH5_EN_MASK 0x00000010 +#define CS42L43_ASP_RX_CH5_EN_SHIFT 4 +#define CS42L43_ASP_RX_CH4_EN_MASK 0x00000008 +#define CS42L43_ASP_RX_CH4_EN_SHIFT 3 +#define CS42L43_ASP_RX_CH3_EN_MASK 0x00000004 +#define CS42L43_ASP_RX_CH3_EN_SHIFT 2 +#define CS42L43_ASP_RX_CH2_EN_MASK 0x00000002 +#define CS42L43_ASP_RX_CH2_EN_SHIFT 1 +#define CS42L43_ASP_RX_CH1_EN_MASK 0x00000001 +#define CS42L43_ASP_RX_CH1_EN_SHIFT 0 + +/* CS42L43_ASP_TX_EN */ +#define CS42L43_ASP_TX_CH6_EN_MASK 0x00000020 +#define CS42L43_ASP_TX_CH6_EN_SHIFT 5 +#define CS42L43_ASP_TX_CH5_EN_MASK 0x00000010 +#define CS42L43_ASP_TX_CH5_EN_SHIFT 4 +#define CS42L43_ASP_TX_CH4_EN_MASK 0x00000008 +#define CS42L43_ASP_TX_CH4_EN_SHIFT 3 +#define CS42L43_ASP_TX_CH3_EN_MASK 0x00000004 +#define CS42L43_ASP_TX_CH3_EN_SHIFT 2 +#define CS42L43_ASP_TX_CH2_EN_MASK 0x00000002 +#define CS42L43_ASP_TX_CH2_EN_SHIFT 1 +#define CS42L43_ASP_TX_CH1_EN_MASK 0x00000001 +#define CS42L43_ASP_TX_CH1_EN_SHIFT 0 + +/* CS42L43_ASP_RX_CH1_CTRL..CS42L43_ASP_TX_CH6_CTRL */ +#define CS42L43_ASP_CH_WIDTH_MASK 0x001F0000 +#define CS42L43_ASP_CH_WIDTH_SHIFT 16 +#define CS42L43_ASP_CH_SLOT_MASK 0x00001FFE +#define CS42L43_ASP_CH_SLOT_SHIFT 1 +#define CS42L43_ASP_CH_SLOT_PHASE_MASK 0x00000001 +#define CS42L43_ASP_CH_SLOT_PHASE_SHIFT 0 + +/* CS42L43_ASPTX1_INPUT..CS42L43_AMP4MIX_INPUT4 */ +#define CS42L43_MIXER_VOL_MASK 0x00FE0000 +#define CS42L43_MIXER_VOL_SHIFT 17 +#define CS42L43_MIXER_SRC_MASK 0x000001FF +#define CS42L43_MIXER_SRC_SHIFT 0 + +/* CS42L43_ASRC_INT_ENABLES */ +#define CS42L43_ASRC_INT4_EN_MASK 0x00000008 +#define CS42L43_ASRC_INT4_EN_SHIFT 3 +#define CS42L43_ASRC_INT3_EN_MASK 0x00000004 +#define CS42L43_ASRC_INT3_EN_SHIFT 2 +#define CS42L43_ASRC_INT2_EN_MASK 0x00000002 +#define CS42L43_ASRC_INT2_EN_SHIFT 1 +#define CS42L43_ASRC_INT1_EN_MASK 0x00000001 +#define CS42L43_ASRC_INT1_EN_SHIFT 0 + +/* CS42L43_ASRC_DEC_ENABLES */ +#define CS42L43_ASRC_DEC4_EN_MASK 0x00000008 +#define CS42L43_ASRC_DEC4_EN_SHIFT 3 +#define CS42L43_ASRC_DEC3_EN_MASK 0x00000004 +#define CS42L43_ASRC_DEC3_EN_SHIFT 2 +#define CS42L43_ASRC_DEC2_EN_MASK 0x00000002 +#define CS42L43_ASRC_DEC2_EN_SHIFT 1 +#define CS42L43_ASRC_DEC1_EN_MASK 0x00000001 +#define CS42L43_ASRC_DEC1_EN_SHIFT 0 + +/* CS42L43_PDNCNTL */ +#define CS42L43_RING_SENSE_EN_MASK 0x00000002 +#define CS42L43_RING_SENSE_EN_SHIFT 1 + +/* CS42L43_RINGSENSE_DEB_CTRL */ +#define CS42L43_RINGSENSE_INV_MASK 0x00000080 +#define CS42L43_RINGSENSE_INV_SHIFT 7 +#define CS42L43_RINGSENSE_PULLUP_PDNB_MASK 0x00000040 +#define CS42L43_RINGSENSE_PULLUP_PDNB_SHIFT 6 +#define CS42L43_RINGSENSE_FALLING_DB_TIME_MASK 0x00000038 +#define CS42L43_RINGSENSE_FALLING_DB_TIME_SHIFT 3 +#define CS42L43_RINGSENSE_RISING_DB_TIME_MASK 0x00000007 +#define CS42L43_RINGSENSE_RISING_DB_TIME_SHIFT 0 + 
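[Editor's note -- illustrative, not part of the patch] The _MASK/_SHIFT
pairs in this header compose with the regmap API in the usual way; a
hypothetical update of the ring-sense debounce polarity:

	regmap_update_bits(cs42l43->regmap, CS42L43_RINGSENSE_DEB_CTRL,
			   CS42L43_RINGSENSE_INV_MASK,
			   1 << CS42L43_RINGSENSE_INV_SHIFT);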
+/* CS42L43_TIPSENSE_DEB_CTRL */ +#define CS42L43_TIPSENSE_INV_MASK 0x00000080 +#define CS42L43_TIPSENSE_INV_SHIFT 7 +#define CS42L43_TIPSENSE_FALLING_DB_TIME_MASK 0x00000038 +#define CS42L43_TIPSENSE_FALLING_DB_TIME_SHIFT 3 +#define CS42L43_TIPSENSE_RISING_DB_TIME_MASK 0x00000007 +#define CS42L43_TIPSENSE_RISING_DB_TIME_SHIFT 0 + +/* CS42L43_TIP_RING_SENSE_INTERRUPT_STATUS */ +#define CS42L43_TIPSENSE_UNPLUG_DB_STS_MASK 0x00000008 +#define CS42L43_TIPSENSE_UNPLUG_DB_STS_SHIFT 3 +#define CS42L43_TIPSENSE_PLUG_DB_STS_MASK 0x00000004 +#define CS42L43_TIPSENSE_PLUG_DB_STS_SHIFT 2 +#define CS42L43_RINGSENSE_UNPLUG_DB_STS_MASK 0x00000002 +#define CS42L43_RINGSENSE_UNPLUG_DB_STS_SHIFT 1 +#define CS42L43_RINGSENSE_PLUG_DB_STS_MASK 0x00000001 +#define CS42L43_RINGSENSE_PLUG_DB_STS_SHIFT 0 + +/* CS42L43_HS2 */ +#define CS42L43_HS_CLAMP_DISABLE_MASK 0x10000000 +#define CS42L43_HS_CLAMP_DISABLE_SHIFT 28 +#define CS42L43_HSBIAS_RAMP_MASK 0x0C000000 +#define CS42L43_HSBIAS_RAMP_SHIFT 26 +#define CS42L43_HSDET_MODE_MASK 0x00018000 +#define CS42L43_HSDET_MODE_SHIFT 15 +#define CS42L43_HSDET_MANUAL_MODE_MASK 0x00006000 +#define CS42L43_HSDET_MANUAL_MODE_SHIFT 13 +#define CS42L43_AUTO_HSDET_TIME_MASK 0x00000700 +#define CS42L43_AUTO_HSDET_TIME_SHIFT 8 +#define CS42L43_AMP3_4_GNDREF_HS3_SEL_MASK 0x00000080 +#define CS42L43_AMP3_4_GNDREF_HS3_SEL_SHIFT 7 +#define CS42L43_AMP3_4_GNDREF_HS4_SEL_MASK 0x00000040 +#define CS42L43_AMP3_4_GNDREF_HS4_SEL_SHIFT 6 +#define CS42L43_HSBIAS_GNDREF_HS3_SEL_MASK 0x00000020 +#define CS42L43_HSBIAS_GNDREF_HS3_SEL_SHIFT 5 +#define CS42L43_HSBIAS_GNDREF_HS4_SEL_MASK 0x00000010 +#define CS42L43_HSBIAS_GNDREF_HS4_SEL_SHIFT 4 +#define CS42L43_HSBIAS_OUT_HS3_SEL_MASK 0x00000008 +#define CS42L43_HSBIAS_OUT_HS3_SEL_SHIFT 3 +#define CS42L43_HSBIAS_OUT_HS4_SEL_MASK 0x00000004 +#define CS42L43_HSBIAS_OUT_HS4_SEL_SHIFT 2 +#define CS42L43_HSGND_HS3_SEL_MASK 0x00000002 +#define CS42L43_HSGND_HS3_SEL_SHIFT 1 +#define CS42L43_HSGND_HS4_SEL_MASK 0x00000001 +#define CS42L43_HSGND_HS4_SEL_SHIFT 0 + +/* CS42L43_HS_STAT */ +#define CS42L43_HSDET_TYPE_STS_MASK 0x00000007 +#define CS42L43_HSDET_TYPE_STS_SHIFT 0 + +/* CS42L43_MCU_SW_INTERRUPT */ +#define CS42L43_CONTROL_IND_MASK 0x00000004 +#define CS42L43_CONTROL_IND_SHIFT 2 +#define CS42L43_CONFIGS_IND_MASK 0x00000002 +#define CS42L43_CONFIGS_IND_SHIFT 1 +#define CS42L43_PATCH_IND_MASK 0x00000001 +#define CS42L43_PATCH_IND_SHIFT 0 + +/* CS42L43_STEREO_MIC_CTRL */ +#define CS42L43_HS2_BIAS_SENSE_EN_MASK 0x00000020 +#define CS42L43_HS2_BIAS_SENSE_EN_SHIFT 5 +#define CS42L43_HS1_BIAS_SENSE_EN_MASK 0x00000010 +#define CS42L43_HS1_BIAS_SENSE_EN_SHIFT 4 +#define CS42L43_HS2_BIAS_EN_MASK 0x00000008 +#define CS42L43_HS2_BIAS_EN_SHIFT 3 +#define CS42L43_HS1_BIAS_EN_MASK 0x00000004 +#define CS42L43_HS1_BIAS_EN_SHIFT 2 +#define CS42L43_JACK_STEREO_CONFIG_MASK 0x00000003 +#define CS42L43_JACK_STEREO_CONFIG_SHIFT 0 + +/* CS42L43_STEREO_MIC_CLAMP_CTRL */ +#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK 0x00000002 +#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_SHIFT 1 +#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_MASK 0x00000001 +#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_SHIFT 0 + +/* CS42L43_BLOCK_EN2 */ +#define CS42L43_SPI_MSTR_EN_MASK 0x00000001 +#define CS42L43_SPI_MSTR_EN_SHIFT 0 + +/* CS42L43_BLOCK_EN3 */ +#define CS42L43_PDM2_DIN_R_EN_MASK 0x00000020 +#define CS42L43_PDM2_DIN_R_EN_SHIFT 5 +#define CS42L43_PDM2_DIN_L_EN_MASK 0x00000010 +#define CS42L43_PDM2_DIN_L_EN_SHIFT 4 +#define CS42L43_PDM1_DIN_R_EN_MASK 0x00000008 +#define CS42L43_PDM1_DIN_R_EN_SHIFT 3 
+#define CS42L43_PDM1_DIN_L_EN_MASK 0x00000004 +#define CS42L43_PDM1_DIN_L_EN_SHIFT 2 +#define CS42L43_ADC2_EN_MASK 0x00000002 +#define CS42L43_ADC2_EN_SHIFT 1 +#define CS42L43_ADC1_EN_MASK 0x00000001 +#define CS42L43_ADC1_EN_SHIFT 0 + +/* CS42L43_BLOCK_EN4 */ +#define CS42L43_ASRC_DEC_BANK_EN_MASK 0x00000002 +#define CS42L43_ASRC_DEC_BANK_EN_SHIFT 1 +#define CS42L43_ASRC_INT_BANK_EN_MASK 0x00000001 +#define CS42L43_ASRC_INT_BANK_EN_SHIFT 0 + +/* CS42L43_BLOCK_EN5 */ +#define CS42L43_ISRC2_BANK_EN_MASK 0x00000002 +#define CS42L43_ISRC2_BANK_EN_SHIFT 1 +#define CS42L43_ISRC1_BANK_EN_MASK 0x00000001 +#define CS42L43_ISRC1_BANK_EN_SHIFT 0 + +/* CS42L43_BLOCK_EN6 */ +#define CS42L43_MIXER_EN_MASK 0x00000001 +#define CS42L43_MIXER_EN_SHIFT 0 + +/* CS42L43_BLOCK_EN7 */ +#define CS42L43_EQ_EN_MASK 0x00000001 +#define CS42L43_EQ_EN_SHIFT 0 + +/* CS42L43_BLOCK_EN8 */ +#define CS42L43_HP_EN_MASK 0x00000001 +#define CS42L43_HP_EN_SHIFT 0 + +/* CS42L43_BLOCK_EN9 */ +#define CS42L43_TONE_EN_MASK 0x00000001 +#define CS42L43_TONE_EN_SHIFT 0 + +/* CS42L43_BLOCK_EN10 */ +#define CS42L43_AMP2_EN_MASK 0x00000002 +#define CS42L43_AMP2_EN_SHIFT 1 +#define CS42L43_AMP1_EN_MASK 0x00000001 +#define CS42L43_AMP1_EN_SHIFT 0 + +/* CS42L43_BLOCK_EN11 */ +#define CS42L43_SPDIF_EN_MASK 0x00000001 +#define CS42L43_SPDIF_EN_SHIFT 0 + +/* CS42L43_TONE_CH1_CTRL..CS42L43_TONE_CH2_CTRL */ +#define CS42L43_TONE_FREQ_MASK 0x00000070 +#define CS42L43_TONE_FREQ_SHIFT 4 +#define CS42L43_TONE_SEL_MASK 0x0000000F +#define CS42L43_TONE_SEL_SHIFT 0 + +/* CS42L43_MIC_DETECT_CONTROL_1 */ +#define CS42L43_BUTTON_DETECT_MODE_MASK 0x00000018 +#define CS42L43_BUTTON_DETECT_MODE_SHIFT 3 +#define CS42L43_HSBIAS_MODE_MASK 0x00000006 +#define CS42L43_HSBIAS_MODE_SHIFT 1 +#define CS42L43_MIC_LVL_DET_DISABLE_MASK 0x00000001 +#define CS42L43_MIC_LVL_DET_DISABLE_SHIFT 0 + +/* CS42L43_DETECT_STATUS_1 */ +#define CS42L43_HSDET_DC_STS_MASK 0x01FF0000 +#define CS42L43_HSDET_DC_STS_SHIFT 16 +#define CS42L43_JACKDET_STS_MASK 0x00000080 +#define CS42L43_JACKDET_STS_SHIFT 7 +#define CS42L43_HSBIAS_CLAMP_STS_MASK 0x00000040 +#define CS42L43_HSBIAS_CLAMP_STS_SHIFT 6 + +/* CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL */ +#define CS42L43_JACKDET_MODE_MASK 0xC0000000 +#define CS42L43_JACKDET_MODE_SHIFT 30 +#define CS42L43_JACKDET_INV_MASK 0x20000000 +#define CS42L43_JACKDET_INV_SHIFT 29 +#define CS42L43_JACKDET_DB_TIME_MASK 0x03000000 +#define CS42L43_JACKDET_DB_TIME_SHIFT 24 +#define CS42L43_S0_AUTO_ADCMUTE_DISABLE_MASK 0x00800000 +#define CS42L43_S0_AUTO_ADCMUTE_DISABLE_SHIFT 23 +#define CS42L43_HSBIAS_SENSE_EN_MASK 0x00000080 +#define CS42L43_HSBIAS_SENSE_EN_SHIFT 7 +#define CS42L43_AUTO_HSBIAS_CLAMP_EN_MASK 0x00000040 +#define CS42L43_AUTO_HSBIAS_CLAMP_EN_SHIFT 6 +#define CS42L43_JACKDET_SENSE_EN_MASK 0x00000020 +#define CS42L43_JACKDET_SENSE_EN_SHIFT 5 +#define CS42L43_HSBIAS_SENSE_TRIP_MASK 0x00000007 +#define CS42L43_HSBIAS_SENSE_TRIP_SHIFT 0 + +/* CS42L43_MIC_DETECT_CONTROL_ANDROID */ +#define CS42L43_HSDET_LVL_COMBWIDTH_MASK 0xC0000000 +#define CS42L43_HSDET_LVL_COMBWIDTH_SHIFT 30 +#define CS42L43_HSDET_LVL2_THRESH_MASK 0x01FF0000 +#define CS42L43_HSDET_LVL2_THRESH_SHIFT 16 +#define CS42L43_HSDET_LVL1_THRESH_MASK 0x000001FF +#define CS42L43_HSDET_LVL1_THRESH_SHIFT 0 + +/* CS42L43_ISRC1_CTRL..CS42L43_ISRC2_CTRL */ +#define CS42L43_ISRC_INT2_EN_MASK 0x00000200 +#define CS42L43_ISRC_INT2_EN_SHIFT 9 +#define CS42L43_ISRC_INT1_EN_MASK 0x00000100 +#define CS42L43_ISRC_INT1_EN_SHIFT 8 +#define CS42L43_ISRC_DEC2_EN_MASK 0x00000002 +#define 
CS42L43_ISRC_DEC2_EN_SHIFT 1 +#define CS42L43_ISRC_DEC1_EN_MASK 0x00000001 +#define CS42L43_ISRC_DEC1_EN_SHIFT 0 + +/* CS42L43_CTRL_REG */ +#define CS42L43_PLL_MODE_BYPASS_500_MASK 0x00000004 +#define CS42L43_PLL_MODE_BYPASS_500_SHIFT 2 +#define CS42L43_PLL_MODE_BYPASS_1029_MASK 0x00000002 +#define CS42L43_PLL_MODE_BYPASS_1029_SHIFT 1 +#define CS42L43_PLL_EN_MASK 0x00000001 +#define CS42L43_PLL_EN_SHIFT 0 + +/* CS42L43_FDIV_FRAC */ +#define CS42L43_PLL_DIV_INT_MASK 0xFF000000 +#define CS42L43_PLL_DIV_INT_SHIFT 24 +#define CS42L43_PLL_DIV_FRAC_BYTE2_MASK 0x00FF0000 +#define CS42L43_PLL_DIV_FRAC_BYTE2_SHIFT 16 +#define CS42L43_PLL_DIV_FRAC_BYTE1_MASK 0x0000FF00 +#define CS42L43_PLL_DIV_FRAC_BYTE1_SHIFT 8 +#define CS42L43_PLL_DIV_FRAC_BYTE0_MASK 0x000000FF +#define CS42L43_PLL_DIV_FRAC_BYTE0_SHIFT 0 + +/* CS42L43_CAL_RATIO */ +#define CS42L43_PLL_CAL_RATIO_MASK 0x000000FF +#define CS42L43_PLL_CAL_RATIO_SHIFT 0 + +/* CS42L43_SPI_CLK_CONFIG1 */ +#define CS42L43_SCLK_DIV_MASK 0x0000000F +#define CS42L43_SCLK_DIV_SHIFT 0 + +/* CS42L43_SPI_CONFIG1 */ +#define CS42L43_SPI_SS_IDLE_DUR_MASK 0x0F000000 +#define CS42L43_SPI_SS_IDLE_DUR_SHIFT 24 +#define CS42L43_SPI_SS_DELAY_DUR_MASK 0x000F0000 +#define CS42L43_SPI_SS_DELAY_DUR_SHIFT 16 +#define CS42L43_SPI_THREE_WIRE_MASK 0x00000100 +#define CS42L43_SPI_THREE_WIRE_SHIFT 8 +#define CS42L43_SPI_DPHA_MASK 0x00000040 +#define CS42L43_SPI_DPHA_SHIFT 6 +#define CS42L43_SPI_CPHA_MASK 0x00000020 +#define CS42L43_SPI_CPHA_SHIFT 5 +#define CS42L43_SPI_CPOL_MASK 0x00000010 +#define CS42L43_SPI_CPOL_SHIFT 4 +#define CS42L43_SPI_SS_SEL_MASK 0x00000007 +#define CS42L43_SPI_SS_SEL_SHIFT 0 + +/* CS42L43_SPI_CONFIG2 */ +#define CS42L43_SPI_SS_FRC_MASK 0x00000001 +#define CS42L43_SPI_SS_FRC_SHIFT 0 + +/* CS42L43_SPI_CONFIG3 */ +#define CS42L43_SPI_WDT_ENA_MASK 0x00000001 +#define CS42L43_SPI_WDT_ENA_SHIFT 0 + +/* CS42L43_SPI_CONFIG4 */ +#define CS42L43_SPI_STALL_ENA_MASK 0x00010000 +#define CS42L43_SPI_STALL_ENA_SHIFT 16 + +/* CS42L43_SPI_STATUS1 */ +#define CS42L43_SPI_ABORT_STS_MASK 0x00000002 +#define CS42L43_SPI_ABORT_STS_SHIFT 1 +#define CS42L43_SPI_DONE_STS_MASK 0x00000001 +#define CS42L43_SPI_DONE_STS_SHIFT 0 + +/* CS42L43_SPI_STATUS2 */ +#define CS42L43_SPI_RX_DONE_STS_MASK 0x00000010 +#define CS42L43_SPI_RX_DONE_STS_SHIFT 4 +#define CS42L43_SPI_TX_DONE_STS_MASK 0x00000001 +#define CS42L43_SPI_TX_DONE_STS_SHIFT 0 + +/* CS42L43_TRAN_CONFIG1 */ +#define CS42L43_SPI_START_MASK 0x00000001 +#define CS42L43_SPI_START_SHIFT 0 + +/* CS42L43_TRAN_CONFIG2 */ +#define CS42L43_SPI_ABORT_MASK 0x00000001 +#define CS42L43_SPI_ABORT_SHIFT 0 + +/* CS42L43_TRAN_CONFIG3 */ +#define CS42L43_SPI_WORD_SIZE_MASK 0x00070000 +#define CS42L43_SPI_WORD_SIZE_SHIFT 16 +#define CS42L43_SPI_CMD_MASK 0x00000003 +#define CS42L43_SPI_CMD_SHIFT 0 + +/* CS42L43_TRAN_CONFIG4 */ +#define CS42L43_SPI_TX_LENGTH_MASK 0x0000FFFF +#define CS42L43_SPI_TX_LENGTH_SHIFT 0 + +/* CS42L43_TRAN_CONFIG5 */ +#define CS42L43_SPI_RX_LENGTH_MASK 0x0000FFFF +#define CS42L43_SPI_RX_LENGTH_SHIFT 0 + +/* CS42L43_TRAN_CONFIG6 */ +#define CS42L43_SPI_TX_BLOCK_LENGTH_MASK 0x0000000F +#define CS42L43_SPI_TX_BLOCK_LENGTH_SHIFT 0 + +/* CS42L43_TRAN_CONFIG7 */ +#define CS42L43_SPI_RX_BLOCK_LENGTH_MASK 0x0000000F +#define CS42L43_SPI_RX_BLOCK_LENGTH_SHIFT 0 + +/* CS42L43_TRAN_CONFIG8 */ +#define CS42L43_SPI_RX_DONE_MASK 0x00000010 +#define CS42L43_SPI_RX_DONE_SHIFT 4 +#define CS42L43_SPI_TX_DONE_MASK 0x00000001 +#define CS42L43_SPI_TX_DONE_SHIFT 0 + +/* CS42L43_TRAN_STATUS1 */ +#define CS42L43_SPI_BUSY_STS_MASK 0x00000100 
+#define CS42L43_SPI_BUSY_STS_SHIFT 8 +#define CS42L43_SPI_RX_REQUEST_MASK 0x00000010 +#define CS42L43_SPI_RX_REQUEST_SHIFT 4 +#define CS42L43_SPI_TX_REQUEST_MASK 0x00000001 +#define CS42L43_SPI_TX_REQUEST_SHIFT 0 + +/* CS42L43_TRAN_STATUS2 */ +#define CS42L43_SPI_TX_BYTE_COUNT_MASK 0x0000FFFF +#define CS42L43_SPI_TX_BYTE_COUNT_SHIFT 0 + +/* CS42L43_TRAN_STATUS3 */ +#define CS42L43_SPI_RX_BYTE_COUNT_MASK 0x0000FFFF +#define CS42L43_SPI_RX_BYTE_COUNT_SHIFT 0 + +/* CS42L43_TX_DATA */ +#define CS42L43_SPI_TX_DATA_MASK 0xFFFFFFFF +#define CS42L43_SPI_TX_DATA_SHIFT 0 + +/* CS42L43_RX_DATA */ +#define CS42L43_SPI_RX_DATA_MASK 0xFFFFFFFF +#define CS42L43_SPI_RX_DATA_SHIFT 0 + +/* CS42L43_DACCNFG1 */ +#define CS42L43_HP_MSTR_VOL_CTRL_EN_MASK 0x00000008 +#define CS42L43_HP_MSTR_VOL_CTRL_EN_SHIFT 3 +#define CS42L43_AMP4_INV_MASK 0x00000002 +#define CS42L43_AMP4_INV_SHIFT 1 +#define CS42L43_AMP3_INV_MASK 0x00000001 +#define CS42L43_AMP3_INV_SHIFT 0 + +/* CS42L43_DACCNFG2 */ +#define CS42L43_HP_AUTO_CLAMP_DISABLE_MASK 0x00000002 +#define CS42L43_HP_AUTO_CLAMP_DISABLE_SHIFT 1 +#define CS42L43_HP_HPF_EN_MASK 0x00000001 +#define CS42L43_HP_HPF_EN_SHIFT 0 + +/* CS42L43_HPPATHVOL */ +#define CS42L43_AMP4_PATH_VOL_MASK 0x01FF0000 +#define CS42L43_AMP4_PATH_VOL_SHIFT 16 +#define CS42L43_AMP3_PATH_VOL_MASK 0x000001FF +#define CS42L43_AMP3_PATH_VOL_SHIFT 0 + +/* CS42L43_PGAVOL */ +#define CS42L43_HP_PATH_VOL_RAMP_MASK 0x0003C000 +#define CS42L43_HP_PATH_VOL_RAMP_SHIFT 14 +#define CS42L43_HP_PATH_VOL_ZC_MASK 0x00002000 +#define CS42L43_HP_PATH_VOL_ZC_SHIFT 13 +#define CS42L43_HP_PATH_VOL_SFT_MASK 0x00001000 +#define CS42L43_HP_PATH_VOL_SFT_SHIFT 12 +#define CS42L43_HP_DIG_VOL_RAMP_MASK 0x00000F00 +#define CS42L43_HP_DIG_VOL_RAMP_SHIFT 8 +#define CS42L43_HP_ANA_VOL_RAMP_MASK 0x0000000F +#define CS42L43_HP_ANA_VOL_RAMP_SHIFT 0 + +/* CS42L43_LOADDETRESULTS */ +#define CS42L43_AMP3_RES_DET_MASK 0x00000003 +#define CS42L43_AMP3_RES_DET_SHIFT 0 + +/* CS42L43_LOADDETENA */ +#define CS42L43_HPLOAD_DET_EN_MASK 0x00000001 +#define CS42L43_HPLOAD_DET_EN_SHIFT 0 + +/* CS42L43_CTRL */ +#define CS42L43_ADPTPWR_MODE_MASK 0x00000007 +#define CS42L43_ADPTPWR_MODE_SHIFT 0 + +/* CS42L43_COEFF_RD_WR0 */ +#define CS42L43_WRITE_MODE_MASK 0x00000002 +#define CS42L43_WRITE_MODE_SHIFT 1 + +/* CS42L43_INIT_DONE0 */ +#define CS42L43_INITIALIZE_DONE_MASK 0x00000001 +#define CS42L43_INITIALIZE_DONE_SHIFT 0 + +/* CS42L43_START_EQZ0 */ +#define CS42L43_START_FILTER_MASK 0x00000001 +#define CS42L43_START_FILTER_SHIFT 0 + +/* CS42L43_MUTE_EQ_IN0 */ +#define CS42L43_MUTE_EQ_CH2_MASK 0x00000002 +#define CS42L43_MUTE_EQ_CH2_SHIFT 1 +#define CS42L43_MUTE_EQ_CH1_MASK 0x00000001 +#define CS42L43_MUTE_EQ_CH1_SHIFT 0 + +/* CS42L43_PLL_INT */ +#define CS42L43_PLL_LOST_LOCK_INT_MASK 0x00000002 +#define CS42L43_PLL_LOST_LOCK_INT_SHIFT 1 +#define CS42L43_PLL_READY_INT_MASK 0x00000001 +#define CS42L43_PLL_READY_INT_SHIFT 0 + +/* CS42L43_SOFT_INT */ +#define CS42L43_CONTROL_APPLIED_INT_MASK 0x00000010 +#define CS42L43_CONTROL_APPLIED_INT_SHIFT 4 +#define CS42L43_CONTROL_WARN_INT_MASK 0x00000008 +#define CS42L43_CONTROL_WARN_INT_SHIFT 3 +#define CS42L43_PATCH_WARN_INT_MASK 0x00000002 +#define CS42L43_PATCH_WARN_INT_SHIFT 1 +#define CS42L43_PATCH_APPLIED_INT_MASK 0x00000001 +#define CS42L43_PATCH_APPLIED_INT_SHIFT 0 + +/* CS42L43_MSM_INT */ +#define CS42L43_HP_STARTUP_DONE_INT_MASK 0x00000800 +#define CS42L43_HP_STARTUP_DONE_INT_SHIFT 11 +#define CS42L43_HP_SHUTDOWN_DONE_INT_MASK 0x00000400 +#define CS42L43_HP_SHUTDOWN_DONE_INT_SHIFT 10 +#define 
CS42L43_HSDET_DONE_INT_MASK 0x00000200 +#define CS42L43_HSDET_DONE_INT_SHIFT 9 +#define CS42L43_TIPSENSE_UNPLUG_DB_INT_MASK 0x00000080 +#define CS42L43_TIPSENSE_UNPLUG_DB_INT_SHIFT 7 +#define CS42L43_TIPSENSE_PLUG_DB_INT_MASK 0x00000040 +#define CS42L43_TIPSENSE_PLUG_DB_INT_SHIFT 6 +#define CS42L43_RINGSENSE_UNPLUG_DB_INT_MASK 0x00000020 +#define CS42L43_RINGSENSE_UNPLUG_DB_INT_SHIFT 5 +#define CS42L43_RINGSENSE_PLUG_DB_INT_MASK 0x00000010 +#define CS42L43_RINGSENSE_PLUG_DB_INT_SHIFT 4 +#define CS42L43_TIPSENSE_UNPLUG_PDET_INT_MASK 0x00000008 +#define CS42L43_TIPSENSE_UNPLUG_PDET_INT_SHIFT 3 +#define CS42L43_TIPSENSE_PLUG_PDET_INT_MASK 0x00000004 +#define CS42L43_TIPSENSE_PLUG_PDET_INT_SHIFT 2 +#define CS42L43_RINGSENSE_UNPLUG_PDET_INT_MASK 0x00000002 +#define CS42L43_RINGSENSE_UNPLUG_PDET_INT_SHIFT 1 +#define CS42L43_RINGSENSE_PLUG_PDET_INT_MASK 0x00000001 +#define CS42L43_RINGSENSE_PLUG_PDET_INT_SHIFT 0 + +/* CS42L43_ACC_DET_INT */ +#define CS42L43_HS2_BIAS_SENSE_INT_MASK 0x00000800 +#define CS42L43_HS2_BIAS_SENSE_INT_SHIFT 11 +#define CS42L43_HS1_BIAS_SENSE_INT_MASK 0x00000400 +#define CS42L43_HS1_BIAS_SENSE_INT_SHIFT 10 +#define CS42L43_DC_DETECT1_FALSE_INT_MASK 0x00000080 +#define CS42L43_DC_DETECT1_FALSE_INT_SHIFT 7 +#define CS42L43_DC_DETECT1_TRUE_INT_MASK 0x00000040 +#define CS42L43_DC_DETECT1_TRUE_INT_SHIFT 6 +#define CS42L43_HSBIAS_CLAMPED_INT_MASK 0x00000008 +#define CS42L43_HSBIAS_CLAMPED_INT_SHIFT 3 +#define CS42L43_HS3_4_BIAS_SENSE_INT_MASK 0x00000001 +#define CS42L43_HS3_4_BIAS_SENSE_INT_SHIFT 0 + +/* CS42L43_SPI_MSTR_INT */ +#define CS42L43_IRQ_SPI_STALLING_INT_MASK 0x00000004 +#define CS42L43_IRQ_SPI_STALLING_INT_SHIFT 2 +#define CS42L43_IRQ_SPI_STS_INT_MASK 0x00000002 +#define CS42L43_IRQ_SPI_STS_INT_SHIFT 1 +#define CS42L43_IRQ_SPI_BLOCK_INT_MASK 0x00000001 +#define CS42L43_IRQ_SPI_BLOCK_INT_SHIFT 0 + +/* CS42L43_SW_TO_SPI_BRIDGE_INT */ +#define CS42L43_SW2SPI_BUF_OVF_UDF_INT_MASK 0x00000001 +#define CS42L43_SW2SPI_BUF_OVF_UDF_INT_SHIFT 0 + +/* CS42L43_CLASS_D_AMP_INT */ +#define CS42L43_AMP2_CLK_STOP_FAULT_INT_MASK 0x00002000 +#define CS42L43_AMP2_CLK_STOP_FAULT_INT_SHIFT 13 +#define CS42L43_AMP1_CLK_STOP_FAULT_INT_MASK 0x00001000 +#define CS42L43_AMP1_CLK_STOP_FAULT_INT_SHIFT 12 +#define CS42L43_AMP2_VDDSPK_FAULT_INT_MASK 0x00000800 +#define CS42L43_AMP2_VDDSPK_FAULT_INT_SHIFT 11 +#define CS42L43_AMP1_VDDSPK_FAULT_INT_MASK 0x00000400 +#define CS42L43_AMP1_VDDSPK_FAULT_INT_SHIFT 10 +#define CS42L43_AMP2_SHUTDOWN_DONE_INT_MASK 0x00000200 +#define CS42L43_AMP2_SHUTDOWN_DONE_INT_SHIFT 9 +#define CS42L43_AMP1_SHUTDOWN_DONE_INT_MASK 0x00000100 +#define CS42L43_AMP1_SHUTDOWN_DONE_INT_SHIFT 8 +#define CS42L43_AMP2_STARTUP_DONE_INT_MASK 0x00000080 +#define CS42L43_AMP2_STARTUP_DONE_INT_SHIFT 7 +#define CS42L43_AMP1_STARTUP_DONE_INT_MASK 0x00000040 +#define CS42L43_AMP1_STARTUP_DONE_INT_SHIFT 6 +#define CS42L43_AMP2_THERM_SHDN_INT_MASK 0x00000020 +#define CS42L43_AMP2_THERM_SHDN_INT_SHIFT 5 +#define CS42L43_AMP1_THERM_SHDN_INT_MASK 0x00000010 +#define CS42L43_AMP1_THERM_SHDN_INT_SHIFT 4 +#define CS42L43_AMP2_THERM_WARN_INT_MASK 0x00000008 +#define CS42L43_AMP2_THERM_WARN_INT_SHIFT 3 +#define CS42L43_AMP1_THERM_WARN_INT_MASK 0x00000004 +#define CS42L43_AMP1_THERM_WARN_INT_SHIFT 2 +#define CS42L43_AMP2_SCDET_INT_MASK 0x00000002 +#define CS42L43_AMP2_SCDET_INT_SHIFT 1 +#define CS42L43_AMP1_SCDET_INT_MASK 0x00000001 +#define CS42L43_AMP1_SCDET_INT_SHIFT 0 + +/* CS42L43_GPIO_INT */ +#define CS42L43_GPIO3_FALL_INT_MASK 0x00000020 +#define CS42L43_GPIO3_FALL_INT_SHIFT 5 +#define 
CS42L43_GPIO3_RISE_INT_MASK 0x00000010 +#define CS42L43_GPIO3_RISE_INT_SHIFT 4 +#define CS42L43_GPIO2_FALL_INT_MASK 0x00000008 +#define CS42L43_GPIO2_FALL_INT_SHIFT 3 +#define CS42L43_GPIO2_RISE_INT_MASK 0x00000004 +#define CS42L43_GPIO2_RISE_INT_SHIFT 2 +#define CS42L43_GPIO1_FALL_INT_MASK 0x00000002 +#define CS42L43_GPIO1_FALL_INT_SHIFT 1 +#define CS42L43_GPIO1_RISE_INT_MASK 0x00000001 +#define CS42L43_GPIO1_RISE_INT_SHIFT 0 + +/* CS42L43_HPOUT_INT */ +#define CS42L43_HP_ILIMIT_INT_MASK 0x00000002 +#define CS42L43_HP_ILIMIT_INT_SHIFT 1 +#define CS42L43_HP_LOADDET_DONE_INT_MASK 0x00000001 +#define CS42L43_HP_LOADDET_DONE_INT_SHIFT 0 + +/* CS42L43_BOOT_CONTROL */ +#define CS42L43_LOCK_HW_STS_MASK 0x00000002 +#define CS42L43_LOCK_HW_STS_SHIFT 1 + +/* CS42L43_BLOCK_EN */ +#define CS42L43_MCU_EN_MASK 0x00000001 +#define CS42L43_MCU_EN_SHIFT 0 + +/* CS42L43_SHUTTER_CONTROL */ +#define CS42L43_STATUS_SPK_SHUTTER_MUTE_MASK 0x00008000 +#define CS42L43_STATUS_SPK_SHUTTER_MUTE_SHIFT 15 +#define CS42L43_SPK_SHUTTER_CFG_MASK 0x00000F00 +#define CS42L43_SPK_SHUTTER_CFG_SHIFT 8 +#define CS42L43_STATUS_MIC_SHUTTER_MUTE_MASK 0x00000080 +#define CS42L43_STATUS_MIC_SHUTTER_MUTE_SHIFT 7 +#define CS42L43_MIC_SHUTTER_CFG_MASK 0x0000000F +#define CS42L43_MIC_SHUTTER_CFG_SHIFT 0 + +/* CS42L43_MCU_SW_REV */ +#define CS42L43_BIOS_SUBMINOR_REV_MASK 0xFF000000 +#define CS42L43_BIOS_SUBMINOR_REV_SHIFT 24 +#define CS42L43_BIOS_MINOR_REV_MASK 0x00F00000 +#define CS42L43_BIOS_MINOR_REV_SHIFT 20 +#define CS42L43_BIOS_MAJOR_REV_MASK 0x000F0000 +#define CS42L43_BIOS_MAJOR_REV_SHIFT 16 +#define CS42L43_FW_SUBMINOR_REV_MASK 0x0000FF00 +#define CS42L43_FW_SUBMINOR_REV_SHIFT 8 +#define CS42L43_FW_MINOR_REV_MASK 0x000000F0 +#define CS42L43_FW_MINOR_REV_SHIFT 4 +#define CS42L43_FW_MAJOR_REV_MASK 0x0000000F +#define CS42L43_FW_MAJOR_REV_SHIFT 0 + +/* CS42L43_NEED_CONFIGS */ +#define CS42L43_FW_PATCH_NEED_CFG_MASK 0x80000000 +#define CS42L43_FW_PATCH_NEED_CFG_SHIFT 31 + +/* CS42L43_FW_MISSION_CTRL_MM_CTRL_SELECTION */ +#define CS42L43_FW_MM_CTRL_MCU_SEL_MASK 0x00000001 +#define CS42L43_FW_MM_CTRL_MCU_SEL_SHIFT 0 + +/* CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_REG */ +#define CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_DISABLE_VAL 0xF05AA50F + +#endif /* CS42L43_CORE_REGS_H */ diff --git a/include/linux/mfd/cs42l43.h b/include/linux/mfd/cs42l43.h new file mode 100644 index 000000000000..cf8263aab41b --- /dev/null +++ b/include/linux/mfd/cs42l43.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CS42L43 core driver external data + * + * Copyright (C) 2022-2023 Cirrus Logic, Inc. and + * Cirrus Logic International Semiconductor Ltd. 
+ */ + +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/gpio/consumer.h> +#include <linux/mutex.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/soundwire/sdw.h> +#include <linux/workqueue.h> + +#ifndef CS42L43_CORE_EXT_H +#define CS42L43_CORE_EXT_H + +#define CS42L43_N_SUPPLIES 3 + +enum cs42l43_irq_numbers { + CS42L43_PLL_LOST_LOCK, + CS42L43_PLL_READY, + + CS42L43_HP_STARTUP_DONE, + CS42L43_HP_SHUTDOWN_DONE, + CS42L43_HSDET_DONE, + CS42L43_TIPSENSE_UNPLUG_DB, + CS42L43_TIPSENSE_PLUG_DB, + CS42L43_RINGSENSE_UNPLUG_DB, + CS42L43_RINGSENSE_PLUG_DB, + CS42L43_TIPSENSE_UNPLUG_PDET, + CS42L43_TIPSENSE_PLUG_PDET, + CS42L43_RINGSENSE_UNPLUG_PDET, + CS42L43_RINGSENSE_PLUG_PDET, + + CS42L43_HS2_BIAS_SENSE, + CS42L43_HS1_BIAS_SENSE, + CS42L43_DC_DETECT1_FALSE, + CS42L43_DC_DETECT1_TRUE, + CS42L43_HSBIAS_CLAMPED, + CS42L43_HS3_4_BIAS_SENSE, + + CS42L43_AMP2_CLK_STOP_FAULT, + CS42L43_AMP1_CLK_STOP_FAULT, + CS42L43_AMP2_VDDSPK_FAULT, + CS42L43_AMP1_VDDSPK_FAULT, + CS42L43_AMP2_SHUTDOWN_DONE, + CS42L43_AMP1_SHUTDOWN_DONE, + CS42L43_AMP2_STARTUP_DONE, + CS42L43_AMP1_STARTUP_DONE, + CS42L43_AMP2_THERM_SHDN, + CS42L43_AMP1_THERM_SHDN, + CS42L43_AMP2_THERM_WARN, + CS42L43_AMP1_THERM_WARN, + CS42L43_AMP2_SCDET, + CS42L43_AMP1_SCDET, + + CS42L43_GPIO3_FALL, + CS42L43_GPIO3_RISE, + CS42L43_GPIO2_FALL, + CS42L43_GPIO2_RISE, + CS42L43_GPIO1_FALL, + CS42L43_GPIO1_RISE, + + CS42L43_HP_ILIMIT, + CS42L43_HP_LOADDET_DONE, +}; + +struct cs42l43 { + struct device *dev; + struct regmap *regmap; + struct sdw_slave *sdw; + + struct regulator *vdd_p; + struct regulator *vdd_d; + struct regulator_bulk_data core_supplies[CS42L43_N_SUPPLIES]; + + struct gpio_desc *reset; + + int irq; + struct regmap_irq_chip irq_chip; + struct regmap_irq_chip_data *irq_data; + + struct work_struct boot_work; + struct completion device_attach; + struct completion device_detach; + struct completion firmware_download; + int firmware_error; + + unsigned int sdw_freq; + /* Lock to gate control of the PLL and its sources. 
*/ + struct mutex pll_lock; + + bool sdw_pll_active; + bool attached; + bool hw_lock; +}; + +#endif /* CS42L43_CORE_EXT_H */ diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h index 16f87cccc003..9185b5cd8371 100644 --- a/include/linux/mfd/tps65086.h +++ b/include/linux/mfd/tps65086.h @@ -13,8 +13,9 @@ #include <linux/regmap.h> /* List of registers for TPS65086 */ -#define TPS65086_DEVICEID 0x01 -#define TPS65086_IRQ 0x02 +#define TPS65086_DEVICEID1 0x00 +#define TPS65086_DEVICEID2 0x01 +#define TPS65086_IRQ 0x02 #define TPS65086_IRQ_MASK 0x03 #define TPS65086_PMICSTAT 0x04 #define TPS65086_SHUTDNSRC 0x05 @@ -75,10 +76,16 @@ #define TPS65086_IRQ_SHUTDN_MASK BIT(3) #define TPS65086_IRQ_FAULT_MASK BIT(7) -/* DEVICEID Register field definitions */ -#define TPS65086_DEVICEID_PART_MASK GENMASK(3, 0) -#define TPS65086_DEVICEID_OTP_MASK GENMASK(5, 4) -#define TPS65086_DEVICEID_REV_MASK GENMASK(7, 6) +/* DEVICEID1 Register field definitions */ +#define TPS6508640_ID 0x00 +#define TPS65086401_ID 0x01 +#define TPS6508641_ID 0x10 +#define TPS65086470_ID 0x70 + +/* DEVICEID2 Register field definitions */ +#define TPS65086_DEVICEID2_PART_MASK GENMASK(3, 0) +#define TPS65086_DEVICEID2_OTP_MASK GENMASK(5, 4) +#define TPS65086_DEVICEID2_REV_MASK GENMASK(7, 6) /* VID Masks */ #define BUCK_VID_MASK GENMASK(7, 1) @@ -92,6 +99,8 @@ enum tps65086_irqs { TPS65086_IRQ_FAULT, }; +struct tps65086_regulator_config; + /** * struct tps65086 - state holder for the tps65086 driver * @@ -100,6 +109,8 @@ enum tps65086_irqs { struct tps65086 { struct device *dev; struct regmap *regmap; + unsigned int chip_id; + const struct tps65086_regulator_config *reg_config; /* IRQ Data */ int irq; diff --git a/include/linux/minmax.h b/include/linux/minmax.h index 396df1121bff..4f011eb6533d 100644 --- a/include/linux/minmax.h +++ b/include/linux/minmax.h @@ -3,6 +3,7 @@ #define _LINUX_MINMAX_H #include <linux/const.h> +#include <linux/types.h> /* * min()/max()/clamp() macros must accomplish three things: @@ -158,6 +159,32 @@ */ #define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) +static inline bool in_range64(u64 val, u64 start, u64 len) +{ + return (val - start) < len; +} + +static inline bool in_range32(u32 val, u32 start, u32 len) +{ + return (val - start) < len; +} + +/** + * in_range - Determine if a value lies within a range. + * @val: Value to test. + * @start: First value in range. + * @len: Number of values in range. + * + * This is more efficient than "if (start <= val && val < (start + len))". + * It also gives a different answer if @start + @len overflows the size of + * the type by a sufficient amount to encompass @val. Decide for yourself + * which behaviour you want, or prove that start + len never overflow. + * Do not blindly replace one form with the other. + */ +#define in_range(val, start, len) \ + ((sizeof(start) | sizeof(len) | sizeof(val)) <= sizeof(u32) ? 
\ + in_range32(val, start, len) : in_range64(val, start, len)) + /** * swap - swap values of @a and @b * @a: first value diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 6646634a0b9d..27f42f713c89 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -33,6 +33,7 @@ #ifndef MLX4_DEVICE_H #define MLX4_DEVICE_H +#include <linux/auxiliary_bus.h> #include <linux/if_ether.h> #include <linux/pci.h> #include <linux/completion.h> @@ -889,6 +890,12 @@ struct mlx4_dev { u8 uar_page_shift; }; +struct mlx4_adev { + struct auxiliary_device adev; + struct mlx4_dev *mdev; + int idx; +}; + struct mlx4_clock_params { u64 offset; u8 bar; @@ -1087,6 +1094,19 @@ static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) (offset & (PAGE_SIZE - 1)); } +static inline int mlx4_is_bonded(struct mlx4_dev *dev) +{ + return !!(dev->flags & MLX4_FLAG_BONDED); +} + +static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev) +{ + return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev)); +} + +int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1, + u8 v2p_p2); + int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn); void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn); int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn); diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index 1834c8fad12e..69825223081f 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -34,8 +34,12 @@ #define MLX4_DRIVER_H #include <net/devlink.h> +#include <linux/auxiliary_bus.h> +#include <linux/notifier.h> #include <linux/mlx4/device.h> +#define MLX4_ADEV_NAME "mlx4_core" + struct mlx4_dev; #define MLX4_MAC_MASK 0xffffffffffffULL @@ -54,41 +58,19 @@ enum { MLX4_INTFF_BONDING = 1 << 0 }; -struct mlx4_interface { - void * (*add) (struct mlx4_dev *dev); - void (*remove)(struct mlx4_dev *dev, void *context); - void (*event) (struct mlx4_dev *dev, void *context, - enum mlx4_dev_event event, unsigned long param); - void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); - void (*activate)(struct mlx4_dev *dev, void *context); - struct list_head list; +struct mlx4_adrv { + struct auxiliary_driver adrv; enum mlx4_protocol protocol; int flags; }; -int mlx4_register_interface(struct mlx4_interface *intf); -void mlx4_unregister_interface(struct mlx4_interface *intf); - -int mlx4_bond(struct mlx4_dev *dev); -int mlx4_unbond(struct mlx4_dev *dev); -static inline int mlx4_is_bonded(struct mlx4_dev *dev) -{ - return !!(dev->flags & MLX4_FLAG_BONDED); -} - -static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev) -{ - return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev)); -} - -struct mlx4_port_map { - u8 port1; - u8 port2; -}; - -int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p); +int mlx4_register_auxiliary_driver(struct mlx4_adrv *madrv); +void mlx4_unregister_auxiliary_driver(struct mlx4_adrv *madrv); -void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port); +int mlx4_register_event_notifier(struct mlx4_dev *dev, + struct notifier_block *nb); +int mlx4_unregister_event_notifier(struct mlx4_dev *dev, + struct notifier_block *nb); struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 80cc12a9a531..4d5be378fa8c 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -364,6 +364,8 @@ enum mlx5_event { enum mlx5_driver_event { MLX5_DRIVER_EVENT_TYPE_TRAP = 0, 
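Returning to the in_range() addition in minmax.h above: both helpers rely on unsigned wraparound of the single subtraction, which is also why the kernel-doc warns that in_range() and the naive comparison can disagree once @start + @len overflows. A worked example under those definitions:

	/*
	 * A 0x20-wide range starting at 0xfffffff0 wraps past zero,
	 * covering 0xfffffff0..0xffffffff and then 0x00..0x0f.
	 */
	in_range32(0x0f, 0xfffffff0, 0x20); /* true: 0x0f - 0xfffffff0 wraps to 0x1f, and 0x1f < 0x20 */
	in_range32(0x10, 0xfffffff0, 0x20); /* false: the wrapped difference is exactly 0x20 */
	/*
	 * The naive "start <= val && val < start + len" is false for both,
	 * since start + len wraps to 0x10 and 0xfffffff0 <= val never holds.
	 */
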
MLX5_DRIVER_EVENT_UPLINK_NETDEV, + MLX5_DRIVER_EVENT_MACSEC_SA_ADDED, + MLX5_DRIVER_EVENT_MACSEC_SA_DELETED, }; enum { @@ -1208,9 +1210,7 @@ enum mlx5_cap_type { MLX5_CAP_FLOW_TABLE, MLX5_CAP_ESWITCH_FLOW_TABLE, MLX5_CAP_ESWITCH, - MLX5_CAP_RESERVED, - MLX5_CAP_VECTOR_CALC, - MLX5_CAP_QOS, + MLX5_CAP_QOS = 0xc, MLX5_CAP_DEBUG, MLX5_CAP_RESERVED_14, MLX5_CAP_DEV_MEM, @@ -1220,7 +1220,6 @@ enum mlx5_cap_type { MLX5_CAP_DEV_EVENT = 0x14, MLX5_CAP_IPSEC, MLX5_CAP_CRYPTO = 0x1a, - MLX5_CAP_DEV_SHAMPO = 0x1d, MLX5_CAP_MACSEC = 0x1f, MLX5_CAP_GENERAL_2 = 0x20, MLX5_CAP_PORT_SELECTION = 0x25, @@ -1239,7 +1238,6 @@ enum mlx5_pcam_feature_groups { enum mlx5_mcam_reg_groups { MLX5_MCAM_REGS_FIRST_128 = 0x0, - MLX5_MCAM_REGS_0x9080_0x90FF = 0x1, MLX5_MCAM_REGS_0x9100_0x917F = 0x2, MLX5_MCAM_REGS_NUM = 0x3, }; @@ -1279,10 +1277,6 @@ enum mlx5_qcam_feature_groups { MLX5_GET(per_protocol_networking_offload_caps,\ mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap) -#define MLX5_CAP_ETH_MAX(mdev, cap) \ - MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap) - #define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap) @@ -1305,77 +1299,40 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP64_FLOWTABLE(mdev, cap) \ MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap) -#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ - MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap) - #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) -#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \ - MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap) - #define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap) -#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \ - MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap) - #define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap) -#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \ - MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap) - #define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap) -#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \ - MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap) - #define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap) -#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \ - MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap) - #define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap) -#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \ - MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap) - #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap) -#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ - MLX5_GET(flow_table_eswitch_cap, \ - mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap) - #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) -#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \ - MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, 
flow_table_properties_nic_esw_fdb.cap) - #define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap) -#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \ - MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap) - #define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap) -#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \ - MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap) - #define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap) -#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2_MAX(mdev, cap) \ - MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, ft_field_support_2_esw_fdb.cap) - #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap) @@ -1384,10 +1341,6 @@ enum mlx5_qcam_feature_groups { MLX5_GET64(flow_table_eswitch_cap, \ (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap) -#define MLX5_CAP_ESW_MAX(mdev, cap) \ - MLX5_GET(e_switch_cap, \ - mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap) - #define MLX5_CAP_PORT_SELECTION(mdev, cap) \ MLX5_GET(port_selection_cap, \ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap) @@ -1400,26 +1353,15 @@ enum mlx5_qcam_feature_groups { MLX5_GET(adv_virtualization_cap, \ mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap) -#define MLX5_CAP_ADV_VIRTUALIZATION_MAX(mdev, cap) \ - MLX5_GET(adv_virtualization_cap, \ - mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->max, cap) - #define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \ MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap) -#define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \ - MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap) - #define MLX5_CAP_ODP(mdev, cap)\ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap) #define MLX5_CAP_ODP_MAX(mdev, cap)\ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap) -#define MLX5_CAP_VECTOR_CALC(mdev, cap) \ - MLX5_GET(vector_calc_cap, \ - mdev->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap) - #define MLX5_CAP_QOS(mdev, cap)\ MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap) @@ -1436,10 +1378,6 @@ enum mlx5_qcam_feature_groups { MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \ mng_access_reg_cap_mask.access_regs.reg) -#define MLX5_CAP_MCAM_REG1(mdev, reg) \ - MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \ - mng_access_reg_cap_mask.access_regs1.reg) - #define MLX5_CAP_MCAM_REG2(mdev, reg) \ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \ mng_access_reg_cap_mask.access_regs2.reg) @@ -1485,9 +1423,6 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_CRYPTO(mdev, cap)\ MLX5_GET(crypto_cap, (mdev)->caps.hca[MLX5_CAP_CRYPTO]->cur, cap) -#define MLX5_CAP_DEV_SHAMPO(mdev, cap)\ - MLX5_GET(shampo_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_SHAMPO], cap) - #define MLX5_CAP_MACSEC(mdev, cap)\ MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 25d0528f9219..3033bbaeac81 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -134,6 +134,7 @@ enum { MLX5_REG_PCAM = 0x507f, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, + MLX5_REG_MTCAP = 0x9009, MLX5_REG_MTMP = 0x900A, MLX5_REG_MCIA = 0x9014, MLX5_REG_MFRL = 0x9028, @@ -287,18 +288,23 @@ struct mlx5_cmd_stats { struct mlx5_cmd { struct mlx5_nb nb; + 
/* members which needs to be queried or reinitialized each reload */ + struct { + u16 cmdif_rev; + u8 log_sz; + u8 log_stride; + int max_reg_cmds; + unsigned long bitmask; + struct semaphore sem; + struct semaphore pages_sem; + struct semaphore throttle_sem; + } vars; enum mlx5_cmdif_state state; void *cmd_alloc_buf; dma_addr_t alloc_dma; int alloc_size; void *cmd_buf; dma_addr_t dma; - u16 cmdif_rev; - u8 log_sz; - u8 log_stride; - int max_reg_cmds; - int events; - u32 __iomem *vector; /* protect command queue allocations */ @@ -308,12 +314,8 @@ struct mlx5_cmd { */ spinlock_t token_lock; u8 token; - unsigned long bitmask; char wq_name[MLX5_CMD_WQ_MAX_NAME]; struct workqueue_struct *wq; - struct semaphore sem; - struct semaphore pages_sem; - struct semaphore throttle_sem; int mode; u16 allowed_opcode; struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; @@ -321,7 +323,7 @@ struct mlx5_cmd { struct mlx5_cmd_debug dbg; struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; int checksum_disabled; - struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; + struct xarray stats; }; struct mlx5_cmd_mailbox { @@ -501,7 +503,7 @@ struct mlx5_events; struct mlx5_mpfs; struct mlx5_eswitch; struct mlx5_lag; -struct mlx5_devcom; +struct mlx5_devcom_dev; struct mlx5_fw_reset; struct mlx5_eq_table; struct mlx5_irq_table; @@ -618,7 +620,7 @@ struct mlx5_priv { struct mlx5_core_sriov sriov; struct mlx5_lag *lag; u32 flags; - struct mlx5_devcom *devcom; + struct mlx5_devcom_dev *devc; struct mlx5_fw_reset *fw_reset; struct mlx5_core_roce roce; struct mlx5_fc_stats fc_stats; @@ -725,7 +727,6 @@ struct mlx5_fw_tracer; struct mlx5_vxlan; struct mlx5_geneve; struct mlx5_hv_vhca; -struct mlx5_thermal; #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) @@ -804,7 +805,15 @@ struct mlx5_core_dev { struct mlx5_rsc_dump *rsc_dump; u32 vsc_addr; struct mlx5_hv_vhca *hv_vhca; - struct mlx5_thermal *thermal; + struct mlx5_hwmon *hwmon; + u64 num_block_tc; + u64 num_block_ipsec; +#ifdef CONFIG_MLX5_MACSEC + struct mlx5_macsec_fs *macsec_fs; + /* MACsec notifier chain to sync MACsec core and IB database */ + struct blocking_notifier_head macsec_nh; +#endif + u64 num_ipsec_offloads; }; struct mlx5_db { @@ -1018,7 +1027,6 @@ bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev); void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev); -int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); @@ -1055,7 +1063,7 @@ void mlx5_unregister_debugfs(void); void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); -int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn); +int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); @@ -1105,9 +1113,8 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, bool map_wc, bool fast_path); void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); -unsigned int 
mlx5_comp_vectors_count(struct mlx5_core_dev *dev); -struct cpumask * -mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector); +unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev); +int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector); unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, u8 roce_version, u8 roce_l3_type, const u8 *gid, @@ -1320,6 +1327,52 @@ static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev) return mlx5_is_roce_on(dev); } +#ifdef CONFIG_MLX5_MACSEC +static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) +{ + if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & + MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD)) + return false; + + if (!MLX5_CAP_GEN(mdev, log_max_dek)) + return false; + + if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload)) + return false; + + if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) || + !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec)) + return false; + + if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) || + !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec)) + return false; + + if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) && + !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt)) + return false; + + if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) && + !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt)) + return false; + + return true; +} + +#define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX) + +static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev) +{ + if (((MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & + NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) || + !MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, max_modify_header_actions) || + !mlx5e_is_macsec_device(mdev) || !mdev->macsec_fs) + return false; + + return true; +} +#endif + enum { MLX5_OCTWORD = 16, }; diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index e2701ed0200e..950d2431a53c 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -144,6 +144,9 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \ ESW_TUN_OPTS_OFFSET + 1) +/* reuse tun_opts for the mapped ipsec obj id when tun_id is 0 (invalid) */ +#define ESW_IPSEC_RX_MAPPED_ID_MASK GENMASK(ESW_TUN_OPTS_BITS - 1, 0) + u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev); u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw); diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 2cb404c7ea13..1e00c2436377 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -105,15 +105,19 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC, + MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC, + MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC, }; enum { FDB_BYPASS_PATH, + FDB_CRYPTO_INGRESS, FDB_TC_OFFLOAD, FDB_FT_OFFLOAD, FDB_TC_MISS, FDB_BR_OFFLOAD, FDB_SLOW_PATH, + FDB_CRYPTO_EGRESS, FDB_PER_VPORT, }; diff --git a/include/linux/mlx5/macsec.h b/include/linux/mlx5/macsec.h new file mode 100644 index 000000000000..f7ff4c2a95d0 --- /dev/null +++ b/include/linux/mlx5/macsec.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 
2022, NVIDIA CORPORATION & AFFILIATES. */ + +#ifndef MLX5_MACSEC_H +#define MLX5_MACSEC_H + +#ifdef CONFIG_MLX5_MACSEC +struct mlx5_macsec_event_data { + struct mlx5_macsec_fs *macsec_fs; + void *macdev; + u32 fs_id; + bool is_tx; +}; + +int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx, + struct list_head *tx_rules_list, struct list_head *rx_rules_list, + struct mlx5_macsec_fs *macsec_fs); + +void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs, + struct list_head *tx_rules_list, struct list_head *rx_rules_list); + +void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx, + struct list_head *tx_rules_list, + struct list_head *rx_rules_list, + struct mlx5_macsec_fs *macsec_fs, bool is_tx); + +void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs, + struct list_head *tx_rules_list, + struct list_head *rx_rules_list, bool is_tx); + +#endif +#endif /* MLX5_MACSEC_H */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 33344a71c3e3..fc3db401f8a2 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -65,9 +65,11 @@ enum { enum { MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0, + MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS = 0x1, MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4, + MLX5_SET_HCA_CAP_OP_MOD_IPSEC = 0x15, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2 = 0x20, MLX5_SET_HCA_CAP_OP_MOD_PORT_SELECTION = 0x25, }; @@ -464,10 +466,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reformat_add_esp_trasport[0x1]; u8 reformat_l2_to_l3_esp_tunnel[0x1]; - u8 reserved_at_42[0x1]; + u8 reformat_add_esp_transport_over_udp[0x1]; u8 reformat_del_esp_trasport[0x1]; u8 reformat_l3_esp_tunnel_to_l2[0x1]; - u8 reserved_at_45[0x1]; + u8 reformat_del_esp_transport_over_udp[0x1]; u8 execute_aso[0x1]; u8 reserved_at_47[0x19]; @@ -1314,33 +1316,6 @@ struct mlx5_ifc_odp_cap_bits { u8 reserved_at_120[0x6E0]; }; -struct mlx5_ifc_calc_op { - u8 reserved_at_0[0x10]; - u8 reserved_at_10[0x9]; - u8 op_swap_endianness[0x1]; - u8 op_min[0x1]; - u8 op_xor[0x1]; - u8 op_or[0x1]; - u8 op_and[0x1]; - u8 op_max[0x1]; - u8 op_add[0x1]; -}; - -struct mlx5_ifc_vector_calc_cap_bits { - u8 calc_matrix[0x1]; - u8 reserved_at_1[0x1f]; - u8 reserved_at_20[0x8]; - u8 max_vec_count[0x8]; - u8 reserved_at_30[0xd]; - u8 max_chunk_size[0x3]; - struct mlx5_ifc_calc_op calc0; - struct mlx5_ifc_calc_op calc1; - struct mlx5_ifc_calc_op calc2; - struct mlx5_ifc_calc_op calc3; - - u8 reserved_at_c0[0x720]; -}; - struct mlx5_ifc_tls_cap_bits { u8 tls_1_2_aes_gcm_128[0x1]; u8 tls_1_3_aes_gcm_128[0x1]; @@ -3435,20 +3410,6 @@ struct mlx5_ifc_roce_addr_layout_bits { u8 reserved_at_e0[0x20]; }; -struct mlx5_ifc_shampo_cap_bits { - u8 reserved_at_0[0x3]; - u8 shampo_log_max_reservation_size[0x5]; - u8 reserved_at_8[0x3]; - u8 shampo_log_min_reservation_size[0x5]; - u8 shampo_min_mss_size[0x10]; - - u8 reserved_at_20[0x3]; - u8 shampo_max_log_headers_entry_size[0x5]; - u8 reserved_at_28[0x18]; - - u8 reserved_at_40[0x7c0]; -}; - struct mlx5_ifc_crypto_cap_bits { u8 reserved_at_0[0x3]; u8 synchronize_dek[0x1]; @@ -3484,16 +3445,15 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; struct mlx5_ifc_e_switch_cap_bits e_switch_cap; struct mlx5_ifc_port_selection_cap_bits port_selection_cap; - struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; struct mlx5_ifc_qos_cap_bits 
qos_cap; struct mlx5_ifc_debug_cap_bits debug_cap; struct mlx5_ifc_fpga_cap_bits fpga_cap; struct mlx5_ifc_tls_cap_bits tls_cap; struct mlx5_ifc_device_mem_cap_bits device_mem_cap; struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap; - struct mlx5_ifc_shampo_cap_bits shampo_cap; struct mlx5_ifc_macsec_cap_bits macsec_cap; struct mlx5_ifc_crypto_cap_bits crypto_cap; + struct mlx5_ifc_ipsec_cap_bits ipsec_cap; u8 reserved_at_0[0x8000]; }; @@ -6665,9 +6625,12 @@ enum mlx5_reformat_ctx_type { MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4, MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5, MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL = 0x6, + MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4 = 0x7, MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8, MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2 = 0x9, + MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xa, MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xb, + MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xc, MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf, MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10, MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11, @@ -10193,7 +10156,9 @@ struct mlx5_ifc_mcam_access_reg_bits { u8 mrtc[0x1]; u8 regs_44_to_32[0xd]; - u8 regs_31_to_0[0x20]; + u8 regs_31_to_10[0x16]; + u8 mtmp[0x1]; + u8 regs_8_to_0[0x9]; }; struct mlx5_ifc_mcam_access_reg_bits1 { @@ -10853,8 +10818,9 @@ enum { MLX5_MFRL_REG_RESET_STATE_IDLE = 0, MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1, MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2, - MLX5_MFRL_REG_RESET_STATE_TIMEOUT = 3, + MLX5_MFRL_REG_RESET_STATE_NEG_TIMEOUT = 3, MLX5_MFRL_REG_RESET_STATE_NACK = 4, + MLX5_MFRL_REG_RESET_STATE_UNLOAD_TIMEOUT = 5, }; enum { @@ -10946,6 +10912,15 @@ struct mlx5_ifc_mrtc_reg_bits { u8 time_l[0x20]; }; +struct mlx5_ifc_mtcap_reg_bits { + u8 reserved_at_0[0x19]; + u8 sensor_count[0x7]; + + u8 reserved_at_20[0x20]; + + u8 sensor_map[0x40]; +}; + struct mlx5_ifc_mtmp_reg_bits { u8 reserved_at_0[0x14]; u8 sensor_index[0xc]; @@ -11033,6 +11008,7 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_mfrl_reg_bits mfrl_reg; struct mlx5_ifc_mtutc_reg_bits mtutc_reg; struct mlx5_ifc_mrtc_reg_bits mrtc_reg; + struct mlx5_ifc_mtcap_reg_bits mtcap_reg; struct mlx5_ifc_mtmp_reg_bits mtmp_reg; u8 reserved_at_0[0x60e0]; }; diff --git a/include/linux/mm.h b/include/linux/mm.h index 406ab9ea818f..53efddc4d178 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -532,13 +532,6 @@ struct vm_fault { */ }; -/* page entry size for vm->huge_fault() */ -enum page_entry_size { - PE_SIZE_PTE = 0, - PE_SIZE_PMD, - PE_SIZE_PUD, -}; - /* * These are the virtual MM functions - opening of an area, closing and * unmapping it (needed to keep files on disk up-to-date etc), pointer @@ -562,8 +555,7 @@ struct vm_operations_struct { int (*mprotect)(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long newflags); vm_fault_t (*fault)(struct vm_fault *vmf); - vm_fault_t (*huge_fault)(struct vm_fault *vmf, - enum page_entry_size pe_size); + vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order); vm_fault_t (*map_pages)(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); unsigned long (*pagesize)(struct vm_area_struct * area); @@ -679,6 +671,7 @@ static inline void vma_end_read(struct vm_area_struct *vma) rcu_read_unlock(); } +/* WARNING! 
Can only be used if mmap_lock is expected to be write-locked */ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq) { mmap_assert_write_locked(vma->vm_mm); @@ -691,6 +684,11 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq) return (vma->vm_lock_seq == *mm_lock_seq); } +/* + * Begin writing to a VMA. + * Exclude concurrent readers under the per-VMA lock until the currently + * write-locked mmap_lock is dropped or downgraded. + */ static inline void vma_start_write(struct vm_area_struct *vma) { int mm_lock_seq; @@ -709,26 +707,17 @@ static inline void vma_start_write(struct vm_area_struct *vma) up_write(&vma->vm_lock->lock); } -static inline bool vma_try_start_write(struct vm_area_struct *vma) +static inline void vma_assert_write_locked(struct vm_area_struct *vma) { int mm_lock_seq; - if (__is_vma_write_locked(vma, &mm_lock_seq)) - return true; - - if (!down_write_trylock(&vma->vm_lock->lock)) - return false; - - WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); - up_write(&vma->vm_lock->lock); - return true; + VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma); } -static inline void vma_assert_write_locked(struct vm_area_struct *vma) +static inline void vma_assert_locked(struct vm_area_struct *vma) { - int mm_lock_seq; - - VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma); + if (!rwsem_is_locked(&vma->vm_lock->lock)) + vma_assert_write_locked(vma); } static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) @@ -739,6 +728,22 @@ static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) vma->detached = detached; } +static inline void release_fault_lock(struct vm_fault *vmf) +{ + if (vmf->flags & FAULT_FLAG_VMA_LOCK) + vma_end_read(vmf->vma); + else + mmap_read_unlock(vmf->vma->vm_mm); +} + +static inline void assert_fault_locked(struct vm_fault *vmf) +{ + if (vmf->flags & FAULT_FLAG_VMA_LOCK) + vma_assert_locked(vmf->vma); + else + mmap_assert_locked(vmf->vma->vm_mm); +} + struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, unsigned long address); @@ -748,25 +753,40 @@ static inline bool vma_start_read(struct vm_area_struct *vma) { return false; } static inline void vma_end_read(struct vm_area_struct *vma) {} static inline void vma_start_write(struct vm_area_struct *vma) {} -static inline bool vma_try_start_write(struct vm_area_struct *vma) - { return true; } -static inline void vma_assert_write_locked(struct vm_area_struct *vma) {} +static inline void vma_assert_write_locked(struct vm_area_struct *vma) + { mmap_assert_write_locked(vma->vm_mm); } static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) {} +static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, + unsigned long address) +{ + return NULL; +} + +static inline void release_fault_lock(struct vm_fault *vmf) +{ + mmap_read_unlock(vmf->vma->vm_mm); +} + +static inline void assert_fault_locked(struct vm_fault *vmf) +{ + mmap_assert_locked(vmf->vma->vm_mm); +} + #endif /* CONFIG_PER_VMA_LOCK */ +extern const struct vm_operations_struct vma_dummy_vm_ops; + /* * WARNING: vma_init does not initialize vma->vm_lock. * Use vm_area_alloc()/vm_area_free() if vma needs locking. 
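The release_fault_lock()/assert_fault_locked() pair above exists so a fault handler can check or drop whichever lock the fault was taken under, per-VMA lock or mmap_lock, without caring which. A sketch of the intended calling pattern for a handler that must release the lock before sleeping (the handler and the would_block() predicate are hypothetical):

	static vm_fault_t example_fault(struct vm_fault *vmf)
	{
		assert_fault_locked(vmf);	/* per-VMA lock or mmap_lock held */

		if (would_block()) {
			/* Drops the VMA lock if FAULT_FLAG_VMA_LOCK, else mmap_lock. */
			release_fault_lock(vmf);
			return VM_FAULT_RETRY;
		}

		return 0;
	}
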
*/ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) { - static const struct vm_operations_struct dummy_vm_ops = {}; - memset(vma, 0, sizeof(*vma)); vma->vm_mm = mm; - vma->vm_ops = &dummy_vm_ops; + vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); vma_mark_detached(vma, false); vma_numab_state_init(vma); @@ -779,18 +799,22 @@ static inline void vm_flags_init(struct vm_area_struct *vma, ACCESS_PRIVATE(vma, __vm_flags) = flags; } -/* Use when VMA is part of the VMA tree and modifications need coordination */ +/* + * Use when VMA is part of the VMA tree and modifications need coordination + * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and + * it should be locked explicitly beforehand. + */ static inline void vm_flags_reset(struct vm_area_struct *vma, vm_flags_t flags) { - vma_start_write(vma); + vma_assert_write_locked(vma); vm_flags_init(vma, flags); } static inline void vm_flags_reset_once(struct vm_area_struct *vma, vm_flags_t flags) { - vma_start_write(vma); + vma_assert_write_locked(vma); WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags); } @@ -839,6 +863,31 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma) return !vma->vm_ops; } +/* + * Indicate if the VMA is a heap for the given task; for + * /proc/PID/maps that is the heap of the main task. + */ +static inline bool vma_is_initial_heap(const struct vm_area_struct *vma) +{ + return vma->vm_start <= vma->vm_mm->brk && + vma->vm_end >= vma->vm_mm->start_brk; +} + +/* + * Indicate if the VMA is a stack for the given task; for + * /proc/PID/maps that is the stack of the main task. + */ +static inline bool vma_is_initial_stack(const struct vm_area_struct *vma) +{ + /* + * We make no effort to guess what a given thread considers to be + * its "stack". It's not even well-defined for programs written + * languages like Go. + */ + return vma->vm_start <= vma->vm_mm->start_stack && + vma->vm_end >= vma->vm_mm->start_stack; +} + static inline bool vma_is_temporary_stack(struct vm_area_struct *vma) { int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); @@ -976,7 +1025,7 @@ struct inode; * compound_order() can be called without holding a reference, which means * that niceties like page_folio() don't work. These callers should be * prepared to handle wild return values. For example, PG_head may be - * set before _folio_order is initialised, or this may be a tail page. + * set before the order is initialised, or this may be a tail page. * See compaction.c for some good examples. */ static inline unsigned int compound_order(struct page *page) @@ -985,7 +1034,7 @@ static inline unsigned int compound_order(struct page *page) if (!test_bit(PG_head, &folio->flags)) return 0; - return folio->_folio_order; + return folio->_flags_1 & 0xff; } /** @@ -1001,7 +1050,7 @@ static inline unsigned int folio_order(struct folio *folio) { if (!folio_test_large(folio)) return 0; - return folio->_folio_order; + return folio->_flags_1 & 0xff; } #include <linux/huge_mm.h> @@ -1072,11 +1121,6 @@ unsigned long vmalloc_to_pfn(const void *addr); * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there * is no special casing required. 
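vma_is_initial_heap() and vma_is_initial_stack() above centralize range checks that callers previously open-coded; roughly how a /proc/PID/maps-style printer would use them (a sketch, with an illustrative local):

	const char *name = NULL;

	if (vma_is_initial_heap(vma))
		name = "[heap]";
	else if (vma_is_initial_stack(vma))
		name = "[stack]";
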
*/ - -#ifndef is_ioremap_addr -#define is_ioremap_addr(x) is_vmalloc_addr(x) -#endif - #ifdef CONFIG_MMU extern bool is_vmalloc_addr(const void *x); extern int is_vmalloc_or_module_addr(const void *x); @@ -1220,33 +1264,6 @@ void folio_copy(struct folio *dst, struct folio *src); unsigned long nr_free_buffer_pages(void); -/* - * Compound pages have a destructor function. Provide a - * prototype for that function and accessor functions. - * These are _only_ valid on the head of a compound page. - */ -typedef void compound_page_dtor(struct page *); - -/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */ -enum compound_dtor_id { - NULL_COMPOUND_DTOR, - COMPOUND_PAGE_DTOR, -#ifdef CONFIG_HUGETLB_PAGE - HUGETLB_PAGE_DTOR, -#endif -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - TRANSHUGE_PAGE_DTOR, -#endif - NR_COMPOUND_DTORS, -}; - -static inline void folio_set_compound_dtor(struct folio *folio, - enum compound_dtor_id compound_dtor) -{ - VM_BUG_ON_FOLIO(compound_dtor >= NR_COMPOUND_DTORS, folio); - folio->_folio_dtor = compound_dtor; -} - void destroy_large_folio(struct folio *folio); /* Returns the number of bytes in this potentially compound page. */ @@ -1282,8 +1299,6 @@ static inline unsigned long thp_size(struct page *page) return PAGE_SIZE << thp_order(page); } -void free_compound_page(struct page *page); - #ifdef CONFIG_MMU /* * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when @@ -1299,7 +1314,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) } vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page); -void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr); +void set_pte_range(struct vm_fault *vmf, struct folio *folio, + struct page *page, unsigned int nr, unsigned long addr); vm_fault_t finish_fault(struct vm_fault *vmf); vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); @@ -2006,7 +2022,7 @@ static inline long folio_nr_pages(struct folio *folio) #ifdef CONFIG_64BIT return folio->_folio_nr_pages; #else - return 1L << folio->_folio_order; + return 1L << (folio->_flags_1 & 0xff); #endif } @@ -2024,7 +2040,7 @@ static inline unsigned long compound_nr(struct page *page) #ifdef CONFIG_64BIT return folio->_folio_nr_pages; #else - return 1L << folio->_folio_order; + return 1L << (folio->_flags_1 & 0xff); #endif } @@ -2170,7 +2186,6 @@ static inline void *folio_address(const struct folio *folio) return page_address(&folio->page); } -extern void *page_rmapping(struct page *page); extern pgoff_t __page_file_index(struct page *page); /* @@ -2238,18 +2253,6 @@ extern void pagefault_out_of_memory(void); #define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1)) /* - * Flags passed to show_mem() and show_free_areas() to suppress output in - * various contexts. - */ -#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */ - -extern void __show_free_areas(unsigned int flags, nodemask_t *nodemask, int max_zone_idx); -static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodemask) -{ - __show_free_areas(flags, nodemask, MAX_NR_ZONES - 1); -} - -/* * Parameter block passed down to zap_pte_range in exceptional cases. 
*/ struct zap_details { @@ -2317,9 +2320,9 @@ static inline void zap_vma_pages(struct vm_area_struct *vma) zap_page_range_single(vma, vma->vm_start, vma->vm_end - vma->vm_start, NULL); } -void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt, +void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas, struct vm_area_struct *start_vma, unsigned long start, - unsigned long end, bool mm_wr_locked); + unsigned long end, unsigned long tree_end, bool mm_wr_locked); struct mmu_notifier_range; @@ -2766,42 +2769,93 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a } #endif /* CONFIG_MMU */ +static inline struct ptdesc *virt_to_ptdesc(const void *x) +{ + return page_ptdesc(virt_to_page(x)); +} + +static inline void *ptdesc_to_virt(const struct ptdesc *pt) +{ + return page_to_virt(ptdesc_page(pt)); +} + +static inline void *ptdesc_address(const struct ptdesc *pt) +{ + return folio_address(ptdesc_folio(pt)); +} + +static inline bool pagetable_is_reserved(struct ptdesc *pt) +{ + return folio_test_reserved(ptdesc_folio(pt)); +} + +/** + * pagetable_alloc - Allocate pagetables + * @gfp: GFP flags + * @order: desired pagetable order + * + * pagetable_alloc allocates memory for page tables as well as a page table + * descriptor to describe that memory. + * + * Return: The ptdesc describing the allocated page tables. + */ +static inline struct ptdesc *pagetable_alloc(gfp_t gfp, unsigned int order) +{ + struct page *page = alloc_pages(gfp | __GFP_COMP, order); + + return page_ptdesc(page); +} + +/** + * pagetable_free - Free pagetables + * @pt: The page table descriptor + * + * pagetable_free frees the memory of all page tables described by a page + * table descriptor and the memory for the descriptor itself. + */ +static inline void pagetable_free(struct ptdesc *pt) +{ + struct page *page = ptdesc_page(pt); + + __free_pages(page, compound_order(page)); +} + #if USE_SPLIT_PTE_PTLOCKS #if ALLOC_SPLIT_PTLOCKS void __init ptlock_cache_init(void); -extern bool ptlock_alloc(struct page *page); -extern void ptlock_free(struct page *page); +bool ptlock_alloc(struct ptdesc *ptdesc); +void ptlock_free(struct ptdesc *ptdesc); -static inline spinlock_t *ptlock_ptr(struct page *page) +static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc) { - return page->ptl; + return ptdesc->ptl; } #else /* ALLOC_SPLIT_PTLOCKS */ static inline void ptlock_cache_init(void) { } -static inline bool ptlock_alloc(struct page *page) +static inline bool ptlock_alloc(struct ptdesc *ptdesc) { return true; } -static inline void ptlock_free(struct page *page) +static inline void ptlock_free(struct ptdesc *ptdesc) { } -static inline spinlock_t *ptlock_ptr(struct page *page) +static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc) { - return &page->ptl; + return &ptdesc->ptl; } #endif /* ALLOC_SPLIT_PTLOCKS */ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) { - return ptlock_ptr(pmd_page(*pmd)); + return ptlock_ptr(page_ptdesc(pmd_page(*pmd))); } -static inline bool ptlock_init(struct page *page) +static inline bool ptlock_init(struct ptdesc *ptdesc) { /* * prep_new_page() initialize page->private (and therefore page->ptl) @@ -2810,10 +2864,10 @@ static inline bool ptlock_init(struct page *page) * It can happen if arch try to use slab for page table allocation: * slab code uses page->slab_cache, which share storage with page->ptl. 
*/ - VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); - if (!ptlock_alloc(page)) + VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc)); + if (!ptlock_alloc(ptdesc)) return false; - spin_lock_init(ptlock_ptr(page)); + spin_lock_init(ptlock_ptr(ptdesc)); return true; } @@ -2826,24 +2880,28 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) return &mm->page_table_lock; } static inline void ptlock_cache_init(void) {} -static inline bool ptlock_init(struct page *page) { return true; } -static inline void ptlock_free(struct page *page) {} +static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; } +static inline void ptlock_free(struct ptdesc *ptdesc) {} #endif /* USE_SPLIT_PTE_PTLOCKS */ -static inline bool pgtable_pte_page_ctor(struct page *page) +static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc) { - if (!ptlock_init(page)) + struct folio *folio = ptdesc_folio(ptdesc); + + if (!ptlock_init(ptdesc)) return false; - __SetPageTable(page); - inc_lruvec_page_state(page, NR_PAGETABLE); + __folio_set_pgtable(folio); + lruvec_stat_add_folio(folio, NR_PAGETABLE); return true; } -static inline void pgtable_pte_page_dtor(struct page *page) +static inline void pagetable_pte_dtor(struct ptdesc *ptdesc) { - ptlock_free(page); - __ClearPageTable(page); - dec_lruvec_page_state(page, NR_PAGETABLE); + struct folio *folio = ptdesc_folio(ptdesc); + + ptlock_free(ptdesc); + __folio_clear_pgtable(folio); + lruvec_stat_sub_folio(folio, NR_PAGETABLE); } pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp); @@ -2892,28 +2950,33 @@ static inline struct page *pmd_pgtable_page(pmd_t *pmd) return virt_to_page((void *)((unsigned long) pmd & mask)); } +static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd) +{ + return page_ptdesc(pmd_pgtable_page(pmd)); +} + static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) { - return ptlock_ptr(pmd_pgtable_page(pmd)); + return ptlock_ptr(pmd_ptdesc(pmd)); } -static inline bool pmd_ptlock_init(struct page *page) +static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - page->pmd_huge_pte = NULL; + ptdesc->pmd_huge_pte = NULL; #endif - return ptlock_init(page); + return ptlock_init(ptdesc); } -static inline void pmd_ptlock_free(struct page *page) +static inline void pmd_ptlock_free(struct ptdesc *ptdesc) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - VM_BUG_ON_PAGE(page->pmd_huge_pte, page); + VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc)); #endif - ptlock_free(page); + ptlock_free(ptdesc); } -#define pmd_huge_pte(mm, pmd) (pmd_pgtable_page(pmd)->pmd_huge_pte) +#define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte) #else @@ -2922,8 +2985,8 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) return &mm->page_table_lock; } -static inline bool pmd_ptlock_init(struct page *page) { return true; } -static inline void pmd_ptlock_free(struct page *page) {} +static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; } +static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {} #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) @@ -2936,20 +2999,24 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) return ptl; } -static inline bool pgtable_pmd_page_ctor(struct page *page) +static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc) { - if (!pmd_ptlock_init(page)) + struct folio *folio = ptdesc_folio(ptdesc); + + if (!pmd_ptlock_init(ptdesc)) return false; - __SetPageTable(page); - 
inc_lruvec_page_state(page, NR_PAGETABLE); + __folio_set_pgtable(folio); + lruvec_stat_add_folio(folio, NR_PAGETABLE); return true; } -static inline void pgtable_pmd_page_dtor(struct page *page) +static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc) { - pmd_ptlock_free(page); - __ClearPageTable(page); - dec_lruvec_page_state(page, NR_PAGETABLE); + struct folio *folio = ptdesc_folio(ptdesc); + + pmd_ptlock_free(ptdesc); + __folio_clear_pgtable(folio); + lruvec_stat_sub_folio(folio, NR_PAGETABLE); } /* @@ -3004,6 +3071,11 @@ static inline void mark_page_reserved(struct page *page) adjust_managed_page_count(page, -1); } +static inline void free_reserved_ptdesc(struct ptdesc *pt) +{ + free_reserved_page(ptdesc_page(pt)); +} + /* * Default method to free all the __init memory into the buddy system. * The freed pages will be poisoned with pattern "poison" if it's within @@ -3069,9 +3141,9 @@ extern void mem_init(void); extern void __init mmap_init(void); extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx); -static inline void show_mem(unsigned int flags, nodemask_t *nodemask) +static inline void show_mem(void) { - __show_mem(flags, nodemask, MAX_NR_ZONES - 1); + __show_mem(0, NULL, MAX_NR_ZONES - 1); } extern long si_mem_available(void); extern void si_meminfo(struct sysinfo * val); @@ -3421,15 +3493,24 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags) * Indicates whether GUP can follow a PROT_NONE mapped page, or whether * a (NUMA hinting) fault is required. */ -static inline bool gup_can_follow_protnone(unsigned int flags) +static inline bool gup_can_follow_protnone(struct vm_area_struct *vma, + unsigned int flags) { /* - * FOLL_FORCE has to be able to make progress even if the VMA is - * inaccessible. Further, FOLL_FORCE access usually does not represent - * application behaviour and we should avoid triggering NUMA hinting - * faults. + * If callers don't want to honor NUMA hinting faults, no need to + * determine if we would actually have to trigger a NUMA hinting fault. + */ + if (!(flags & FOLL_HONOR_NUMA_FAULT)) + return true; + + /* + * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs. + * + * Requiring a fault here even for inaccessible VMAs would mean that + * FOLL_FORCE cannot make any progress, because handle_mm_fault() + * refuses to process NUMA hinting faults in inaccessible VMAs. */ - return flags & FOLL_FORCE; + return !vma_is_accessible(vma); } typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data); @@ -3500,8 +3581,8 @@ static inline bool debug_pagealloc_enabled(void) } /* - * For use in fast paths after init_debug_pagealloc() has run, or when a - * false negative result is not harmful when called too early. + * For use in fast paths after mem_debugging_and_hardening_init() has run, + * or when a false negative result is not harmful when called too early. 
*/ static inline bool debug_pagealloc_enabled_static(void) { @@ -3656,13 +3737,32 @@ void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap); #endif -#ifdef CONFIG_ARCH_WANT_OPTIMIZE_VMEMMAP -static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap, - struct dev_pagemap *pgmap) +#define VMEMMAP_RESERVE_NR 2 +#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP +static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap, + struct dev_pagemap *pgmap) { - return is_power_of_2(sizeof(struct page)) && - pgmap && (pgmap_vmemmap_nr(pgmap) > 1) && !altmap; + unsigned long nr_pages; + unsigned long nr_vmemmap_pages; + + if (!pgmap || !is_power_of_2(sizeof(struct page))) + return false; + + nr_pages = pgmap_vmemmap_nr(pgmap); + nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT); + /* + * For vmemmap optimization with DAX we need minimum 2 vmemmap + * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst + */ + return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR); } +/* + * If we don't have an architecture override, use the generic rule + */ +#ifndef vmemmap_can_optimize +#define vmemmap_can_optimize __vmemmap_can_optimize +#endif + #else static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap) diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 21d6c72bcc71..8148b30a9df1 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -523,6 +523,27 @@ static inline bool mm_tlb_flush_nested(struct mm_struct *mm) return atomic_read(&mm->tlb_flush_pending) > 1; } +#ifdef CONFIG_MMU +/* + * Computes the pte marker to copy from the given source entry into dst_vma. + * If no marker should be copied, returns 0. + * The caller should insert a new pte created with make_pte_marker(). + */ +static inline pte_marker copy_pte_marker( + swp_entry_t entry, struct vm_area_struct *dst_vma) +{ + pte_marker srcm = pte_marker_get(entry); + /* Always copy error entries. */ + pte_marker dstm = srcm & PTE_MARKER_POISONED; + + /* Only copy PTE markers if UFFD register matches. */ + if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma)) + dstm |= PTE_MARKER_UFFD_WP; + + return dstm; +} +#endif + /* * If this pte is wr-protected by uffd-wp in any form, arm the special pte to * replace a none pte. NOTE! This should only be called when *pte is already diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5e74ce4a28cd..36c5b43999e6 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -141,20 +141,6 @@ struct page { struct { /* Tail pages of compound page */ unsigned long compound_head; /* Bit zero is set */ }; - struct { /* Page table pages */ - unsigned long _pt_pad_1; /* compound_head */ - pgtable_t pmd_huge_pte; /* protected by page->ptl */ - unsigned long _pt_pad_2; /* mapping */ - union { - struct mm_struct *pt_mm; /* x86 pgds only */ - atomic_t pt_frag_refcount; /* powerpc */ - }; -#if ALLOC_SPLIT_PTLOCKS - spinlock_t *ptl; -#else - spinlock_t ptl; -#endif - }; struct { /* ZONE_DEVICE pages */ /** @pgmap: Points to the hosting device page map. */ struct dev_pagemap *pgmap; @@ -262,6 +248,14 @@ static inline struct page *encoded_page_ptr(struct encoded_page *page) return (struct page *)(~ENCODE_PAGE_BITS & (unsigned long)page); } +/* + * A swap entry has to fit into a "unsigned long", as the entry is hidden + * in the "index" field of the swapper address space. 
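The reworked helper makes the optimization threshold explicit: the pgmap's vmemmap must span more than VMEMMAP_RESERVE_NR (2) pages. A worked example with assumed parameters — 4 KiB PAGE_SIZE, a 64-byte struct page, and a 2 MiB device-dax mapping:

	nr_pages         = 2 MiB / 4 KiB          = 512
	nr_vmemmap_pages = (512 * 64 B) >> 12     = 8

	8 > VMEMMAP_RESERVE_NR (2), sizeof(struct page) is a power of two, and
	altmap is NULL, so the vmemmap for this pgmap qualifies for optimization.
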
+ */ +typedef struct { + unsigned long val; +} swp_entry_t; + /** * struct folio - Represents a contiguous set of bytes. * @flags: Identical to the page flags. @@ -272,14 +266,12 @@ static inline struct page *encoded_page_ptr(struct encoded_page *page) * @index: Offset within the file, in units of pages. For anonymous memory, * this is the index from the beginning of the mmap. * @private: Filesystem per-folio data (see folio_attach_private()). - * Used for swp_entry_t if folio_test_swapcache(). + * @swap: Used for swp_entry_t if folio_test_swapcache(). * @_mapcount: Do not access this member directly. Use folio_mapcount() to * find out how many times this folio is mapped by userspace. * @_refcount: Do not access this member directly. Use folio_ref_count() * to find how many references there are to this folio. * @memcg_data: Memory Control Group data. - * @_folio_dtor: Which destructor to use for this folio. - * @_folio_order: Do not use directly, call folio_order(). * @_entire_mapcount: Do not use directly, call folio_entire_mapcount(). * @_nr_pages_mapped: Do not use directly, call folio_mapcount(). * @_pincount: Do not use directly, call folio_maybe_dma_pinned(). @@ -317,7 +309,10 @@ struct folio { }; struct address_space *mapping; pgoff_t index; - void *private; + union { + void *private; + swp_entry_t swap; + }; atomic_t _mapcount; atomic_t _refcount; #ifdef CONFIG_MEMCG @@ -331,9 +326,8 @@ struct folio { struct { unsigned long _flags_1; unsigned long _head_1; + unsigned long _folio_avail; /* public: */ - unsigned char _folio_dtor; - unsigned char _folio_order; atomic_t _entire_mapcount; atomic_t _nr_pages_mapped; atomic_t _pincount; @@ -391,8 +385,89 @@ FOLIO_MATCH(compound_head, _head_1); offsetof(struct page, pg) + 2 * sizeof(struct page)) FOLIO_MATCH(flags, _flags_2); FOLIO_MATCH(compound_head, _head_2); +FOLIO_MATCH(flags, _flags_2a); +FOLIO_MATCH(compound_head, _head_2a); #undef FOLIO_MATCH +/** + * struct ptdesc - Memory descriptor for page tables. + * @__page_flags: Same as page flags. Unused for page tables. + * @pt_rcu_head: For freeing page table pages. + * @pt_list: List of used page tables. Used for s390 and x86. + * @_pt_pad_1: Padding that aliases with page's compound head. + * @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs. + * @__page_mapping: Aliases with page->mapping. Unused for page tables. + * @pt_mm: Used for x86 pgds. + * @pt_frag_refcount: For fragmented page table tracking. Powerpc and s390 only. + * @_pt_pad_2: Padding to ensure proper alignment. + * @ptl: Lock for the page table. + * @__page_type: Same as page->page_type. Unused for page tables. + * @_refcount: Same as page refcount. Used for s390 page tables. + * @pt_memcg_data: Memcg data. Tracked for page tables here. + * + * This struct overlays struct page for now. Do not modify without a good + * understanding of the issues. 
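The new union gives the swap entry a typed name instead of punning it through @private. A sketch of the reader side, which previously had to go through page_private()/folio->private:

	if (folio_test_swapcache(folio)) {
		swp_entry_t entry = folio->swap;	/* aliases folio->private */
		unsigned long offset = swp_offset(entry);	/* slot in the swap device */
	}
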
+ */ +struct ptdesc { + unsigned long __page_flags; + + union { + struct rcu_head pt_rcu_head; + struct list_head pt_list; + struct { + unsigned long _pt_pad_1; + pgtable_t pmd_huge_pte; + }; + }; + unsigned long __page_mapping; + + union { + struct mm_struct *pt_mm; + atomic_t pt_frag_refcount; + }; + + union { + unsigned long _pt_pad_2; +#if ALLOC_SPLIT_PTLOCKS + spinlock_t *ptl; +#else + spinlock_t ptl; +#endif + }; + unsigned int __page_type; + atomic_t _refcount; +#ifdef CONFIG_MEMCG + unsigned long pt_memcg_data; +#endif +}; + +#define TABLE_MATCH(pg, pt) \ + static_assert(offsetof(struct page, pg) == offsetof(struct ptdesc, pt)) +TABLE_MATCH(flags, __page_flags); +TABLE_MATCH(compound_head, pt_list); +TABLE_MATCH(compound_head, _pt_pad_1); +TABLE_MATCH(mapping, __page_mapping); +TABLE_MATCH(rcu_head, pt_rcu_head); +TABLE_MATCH(page_type, __page_type); +TABLE_MATCH(_refcount, _refcount); +#ifdef CONFIG_MEMCG +TABLE_MATCH(memcg_data, pt_memcg_data); +#endif +#undef TABLE_MATCH +static_assert(sizeof(struct ptdesc) <= sizeof(struct page)); + +#define ptdesc_page(pt) (_Generic((pt), \ + const struct ptdesc *: (const struct page *)(pt), \ + struct ptdesc *: (struct page *)(pt))) + +#define ptdesc_folio(pt) (_Generic((pt), \ + const struct ptdesc *: (const struct folio *)(pt), \ + struct ptdesc *: (struct folio *)(pt))) + +#define page_ptdesc(p) (_Generic((p), \ + const struct page *: (const struct ptdesc *)(p), \ + struct page *: (struct ptdesc *)(p))) + /* * Used for sizing the vmemmap region on some architectures */ @@ -812,7 +887,7 @@ struct mm_struct { #ifdef CONFIG_KSM /* * Represent how many pages of this process are involved in KSM - * merging. + * merging (not including ksm_zero_pages). */ unsigned long ksm_merging_pages; /* @@ -820,7 +895,12 @@ struct mm_struct { * including merged and not merged. */ unsigned long ksm_rmap_items; -#endif + /* + * Represent how many empty pages are merged with kernel zero + * pages when enabling KSM use_zero_pages. + */ + unsigned long ksm_zero_pages; +#endif /* CONFIG_KSM */ #ifdef CONFIG_LRU_GEN struct { /* this mm_struct is on lru_gen_mm_list */ @@ -1105,7 +1185,8 @@ enum vm_fault_reason { { VM_FAULT_RETRY, "RETRY" }, \ { VM_FAULT_FALLBACK, "FALLBACK" }, \ { VM_FAULT_DONE_COW, "DONE_COW" }, \ - { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" } + { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" }, \ + { VM_FAULT_COMPLETED, "COMPLETED" } struct vm_special_mapping { const char *name; /* The name, e.g. "[vdso]". */ @@ -1139,14 +1220,6 @@ enum tlb_flush_reason { NR_TLB_FLUSH_REASONS, }; - /* - * A swap entry has to fit into a "unsigned long", as the entry is hidden - * in the "index" field of the swapper address space. - */ -typedef struct { - unsigned long val; -} swp_entry_t; - /** * enum fault_flag - Fault flag definitions. * @FAULT_FLAG_WRITE: Fault was a write fault. @@ -1286,6 +1359,15 @@ enum { FOLL_PCI_P2PDMA = 1 << 10, /* allow interrupts from generic signals */ FOLL_INTERRUPTIBLE = 1 << 11, + /* + * Always honor (trigger) NUMA hinting faults. + * + * FOLL_WRITE implicitly honors NUMA hinting faults because a + * PROT_NONE-mapped page is not writable (exceptions with FOLL_FORCE + * apply). get_user_pages_fast_only() always implicitly honors NUMA + * hinting faults. 
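The inverted logic matters for GUP callers: honoring NUMA hints becomes opt-in via the FOLL_HONOR_NUMA_FAULT flag defined just below, rather than being implied by the absence of FOLL_FORCE. The follow-page path would consume it roughly like this (a sketch of the caller side, not part of this patch):

	if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
		return NULL;	/* back out; handle_mm_fault() resolves the hint */
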
+ */ + FOLL_HONOR_NUMA_FAULT = 1 << 12, /* See also internal only FOLL flags in mm/internal.h */ }; diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h index 5414b5c6a103..aa44fff8bb9d 100644 --- a/include/linux/mm_types_task.h +++ b/include/linux/mm_types_task.h @@ -52,8 +52,8 @@ struct tlbflush_unmap_batch { #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH /* * The arch code makes the following promise: generic code can modify a - * PTE, then call arch_tlbbatch_add_mm() (which internally provides all - * needed barriers), then call arch_tlbbatch_flush(), and the entries + * PTE, then call arch_tlbbatch_add_pending() (which internally provides + * all needed barriers), then call arch_tlbbatch_flush(), and the entries * will be flushed on all CPUs by the time that arch_tlbbatch_flush() * returns. */ diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index e05e167dbd16..8d38dcb6d044 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -73,6 +73,14 @@ static inline void mmap_assert_write_locked(struct mm_struct *mm) } #ifdef CONFIG_PER_VMA_LOCK +/* + * Drop all currently-held per-VMA locks. + * This is called from the mmap_lock implementation directly before releasing + * a write-locked mmap_lock (or downgrading it to read-locked). + * This should normally NOT be called manually from other places. + * If you want to call this manually anyway, keep in mind that this will release + * *all* VMA write locks, including ones from further up the stack. + */ static inline void vma_end_write_all(struct mm_struct *mm) { mmap_assert_write_locked(mm); @@ -118,16 +126,6 @@ static inline int mmap_write_lock_killable(struct mm_struct *mm) return ret; } -static inline bool mmap_write_trylock(struct mm_struct *mm) -{ - bool ret; - - __mmap_lock_trace_start_locking(mm, true); - ret = down_write_trylock(&mm->mmap_lock) != 0; - __mmap_lock_trace_acquire_returned(mm, true, ret); - return ret; -} - static inline void mmap_write_unlock(struct mm_struct *mm) { __mmap_lock_trace_released(mm, true); diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 461d1543893b..62a6847a3b6f 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -184,6 +184,12 @@ struct mmc_host_ops { /* Execute HS400 tuning depending host driver */ int (*execute_hs400_tuning)(struct mmc_host *host, struct mmc_card *card); + /* Optional callback to prepare for SD high-speed tuning */ + int (*prepare_sd_hs_tuning)(struct mmc_host *host, struct mmc_card *card); + + /* Optional callback to execute SD high-speed tuning */ + int (*execute_sd_hs_tuning)(struct mmc_host *host, struct mmc_card *card); + /* Prepare switch to DDR during the HS400 init sequence */ int (*hs400_prepare_ddr)(struct mmc_host *host); @@ -665,6 +671,8 @@ static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host, host->err_stats[stat] += 1; } +int mmc_sd_switch(struct mmc_card *card, int mode, int group, u8 value, u8 *resp); +int mmc_send_status(struct mmc_card *card, u32 *status); int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode); int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 64a3e051c3c4..6e3c857606f1 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -187,27 +187,27 @@ struct mmu_notifier_ops { const struct mmu_notifier_range *range); /* - * invalidate_range() is either 
called between - * invalidate_range_start() and invalidate_range_end() when the - * VM has to free pages that where unmapped, but before the - * pages are actually freed, or outside of _start()/_end() when - * a (remote) TLB is necessary. + * arch_invalidate_secondary_tlbs() is used to manage a non-CPU TLB + * which shares page-tables with the CPU. The + * invalidate_range_start()/end() callbacks should not be implemented as + * invalidate_secondary_tlbs() already catches the points in time when + * an external TLB needs to be flushed. * - * If invalidate_range() is used to manage a non-CPU TLB with - * shared page-tables, it not necessary to implement the - * invalidate_range_start()/end() notifiers, as - * invalidate_range() already catches the points in time when an - * external TLB range needs to be flushed. For more in depth - * discussion on this see Documentation/mm/mmu_notifier.rst + * This requires arch_invalidate_secondary_tlbs() to be called while + * holding the ptl spin-lock and therefore this callback is not allowed + * to sleep. * - * Note that this function might be called with just a sub-range - * of what was passed to invalidate_range_start()/end(), if - * called between those functions. + * This is called by architecture code whenever invalidating a TLB + * entry. It is assumed that any secondary TLB has the same rules for + * when invalidations are required. If this is not the case architecture + * code will need to call this explicitly when required for secondary + * TLB invalidation. */ - void (*invalidate_range)(struct mmu_notifier *subscription, - struct mm_struct *mm, - unsigned long start, - unsigned long end); + void (*arch_invalidate_secondary_tlbs)( + struct mmu_notifier *subscription, + struct mm_struct *mm, + unsigned long start, + unsigned long end); /* * These callbacks are used with the get/put interface to manage the @@ -395,10 +395,9 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm, extern void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, pte_t pte); extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r); -extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r, - bool only_end); -extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, - unsigned long start, unsigned long end); +extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r); +extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm, + unsigned long start, unsigned long end); extern bool mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range); @@ -481,21 +480,14 @@ mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range) might_sleep(); if (mm_has_notifiers(range->mm)) - __mmu_notifier_invalidate_range_end(range, false); -} - -static inline void -mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range) -{ - if (mm_has_notifiers(range->mm)) - __mmu_notifier_invalidate_range_end(range, true); + __mmu_notifier_invalidate_range_end(range); } -static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, - unsigned long start, unsigned long end) +static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm, + unsigned long start, unsigned long end) { if (mm_has_notifiers(mm)) - __mmu_notifier_invalidate_range(mm, start, end); + __mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end); } static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm) @@ -582,45 +574,6 @@ 
static inline void mmu_notifier_range_init_owner( __young; \ }) -#define ptep_clear_flush_notify(__vma, __address, __ptep) \ -({ \ - unsigned long ___addr = __address & PAGE_MASK; \ - struct mm_struct *___mm = (__vma)->vm_mm; \ - pte_t ___pte; \ - \ - ___pte = ptep_clear_flush(__vma, __address, __ptep); \ - mmu_notifier_invalidate_range(___mm, ___addr, \ - ___addr + PAGE_SIZE); \ - \ - ___pte; \ -}) - -#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \ -({ \ - unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ - struct mm_struct *___mm = (__vma)->vm_mm; \ - pmd_t ___pmd; \ - \ - ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \ - mmu_notifier_invalidate_range(___mm, ___haddr, \ - ___haddr + HPAGE_PMD_SIZE); \ - \ - ___pmd; \ -}) - -#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \ -({ \ - unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \ - struct mm_struct *___mm = (__vma)->vm_mm; \ - pud_t ___pud; \ - \ - ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \ - mmu_notifier_invalidate_range(___mm, ___haddr, \ - ___haddr + HPAGE_PUD_SIZE); \ - \ - ___pud; \ -}) - /* * set_pte_at_notify() sets the pte _after_ running the notifier. * This is safe to start by updating the secondary MMUs, because the primary MMU @@ -711,12 +664,7 @@ void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range) { } -static inline void -mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range) -{ -} - -static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, +static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm, unsigned long start, unsigned long end) { } diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 5e50b78d58ea..4106fbc5b4b3 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -676,7 +676,6 @@ enum zone_watermarks { #define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost) #define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost) -/* Fields and list protected by pagesets local_lock in page_alloc.c */ struct per_cpu_pages { spinlock_t lock; /* Protects lists field */ int count; /* number of pages in the list */ diff --git a/include/linux/net.h b/include/linux/net.h index 41c608c1b02c..c9b4a63791a4 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -123,7 +123,7 @@ struct socket { struct file *file; struct sock *sk; - const struct proto_ops *ops; + const struct proto_ops *ops; /* Might change with IPV6_ADDRFORM or MPTCP. 
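For a driver whose device shares the CPU page tables, the rename reduces the work to one callback instead of the old start/end/invalidate triple. A sketch under that assumption — the my_* names are hypothetical:

	static void my_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
						      struct mm_struct *mm,
						      unsigned long start,
						      unsigned long end)
	{
		/* Called with the ptl held: must not sleep. */
		my_device_flush_tlb_range(mn, start, end);	/* hypothetical */
	}

	static const struct mmu_notifier_ops my_mn_ops = {
		.arch_invalidate_secondary_tlbs = my_arch_invalidate_secondary_tlbs,
	};
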
*/ struct socket_wq wq; }; diff --git a/include/linux/net_mm.h b/include/linux/net_mm.h deleted file mode 100644 index b298998bd5a0..000000000000 --- a/include/linux/net_mm.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -#ifdef CONFIG_MMU - -#ifdef CONFIG_INET -extern const struct vm_operations_struct tcp_vm_ops; -static inline bool vma_is_tcp(const struct vm_area_struct *vma) -{ - return vma->vm_ops == &tcp_vm_ops; -} -#else -static inline bool vma_is_tcp(const struct vm_area_struct *vma) -{ - return false; -} -#endif /* CONFIG_INET*/ - -#endif /* CONFIG_MMU */ diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h index fd67f3cc0c4b..eb01c37e71e0 100644 --- a/include/linux/net_tstamp.h +++ b/include/linux/net_tstamp.h @@ -5,12 +5,23 @@ #include <uapi/linux/net_tstamp.h> +enum hwtstamp_source { + HWTSTAMP_SOURCE_NETDEV, + HWTSTAMP_SOURCE_PHYLIB, +}; + /** * struct kernel_hwtstamp_config - Kernel copy of struct hwtstamp_config * * @flags: see struct hwtstamp_config * @tx_type: see struct hwtstamp_config * @rx_filter: see struct hwtstamp_config + * @ifr: pointer to ifreq structure from the original ioctl request, to pass to + * a legacy implementation of a lower driver + * @copied_to_user: request was passed to a legacy implementation which already + * copied the ioctl request back to user space + * @source: indication whether timestamps should come from the netdev or from + * an attached phylib PHY * * Prefer using this structure for in-kernel processing of hardware * timestamping configuration, over the inextensible struct hwtstamp_config @@ -20,6 +31,9 @@ struct kernel_hwtstamp_config { int flags; int tx_type; int rx_filter; + struct ifreq *ifr; + bool copied_to_user; + enum hwtstamp_source source; }; static inline void hwtstamp_config_to_kernel(struct kernel_hwtstamp_config *kernel_cfg, @@ -30,4 +44,20 @@ static inline void hwtstamp_config_to_kernel(struct kernel_hwtstamp_config *kern kernel_cfg->rx_filter = cfg->rx_filter; } +static inline void hwtstamp_config_from_kernel(struct hwtstamp_config *cfg, + const struct kernel_hwtstamp_config *kernel_cfg) +{ + cfg->flags = kernel_cfg->flags; + cfg->tx_type = kernel_cfg->tx_type; + cfg->rx_filter = kernel_cfg->rx_filter; +} + +static inline bool kernel_hwtstamp_config_changed(const struct kernel_hwtstamp_config *a, + const struct kernel_hwtstamp_config *b) +{ + return a->flags != b->flags || + a->tx_type != b->tx_type || + a->rx_filter != b->rx_filter; +} + #endif /* _LINUX_NET_TIMESTAMPING_H_ */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b828c7a75be2..0896aaa91dd7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -40,7 +40,6 @@ #include <net/dcbnl.h> #endif #include <net/netprio_cgroup.h> -#include <net/xdp.h> #include <linux/netdev_features.h> #include <linux/neighbour.h> @@ -57,6 +56,7 @@ struct netpoll_info; struct device; struct ethtool_ops; +struct kernel_hwtstamp_config; struct phy_device; struct dsa_port; struct ip_tunnel_parm; @@ -76,8 +76,12 @@ struct udp_tunnel_nic_info; struct udp_tunnel_nic; struct bpf_prog; struct xdp_buff; +struct xdp_frame; +struct xdp_metadata_ops; struct xdp_md; +typedef u32 xdp_features_t; + void synchronize_net(void); void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); @@ -782,32 +786,6 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, #endif #endif /* CONFIG_RPS */ -/* This structure contains an instance of an RX queue. 
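hwtstamp_config_from_kernel() is the inverse of the existing hwtstamp_config_to_kernel(), and kernel_hwtstamp_config_changed() lets core code skip the driver call when an ioctl re-applies the current settings. A sketch of the round trip, assuming the copy_from_user()/copy_to_user() plumbing around it and a saved_cfg field holding the driver's current state:

	struct hwtstamp_config cfg;		/* UAPI layout from the ioctl */
	struct kernel_hwtstamp_config kcfg = {};

	hwtstamp_config_to_kernel(&kcfg, &cfg);
	if (kernel_hwtstamp_config_changed(&kcfg, &saved_cfg))
		ret = apply_to_hw(&kcfg);	/* hypothetical driver hook */
	hwtstamp_config_from_kernel(&cfg, &kcfg);	/* copy the result back out */
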
*/ -struct netdev_rx_queue { - struct xdp_rxq_info xdp_rxq; -#ifdef CONFIG_RPS - struct rps_map __rcu *rps_map; - struct rps_dev_flow_table __rcu *rps_flow_table; -#endif - struct kobject kobj; - struct net_device *dev; - netdevice_tracker dev_tracker; - -#ifdef CONFIG_XDP_SOCKETS - struct xsk_buff_pool *pool; -#endif -} ____cacheline_aligned_in_smp; - -/* - * RX queue sysfs structures and functions. - */ -struct rx_queue_attribute { - struct attribute attr; - ssize_t (*show)(struct netdev_rx_queue *queue, char *buf); - ssize_t (*store)(struct netdev_rx_queue *queue, - const char *buf, size_t len); -}; - /* XPS map type and offset of the xps map within net_device->xps_maps[]. */ enum xps_map_type { XPS_CPUS = 0, @@ -1418,6 +1396,16 @@ struct netdev_net_notifier { * Get hardware timestamp based on normal/adjustable time or free running * cycle counter. This function is required if physical clock supports a * free running cycle counter. + * + * int (*ndo_hwtstamp_get)(struct net_device *dev, + * struct kernel_hwtstamp_config *kernel_config); + * Get the currently configured hardware timestamping parameters for the + * NIC device. + * + * int (*ndo_hwtstamp_set)(struct net_device *dev, + * struct kernel_hwtstamp_config *kernel_config, + * struct netlink_ext_ack *extack); + * Change the hardware timestamping parameters for NIC device. */ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1652,12 +1640,11 @@ struct net_device_ops { ktime_t (*ndo_get_tstamp)(struct net_device *dev, const struct skb_shared_hwtstamps *hwtstamps, bool cycles); -}; - -struct xdp_metadata_ops { - int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp); - int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash, - enum xdp_rss_hash_type *rss_type); + int (*ndo_hwtstamp_get)(struct net_device *dev, + struct kernel_hwtstamp_config *kernel_config); + int (*ndo_hwtstamp_set)(struct net_device *dev, + struct kernel_hwtstamp_config *kernel_config, + struct netlink_ext_ack *extack); }; /** @@ -1708,6 +1695,9 @@ struct xdp_metadata_ops { * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with * skb_headlen(skb) == 0 (data starts from frag0) * @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN + * @IFF_SEE_ALL_HWTSTAMP_REQUESTS: device wants to see calls to + * ndo_hwtstamp_set() for all timestamp requests regardless of source, + * even if those aren't HWTSTAMP_SOURCE_NETDEV. 
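A converted driver then wires the two new ndos instead of parsing SIOCSHWTSTAMP inside ndo_eth_ioctl(). A minimal sketch; my_priv and tstamp_config are hypothetical driver state:

	static int my_hwtstamp_get(struct net_device *dev,
				   struct kernel_hwtstamp_config *config)
	{
		struct my_priv *priv = netdev_priv(dev);

		*config = priv->tstamp_config;
		return 0;
	}

	static int my_hwtstamp_set(struct net_device *dev,
				   struct kernel_hwtstamp_config *config,
				   struct netlink_ext_ack *extack)
	{
		struct my_priv *priv = netdev_priv(dev);

		if (config->tx_type != HWTSTAMP_TX_OFF &&
		    config->tx_type != HWTSTAMP_TX_ON) {
			NL_SET_ERR_MSG(extack, "unsupported tx_type");
			return -ERANGE;
		}
		priv->tstamp_config = *config;
		return 0;
	}

	static const struct net_device_ops my_netdev_ops = {
		.ndo_hwtstamp_get = my_hwtstamp_get,
		.ndo_hwtstamp_set = my_hwtstamp_set,
	};
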
*/ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1743,6 +1733,7 @@ enum netdev_priv_flags { IFF_NO_ADDRCONF = BIT_ULL(30), IFF_TX_SKB_NO_LINEAR = BIT_ULL(31), IFF_CHANGE_PROTO_DOWN = BIT_ULL(32), + IFF_SEE_ALL_HWTSTAMP_REQUESTS = BIT_ULL(33), }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1930,8 +1921,7 @@ enum netdev_ml_priv_type { * * @rx_handler: handler for received packets * @rx_handler_data: XXX: need comments on this one - * @miniq_ingress: ingress/clsact qdisc specific data for - * ingress processing + * @tcx_ingress: BPF & clsact qdisc specific data for ingress processing * @ingress_queue: XXX: need comments on this one * @nf_hooks_ingress: netfilter hooks executed for ingress packets * @broadcast: hw bcast address @@ -1952,8 +1942,7 @@ enum netdev_ml_priv_type { * @xps_maps: all CPUs/RXQs maps for XPS device * * @xps_maps: XXX: need comments on this one - * @miniq_egress: clsact qdisc specific data for - * egress processing + * @tcx_egress: BPF & clsact qdisc specific data for egress processing * @nf_hooks_egress: netfilter hooks executed for egress packets * @qdisc_hash: qdisc hash table * @watchdog_timeo: Represents the timeout that is used by @@ -2045,6 +2034,8 @@ enum netdev_ml_priv_type { * receive offload (GRO) * @gro_ipv4_max_size: Maximum size of aggregated packet in generic * receive offload (GRO), for IPv4. + * @xdp_zc_max_segs: Maximum number of segments supported by AF_XDP + * zero copy driver * * @dev_addr_shadow: Copy of @dev_addr to catch direct writes. * @linkwatch_dev_tracker: refcount tracker used by linkwatch. @@ -2250,11 +2241,11 @@ struct net_device { #define GRO_MAX_SIZE (8 * 65535u) unsigned int gro_max_size; unsigned int gro_ipv4_max_size; + unsigned int xdp_zc_max_segs; rx_handler_func_t __rcu *rx_handler; void __rcu *rx_handler_data; - -#ifdef CONFIG_NET_CLS_ACT - struct mini_Qdisc __rcu *miniq_ingress; +#ifdef CONFIG_NET_XGRESS + struct bpf_mprog_entry __rcu *tcx_ingress; #endif struct netdev_queue __rcu *ingress_queue; #ifdef CONFIG_NETFILTER_INGRESS @@ -2282,8 +2273,8 @@ struct net_device { #ifdef CONFIG_XPS struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX]; #endif -#ifdef CONFIG_NET_CLS_ACT - struct mini_Qdisc __rcu *miniq_egress; +#ifdef CONFIG_NET_XGRESS + struct bpf_mprog_entry __rcu *tcx_egress; #endif #ifdef CONFIG_NETFILTER_EGRESS struct nf_hook_entries __rcu *nf_hooks_egress; @@ -3016,6 +3007,9 @@ extern rwlock_t dev_base_lock; /* Device list lock */ if (netdev_master_upper_dev_get_rcu(slave) == (bond)) #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) +#define for_each_netdev_dump(net, d, ifindex) \ + xa_for_each_start(&(net)->dev_by_index, (ifindex), (d), (ifindex)) + static inline struct net_device *next_net_device(struct net_device *dev) { struct list_head *lh; @@ -3130,8 +3124,6 @@ struct net_device *netdev_get_by_name(struct net *net, const char *name, netdevice_tracker *tracker, gfp_t gfp); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); struct net_device *dev_get_by_napi_id(unsigned int napi_id); -int dev_restart(struct net_device *dev); - static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, @@ -3830,24 +3822,6 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev, int netif_set_real_num_queues(struct net_device *dev, unsigned int txq, unsigned int rxq); -static inline struct netdev_rx_queue * -__netif_get_rx_queue(struct net_device *dev, unsigned int rxq) -{ - return dev->_rx + rxq; -} - -#ifdef CONFIG_SYSFS -static 
inline unsigned int get_netdev_rx_queue_index( - struct netdev_rx_queue *queue) -{ - struct net_device *dev = queue->dev; - int index = queue - dev->_rx; - - BUG_ON(index >= dev->num_rx_queues); - return index; -} -#endif - int netif_get_num_default_rss_queues(void); void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason); @@ -3933,6 +3907,11 @@ int put_user_ifreq(struct ifreq *ifr, void __user *arg); int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, void __user *data, bool *need_copyout); int dev_ifconf(struct net *net, struct ifconf __user *ifc); +int generic_hwtstamp_get_lower(struct net_device *dev, + struct kernel_hwtstamp_config *kernel_cfg); +int generic_hwtstamp_set_lower(struct net_device *dev, + struct kernel_hwtstamp_config *kernel_cfg, + struct netlink_ext_ack *extack); int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); unsigned int dev_get_flags(const struct net_device *); int __dev_change_flags(struct net_device *dev, unsigned int flags, @@ -5102,6 +5081,11 @@ static inline bool netif_is_ovs_port(const struct net_device *dev) return dev->priv_flags & IFF_OVS_DATAPATH; } +static inline bool netif_is_any_bridge_master(const struct net_device *dev) +{ + return netif_is_bridge_master(dev) || netif_is_ovs_master(dev); +} + static inline bool netif_is_any_bridge_port(const struct net_device *dev) { return netif_is_bridge_port(dev) || netif_is_ovs_port(dev); diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index d4fed4c508ca..d68644b7c299 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -11,6 +11,7 @@ #include <linux/wait.h> #include <linux/list.h> #include <linux/static_key.h> +#include <linux/module.h> #include <linux/netfilter_defs.h> #include <linux/netdevice.h> #include <linux/sockptr.h> @@ -481,6 +482,15 @@ struct nfnl_ct_hook { }; extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook; +struct nf_defrag_hook { + struct module *owner; + int (*enable)(struct net *net); + void (*disable)(struct net *net); +}; + +extern const struct nf_defrag_hook __rcu *nf_defrag_v4_hook; +extern const struct nf_defrag_hook __rcu *nf_defrag_v6_hook; + /* * nf_skb_duplicated - TEE target has sent a packet * diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h index 9e937f64a1ad..81286c499325 100644 --- a/include/linux/netfilter/nf_conntrack_h323.h +++ b/include/linux/netfilter/nf_conntrack_h323.h @@ -34,10 +34,6 @@ struct nf_ct_h323_master { int get_h225_addr(struct nf_conn *ct, unsigned char *data, TransportAddress *taddr, union nf_inet_addr *addr, __be16 *port); -void nf_conntrack_h245_expect(struct nf_conn *new, - struct nf_conntrack_expect *this); -void nf_conntrack_q931_expect(struct nf_conn *new, - struct nf_conntrack_expect *this); struct nfct_h323_nat_hooks { int (*set_h245_addr)(struct sk_buff *skb, unsigned int protoff, diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h index f33aa6021364..34ce5d2f37a2 100644 --- a/include/linux/netfilter/nf_conntrack_proto_gre.h +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h @@ -25,7 +25,6 @@ struct nf_ct_gre_keymap { int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, struct nf_conntrack_tuple *t); -void nf_ct_gre_keymap_flush(struct net *net); /* delete keymap entries */ void nf_ct_gre_keymap_destroy(struct nf_conn *ct); diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 
9eec3f4f5351..75d7de34c908 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -50,6 +50,7 @@ struct netlink_kernel_cfg { struct mutex *cb_mutex; int (*bind)(struct net *net, int group); void (*unbind)(struct net *net, int group); + void (*release) (struct sock *sk, unsigned long *groups); }; struct sock *__netlink_kernel_create(struct net *net, int unit, @@ -227,6 +228,11 @@ bool netlink_strict_get_check(struct sk_buff *skb); int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, __u32 group, gfp_t allocation); +int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, + __u32 portid, __u32 group, gfp_t allocation, + int (*filter)(struct sock *dsk, + struct sk_buff *skb, void *data), + void *filter_data); int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code); int netlink_register_notifier(struct notifier_block *nb); int netlink_unregister_notifier(struct notifier_block *nb); diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 86544707236a..45702bdcbceb 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -73,9 +73,7 @@ struct raw_notifier_head { struct srcu_notifier_head { struct mutex mutex; -#ifdef CONFIG_TREE_SRCU struct srcu_usage srcuu; -#endif struct srcu_struct srcu; struct notifier_block __rcu *head; }; @@ -106,7 +104,6 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); #define RAW_NOTIFIER_INIT(name) { \ .head = NULL } -#ifdef CONFIG_TREE_SRCU #define SRCU_NOTIFIER_INIT(name, pcpu) \ { \ .mutex = __MUTEX_INITIALIZER(name.mutex), \ @@ -114,14 +111,6 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); .srcuu = __SRCU_USAGE_INIT(name.srcuu), \ .srcu = __SRCU_STRUCT_INIT(name.srcu, name.srcuu, pcpu), \ } -#else -#define SRCU_NOTIFIER_INIT(name, pcpu) \ - { \ - .mutex = __MUTEX_INITIALIZER(name.mutex), \ - .head = NULL, \ - .srcu = __SRCU_STRUCT_INIT(name.srcu, name.srcuu, pcpu), \ - } -#endif #define ATOMIC_NOTIFIER_HEAD(name) \ struct atomic_notifier_head name = \ diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index fee881cded01..771cb0285872 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -29,7 +29,7 @@ struct fs_struct; * nsproxy is copied. */ struct nsproxy { - atomic_t count; + refcount_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; @@ -102,14 +102,13 @@ int __init nsproxy_cache_init(void); static inline void put_nsproxy(struct nsproxy *ns) { - if (atomic_dec_and_test(&ns->count)) { + if (refcount_dec_and_test(&ns->count)) free_nsproxy(ns); - } } static inline void get_nsproxy(struct nsproxy *ns) { - atomic_inc(&ns->count); + refcount_inc(&ns->count); } #endif diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 92a2063a0a23..5c02720c53a5 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -99,13 +99,15 @@ */ enum pageflags { PG_locked, /* Page is locked. Don't touch. */ + PG_writeback, /* Page is under writeback */ PG_referenced, PG_uptodate, PG_dirty, PG_lru, + PG_head, /* Must be in bit 6 */ + PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */ PG_active, PG_workingset, - PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */ PG_error, PG_slab, PG_owner_priv_1, /* Owner use. 
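The nsproxy atomic_t to refcount_t switch is behavior-preserving on the fast path but adds saturation semantics: an increment on a zero or saturated count warns instead of silently wrapping into a potential use-after-free. Callers are unchanged:

	get_nsproxy(tsk->nsproxy);	/* refcount_inc(): warns and saturates on overflow */
	put_nsproxy(tsk->nsproxy);	/* frees on the final reference via refcount_dec_and_test() */
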
If pagecache, fs may use*/ @@ -113,8 +115,6 @@ enum pageflags { PG_reserved, PG_private, /* If pagecache, has fs-private data */ PG_private_2, /* If pagecache, has fs aux data */ - PG_writeback, /* Page is under writeback */ - PG_head, /* A head page */ PG_mappedtodisk, /* Has blocks allocated on-disk */ PG_reclaim, /* To be reclaimed asap */ PG_swapbacked, /* Page is backed by RAM/swap */ @@ -171,15 +171,6 @@ enum pageflags { /* Remapped by swiotlb-xen. */ PG_xen_remapped = PG_owner_priv_1, -#ifdef CONFIG_MEMORY_FAILURE - /* - * Compound pages. Stored in first tail page's flags. - * Indicates that at least one subpage is hwpoisoned in the - * THP. - */ - PG_has_hwpoisoned = PG_error, -#endif - /* non-lru isolated movable page */ PG_isolated = PG_reclaim, @@ -190,6 +181,17 @@ enum pageflags { /* For self-hosted memmap pages */ PG_vmemmap_self_hosted = PG_owner_priv_1, #endif + + /* + * Flags only valid for compound pages. Stored in first tail page's + * flags word. Cannot use the first 8 flags or any flag marked as + * PF_ANY. + */ + + /* At least one page in this folio has the hwpoison flag set */ + PG_has_hwpoisoned = PG_error, + PG_hugetlb = PG_active, + PG_large_rmappable = PG_workingset, /* anon or file-backed */ }; #define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1) @@ -806,13 +808,32 @@ static inline void ClearPageCompound(struct page *page) BUG_ON(!PageHead(page)); ClearPageHead(page); } +PAGEFLAG(LargeRmappable, large_rmappable, PF_SECOND) +#else +TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable) #endif #define PG_head_mask ((1UL << PG_head)) #ifdef CONFIG_HUGETLB_PAGE int PageHuge(struct page *page); -bool folio_test_hugetlb(struct folio *folio); +SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND) +CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND) + +/** + * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs + * @folio: The folio to test. + * + * Context: Any context. Caller should have a reference on the folio to + * prevent it from being turned into a tail page. + * Return: True for hugetlbfs folios, false for anon folios or folios + * belonging to other filesystems. 
+ */ +static inline bool folio_test_hugetlb(struct folio *folio) +{ + return folio_test_large(folio) && + test_bit(PG_hugetlb, folio_flags(folio, 1)); +} #else TESTPAGEFLAG_FALSE(Huge, hugetlb) #endif @@ -832,11 +853,6 @@ static inline int PageTransHuge(struct page *page) return PageHead(page); } -static inline bool folio_test_transhuge(struct folio *folio) -{ - return folio_test_head(folio); -} - /* * PageTransCompound returns true for both transparent huge pages * and hugetlbfs pages, so it should only be called when it's known @@ -908,6 +924,8 @@ static inline bool is_page_hwpoison(struct page *page) #define PageType(page, flag) \ ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) +#define folio_test_type(folio, flag) \ + ((folio->page.page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) static inline int page_type_has_type(unsigned int page_type) { @@ -919,27 +937,41 @@ static inline int page_has_type(struct page *page) return page_type_has_type(page->page_type); } -#define PAGE_TYPE_OPS(uname, lname) \ -static __always_inline int Page##uname(struct page *page) \ +#define PAGE_TYPE_OPS(uname, lname, fname) \ +static __always_inline int Page##uname(const struct page *page) \ { \ return PageType(page, PG_##lname); \ } \ +static __always_inline int folio_test_##fname(const struct folio *folio)\ +{ \ + return folio_test_type(folio, PG_##lname); \ +} \ static __always_inline void __SetPage##uname(struct page *page) \ { \ VM_BUG_ON_PAGE(!PageType(page, 0), page); \ page->page_type &= ~PG_##lname; \ } \ +static __always_inline void __folio_set_##fname(struct folio *folio) \ +{ \ + VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \ + folio->page.page_type &= ~PG_##lname; \ +} \ static __always_inline void __ClearPage##uname(struct page *page) \ { \ VM_BUG_ON_PAGE(!Page##uname(page), page); \ page->page_type |= PG_##lname; \ -} +} \ +static __always_inline void __folio_clear_##fname(struct folio *folio) \ +{ \ + VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \ + folio->page.page_type |= PG_##lname; \ +} \ /* * PageBuddy() indicates that the page is free and in the buddy system * (see mm/page_alloc.c). */ -PAGE_TYPE_OPS(Buddy, buddy) +PAGE_TYPE_OPS(Buddy, buddy, buddy) /* * PageOffline() indicates that the page is logically offline although the @@ -963,7 +995,7 @@ PAGE_TYPE_OPS(Buddy, buddy) * pages should check PageOffline() and synchronize with such drivers using * page_offline_freeze()/page_offline_thaw(). */ -PAGE_TYPE_OPS(Offline, offline) +PAGE_TYPE_OPS(Offline, offline, offline) extern void page_offline_freeze(void); extern void page_offline_thaw(void); @@ -973,12 +1005,12 @@ extern void page_offline_end(void); /* * Marks pages in use as page tables. */ -PAGE_TYPE_OPS(Table, table) +PAGE_TYPE_OPS(Table, table, pgtable) /* * Marks guardpages used with debug_pagealloc. */ -PAGE_TYPE_OPS(Guard, guard) +PAGE_TYPE_OPS(Guard, guard, guard) extern bool is_free_buddy_page(struct page *page); @@ -1040,6 +1072,14 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page) #define PAGE_FLAGS_CHECK_AT_PREP \ ((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK) +/* + * Flags stored in the second page of a compound page. They may overlap + * the CHECK_AT_FREE flags above, so need to be cleared. 
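PAGE_TYPE_OPS now takes a third argument naming the folio variants, so PAGE_TYPE_OPS(Table, table, pgtable) generates folio_test_pgtable(), __folio_set_pgtable() and __folio_clear_pgtable() alongside the page versions; these are what the new pagetable_pte_ctor()/pagetable_pmd_ctor() in mm.h call. A sketch of the generated interface in use:

	struct folio *folio = ptdesc_folio(ptdesc);

	__folio_set_pgtable(folio);	/* marks the folio as a page table (page_type encoding is inverted) */
	WARN_ON(!folio_test_pgtable(folio));
	__folio_clear_pgtable(folio);
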
+ */ +#define PAGE_FLAGS_SECOND \ + (0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \ + 1UL << PG_hugetlb | 1UL << PG_large_rmappable) + #define PAGE_FLAGS_PRIVATE \ (1UL << PG_private | 1UL << PG_private_2) /** diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index 67314f648aeb..be98564191e6 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -8,6 +8,7 @@ struct pglist_data; +#ifdef CONFIG_PAGE_EXTENSION /** * struct page_ext_operations - per page_ext client operations * @offset: Offset to the client's data within page_ext. Offset is returned to @@ -29,8 +30,6 @@ struct page_ext_operations { bool need_shared_flags; }; -#ifdef CONFIG_PAGE_EXTENSION - /* * The page_ext_flags users must set need_shared_flags to true. */ @@ -82,6 +81,12 @@ static inline void page_ext_init(void) extern struct page_ext *page_ext_get(struct page *page); extern void page_ext_put(struct page_ext *page_ext); +static inline void *page_ext_data(struct page_ext *page_ext, + struct page_ext_operations *ops) +{ + return (void *)(page_ext) + ops->offset; +} + static inline struct page_ext *page_ext_next(struct page_ext *curr) { void *next = curr; diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index 5cb7bd2078ec..d8f344840643 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h @@ -144,9 +144,4 @@ static inline void set_page_idle(struct page *page) { folio_set_idle(page_folio(page)); } - -static inline void clear_page_idle(struct page *page) -{ - folio_clear_idle(page_folio(page)); -} #endif /* _LINUX_MM_PAGE_IDLE_H */ diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h index 01e16c7696ec..6722941c7cb8 100644 --- a/include/linux/page_table_check.h +++ b/include/linux/page_table_check.h @@ -14,18 +14,13 @@ extern struct static_key_true page_table_check_disabled; extern struct page_ext_operations page_table_check_ops; void __page_table_check_zero(struct page *page, unsigned int order); -void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr, - pte_t pte); -void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr, - pmd_t pmd); -void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr, - pud_t pud); -void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte); -void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, pmd_t pmd); -void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr, - pud_t *pudp, pud_t pud); +void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte); +void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd); +void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud); +void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte, + unsigned int nr); +void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd); +void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud); void __page_table_check_pte_clear_range(struct mm_struct *mm, unsigned long addr, pmd_t pmd); @@ -46,61 +41,55 @@ static inline void page_table_check_free(struct page *page, unsigned int order) __page_table_check_zero(page, order); } -static inline void page_table_check_pte_clear(struct mm_struct *mm, - unsigned long addr, pte_t pte) +static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte) { if (static_branch_likely(&page_table_check_disabled)) return; - __page_table_check_pte_clear(mm, addr, 
pte); + __page_table_check_pte_clear(mm, pte); } -static inline void page_table_check_pmd_clear(struct mm_struct *mm, - unsigned long addr, pmd_t pmd) +static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd) { if (static_branch_likely(&page_table_check_disabled)) return; - __page_table_check_pmd_clear(mm, addr, pmd); + __page_table_check_pmd_clear(mm, pmd); } -static inline void page_table_check_pud_clear(struct mm_struct *mm, - unsigned long addr, pud_t pud) +static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud) { if (static_branch_likely(&page_table_check_disabled)) return; - __page_table_check_pud_clear(mm, addr, pud); + __page_table_check_pud_clear(mm, pud); } -static inline void page_table_check_pte_set(struct mm_struct *mm, - unsigned long addr, pte_t *ptep, - pte_t pte) +static inline void page_table_check_ptes_set(struct mm_struct *mm, + pte_t *ptep, pte_t pte, unsigned int nr) { if (static_branch_likely(&page_table_check_disabled)) return; - __page_table_check_pte_set(mm, addr, ptep, pte); + __page_table_check_ptes_set(mm, ptep, pte, nr); } -static inline void page_table_check_pmd_set(struct mm_struct *mm, - unsigned long addr, pmd_t *pmdp, +static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd) { if (static_branch_likely(&page_table_check_disabled)) return; - __page_table_check_pmd_set(mm, addr, pmdp, pmd); + __page_table_check_pmd_set(mm, pmdp, pmd); } -static inline void page_table_check_pud_set(struct mm_struct *mm, - unsigned long addr, pud_t *pudp, +static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud) { if (static_branch_likely(&page_table_check_disabled)) return; - __page_table_check_pud_set(mm, addr, pudp, pud); + __page_table_check_pud_set(mm, pudp, pud); } static inline void page_table_check_pte_clear_range(struct mm_struct *mm, @@ -123,35 +112,29 @@ static inline void page_table_check_free(struct page *page, unsigned int order) { } -static inline void page_table_check_pte_clear(struct mm_struct *mm, - unsigned long addr, pte_t pte) +static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte) { } -static inline void page_table_check_pmd_clear(struct mm_struct *mm, - unsigned long addr, pmd_t pmd) +static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd) { } -static inline void page_table_check_pud_clear(struct mm_struct *mm, - unsigned long addr, pud_t pud) +static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud) { } -static inline void page_table_check_pte_set(struct mm_struct *mm, - unsigned long addr, pte_t *ptep, - pte_t pte) +static inline void page_table_check_ptes_set(struct mm_struct *mm, + pte_t *ptep, pte_t pte, unsigned int nr) { } -static inline void page_table_check_pmd_set(struct mm_struct *mm, - unsigned long addr, pmd_t *pmdp, +static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd) { } -static inline void page_table_check_pud_set(struct mm_struct *mm, - unsigned long addr, pud_t *pudp, +static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud) { } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 716953ee1ebd..351c3b7f93a1 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -203,6 +203,7 @@ enum mapping_flags { /* writeback related tags are not used */ AS_NO_WRITEBACK_TAGS = 5, AS_LARGE_FOLIO_SUPPORT = 6, + AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private 
data */ }; /** @@ -273,6 +274,21 @@ static inline int mapping_use_writeback_tags(struct address_space *mapping) return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); } +static inline bool mapping_release_always(const struct address_space *mapping) +{ + return test_bit(AS_RELEASE_ALWAYS, &mapping->flags); +} + +static inline void mapping_set_release_always(struct address_space *mapping) +{ + set_bit(AS_RELEASE_ALWAYS, &mapping->flags); +} + +static inline void mapping_clear_release_always(struct address_space *mapping) +{ + clear_bit(AS_RELEASE_ALWAYS, &mapping->flags); +} + static inline gfp_t mapping_gfp_mask(struct address_space * mapping) { return mapping->gfp_mask; @@ -373,23 +389,31 @@ static inline struct address_space *folio_file_mapping(struct folio *folio) return folio->mapping; } -static inline struct address_space *page_file_mapping(struct page *page) -{ - return folio_file_mapping(page_folio(page)); -} - -/* - * For file cache pages, return the address_space, otherwise return NULL +/** + * folio_flush_mapping - Find the file mapping this folio belongs to. + * @folio: The folio. + * + * For folios which are in the page cache, return the mapping that this + * page belongs to. Anonymous folios return NULL, even if they're in + * the swap cache. Other kinds of folio also return NULL. + * + * This is ONLY used by architecture cache flushing code. If you aren't + * writing cache flushing code, you want either folio_mapping() or + * folio_file_mapping(). */ -static inline struct address_space *page_mapping_file(struct page *page) +static inline struct address_space *folio_flush_mapping(struct folio *folio) { - struct folio *folio = page_folio(page); - if (unlikely(folio_test_swapcache(folio))) return NULL; + return folio_mapping(folio); } +static inline struct address_space *page_file_mapping(struct page *page) +{ + return folio_file_mapping(page_folio(page)); +} + /** * folio_inode - Get the host inode for this folio. * @folio: The folio. @@ -470,6 +494,19 @@ static inline void *detach_page_private(struct page *page) return folio_detach_private(page_folio(page)); } +/* + * There are some parts of the kernel which assume that PMD entries + * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then, + * limit the maximum allocation order to PMD size. I'm not aware of any + * assumptions about maximum order if THP are disabled, but 8 seems like + * a good order (that's 1MB if you're using 4kB pages) + */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER +#else +#define MAX_PAGECACHE_ORDER 8 +#endif + #ifdef CONFIG_NUMA struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order); #else @@ -501,22 +538,69 @@ pgoff_t page_cache_next_miss(struct address_space *mapping, pgoff_t page_cache_prev_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan); -#define FGP_ACCESSED 0x00000001 -#define FGP_LOCK 0x00000002 -#define FGP_CREAT 0x00000004 -#define FGP_WRITE 0x00000008 -#define FGP_NOFS 0x00000010 -#define FGP_NOWAIT 0x00000020 -#define FGP_FOR_MMAP 0x00000040 -#define FGP_STABLE 0x00000080 +/** + * typedef fgf_t - Flags for getting folios from the page cache. + * + * Most users of the page cache will not need to use these flags; + * there are convenience functions such as filemap_get_folio() and + * filemap_lock_folio(). For users which need more control over exactly + * what is done with the folios, these flags to __filemap_get_folio() + * are available. + * + * * %FGP_ACCESSED - The folio will be marked accessed. 
+ * * %FGP_LOCK - The folio is returned locked. + * * %FGP_CREAT - If no folio is present then a new folio is allocated, + * added to the page cache and the VM's LRU list. The folio is + * returned locked. + * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the + * folio is already in cache. If the folio was allocated, unlock it + * before returning so the caller can do the same dance. + * * %FGP_WRITE - The folio will be written to by the caller. + * * %FGP_NOFS - __GFP_FS will get cleared in gfp. + * * %FGP_NOWAIT - Don't block on the folio lock. + * * %FGP_STABLE - Wait for the folio to be stable (finished writeback) + * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin() + * implementation. + */ +typedef unsigned int __bitwise fgf_t; + +#define FGP_ACCESSED ((__force fgf_t)0x00000001) +#define FGP_LOCK ((__force fgf_t)0x00000002) +#define FGP_CREAT ((__force fgf_t)0x00000004) +#define FGP_WRITE ((__force fgf_t)0x00000008) +#define FGP_NOFS ((__force fgf_t)0x00000010) +#define FGP_NOWAIT ((__force fgf_t)0x00000020) +#define FGP_FOR_MMAP ((__force fgf_t)0x00000040) +#define FGP_STABLE ((__force fgf_t)0x00000080) +#define FGF_GET_ORDER(fgf) (((__force unsigned)fgf) >> 26) /* top 6 bits */ #define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE) +/** + * fgf_set_order - Encode a length in the fgf_t flags. + * @size: The suggested size of the folio to create. + * + * The caller of __filemap_get_folio() can use this to suggest a preferred + * size for the folio that is created. If there is already a folio at + * the index, it will be returned, no matter what its size. If a folio + * is freshly created, it may be of a different size than requested + * due to alignment constraints, memory pressure, or the presence of + * other folios at nearby indices. + */ +static inline fgf_t fgf_set_order(size_t size) +{ + unsigned int shift = ilog2(size); + + if (shift <= PAGE_SHIFT) + return 0; + return (__force fgf_t)((shift - PAGE_SHIFT) << 26); +} + void *filemap_get_entry(struct address_space *mapping, pgoff_t index); struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, - int fgp_flags, gfp_t gfp); + fgf_t fgp_flags, gfp_t gfp); struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, - int fgp_flags, gfp_t gfp); + fgf_t fgp_flags, gfp_t gfp); /** * filemap_get_folio - Find and get a folio. @@ -590,7 +674,7 @@ static inline struct page *find_get_page(struct address_space *mapping, } static inline struct page *find_get_page_flags(struct address_space *mapping, - pgoff_t offset, int fgp_flags) + pgoff_t offset, fgf_t fgp_flags) { return pagecache_get_page(mapping, offset, fgp_flags, 0); } @@ -900,8 +984,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page, void __folio_lock(struct folio *folio); int __folio_lock_killable(struct folio *folio); -bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm, - unsigned int flags); +vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf); void unlock_page(struct page *page); void folio_unlock(struct folio *folio); @@ -1005,11 +1088,13 @@ static inline int folio_lock_killable(struct folio *folio) * Return value and mmap_lock implications depend on flags; see * __folio_lock_or_retry(). 
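Besides type safety, the fgf_t conversion carves the top six bits of the flags word out for a suggested folio order. A buffered-write path can hint at the allocation size like this — a sketch modeled on an FGP_WRITEBEGIN user, with the gfp choice being illustrative:

	struct folio *folio;

	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
				    FGP_WRITEBEGIN | fgf_set_order(len),
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
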
*/ -static inline bool folio_lock_or_retry(struct folio *folio, - struct mm_struct *mm, unsigned int flags) +static inline vm_fault_t folio_lock_or_retry(struct folio *folio, + struct vm_fault *vmf) { might_sleep(); - return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags); + if (!folio_trylock(folio)) + return __folio_lock_or_retry(folio, vmf); + return 0; } /* @@ -1044,11 +1129,6 @@ static inline void wait_on_page_locked(struct page *page) folio_wait_locked(page_folio(page)); } -static inline int wait_on_page_locked_killable(struct page *page) -{ - return folio_wait_locked_killable(page_folio(page)); -} - void wait_on_page_writeback(struct page *page); void folio_wait_writeback(struct folio *folio); int folio_wait_writeback_killable(struct folio *folio); diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h index 27a6df448ee5..27cd1e59ccf7 100644 --- a/include/linux/pagewalk.h +++ b/include/linux/pagewalk.h @@ -6,6 +6,16 @@ struct mm_walk; +/* Locking requirement during a page walk. */ +enum page_walk_lock { + /* mmap_lock should be locked for read to stabilize the vma tree */ + PGWALK_RDLOCK = 0, + /* vma will be write-locked during the walk */ + PGWALK_WRLOCK = 1, + /* vma is expected to be already write-locked during the walk */ + PGWALK_WRLOCK_VERIFY = 2, +}; + /** * struct mm_walk_ops - callbacks for walk_page_range * @pgd_entry: if set, called for each non-empty PGD (top-level) entry @@ -66,6 +76,7 @@ struct mm_walk_ops { int (*pre_vma)(unsigned long start, unsigned long end, struct mm_walk *walk); void (*post_vma)(struct mm_walk *walk); + enum page_walk_lock walk_lock; }; /* diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 2dc75df1437f..8f9a459e1671 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -576,6 +576,8 @@ #define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3 #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3 #define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb +#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3 +#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb #define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index ff99cf7a5d0d..da3a6c30f6d2 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -20,12 +20,20 @@ #define DW_AN_C37_1000BASEX 4 #define DW_10GBASER 5 +/* device vendor OUI */ +#define DW_OUI_WX 0x0018fc80 + +/* dev_flag */ +#define DW_DEV_TXGBE BIT(0) + struct xpcs_id; struct dw_xpcs { struct mdio_device *mdiodev; const struct xpcs_id *id; struct phylink_pcs pcs; + phy_interface_t interface; + int dev_flag; }; int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface); diff --git a/include/linux/percpu.h b/include/linux/percpu.h index b3b458442330..68fac2e7cbe6 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -35,6 +35,12 @@ #define PCPU_BITMAP_BLOCK_BITS (PCPU_BITMAP_BLOCK_SIZE >> \ PCPU_MIN_ALLOC_SHIFT) +#ifdef CONFIG_RANDOM_KMALLOC_CACHES +#define PERCPU_DYNAMIC_SIZE_SHIFT 12 +#else +#define PERCPU_DYNAMIC_SIZE_SHIFT 10 +#endif + /* * Percpu allocator can serve percpu allocations before slab is * initialized which allows slab to depend on the percpu allocator. @@ -42,7 +48,7 @@ * for this. Keep PERCPU_DYNAMIC_RESERVE equal to or larger than * PERCPU_DYNAMIC_EARLY_SIZE. 
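With the new field, page-table walkers declare their locking contract up front and the walk core can assert or take the VMA lock accordingly. A sketch of an ops table for a walk done under the read-locked mmap_lock; my_pmd_entry is a hypothetical callback:

	static const struct mm_walk_ops my_walk_ops = {
		.pmd_entry = my_pmd_entry,
		.walk_lock = PGWALK_RDLOCK,	/* caller holds mmap_lock for read */
	};
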
*/ -#define PERCPU_DYNAMIC_EARLY_SIZE (20 << 10) +#define PERCPU_DYNAMIC_EARLY_SIZE (20 << PERCPU_DYNAMIC_SIZE_SHIFT) /* * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy @@ -56,9 +62,9 @@ * intelligent way to determine this would be nice. */ #if BITS_PER_LONG > 32 -#define PERCPU_DYNAMIC_RESERVE (28 << 10) +#define PERCPU_DYNAMIC_RESERVE (28 << PERCPU_DYNAMIC_SIZE_SHIFT) #else -#define PERCPU_DYNAMIC_RESERVE (20 << 10) +#define PERCPU_DYNAMIC_RESERVE (20 << PERCPU_DYNAMIC_SIZE_SHIFT) #endif extern void *pcpu_base_addr; diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index a0801f68762b..143fbc10ecfe 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -187,5 +187,6 @@ void armpmu_free_irq(int irq, int cpu); #endif /* CONFIG_ARM_PMU */ #define ARMV8_SPE_PDEV_NAME "arm,spe-v1" +#define ARMV8_TRBE_PDEV_NAME "arm,trbe" #endif /* __ARM_PMU_H__ */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2166a69e3bf2..05253af70ce9 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -288,10 +288,9 @@ struct perf_event_pmu_context; #define PERF_PMU_CAP_EXTENDED_REGS 0x0008 #define PERF_PMU_CAP_EXCLUSIVE 0x0010 #define PERF_PMU_CAP_ITRACE 0x0020 -#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x0040 -#define PERF_PMU_CAP_NO_EXCLUDE 0x0080 -#define PERF_PMU_CAP_AUX_OUTPUT 0x0100 -#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0200 +#define PERF_PMU_CAP_NO_EXCLUDE 0x0040 +#define PERF_PMU_CAP_AUX_OUTPUT 0x0080 +#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100 struct perf_output_handle; @@ -1194,7 +1193,8 @@ struct perf_sample_data { PERF_MEM_S(LVL, NA) |\ PERF_MEM_S(SNOOP, NA) |\ PERF_MEM_S(LOCK, NA) |\ - PERF_MEM_S(TLB, NA)) + PERF_MEM_S(TLB, NA) |\ + PERF_MEM_S(LVLNUM, NA)) static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr, u64 period) @@ -1316,15 +1316,31 @@ extern int perf_event_output(struct perf_event *event, struct pt_regs *regs); static inline bool -is_default_overflow_handler(struct perf_event *event) +__is_default_overflow_handler(perf_overflow_handler_t overflow_handler) { - if (likely(event->overflow_handler == perf_event_output_forward)) + if (likely(overflow_handler == perf_event_output_forward)) return true; - if (unlikely(event->overflow_handler == perf_event_output_backward)) + if (unlikely(overflow_handler == perf_event_output_backward)) return true; return false; } +#define is_default_overflow_handler(event) \ + __is_default_overflow_handler((event)->overflow_handler) + +#ifdef CONFIG_BPF_SYSCALL +static inline bool uses_default_overflow_handler(struct perf_event *event) +{ + if (likely(is_default_overflow_handler(event))) + return true; + + return __is_default_overflow_handler(event->orig_overflow_handler); +} +#else +#define uses_default_overflow_handler(event) \ + is_default_overflow_handler(event) +#endif + extern void perf_event_header__init_id(struct perf_event_header *header, struct perf_sample_data *data, @@ -1860,10 +1876,6 @@ extern void arch_perf_update_userpage(struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now); -#ifdef CONFIG_MMU -extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr); -#endif - /* * Snapshot branch stack on software events. 
* diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 5063b482e34f..f49abcfe5eda 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -5,6 +5,9 @@ #include <linux/pfn.h> #include <asm/pgtable.h> +#define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT) +#define PUD_ORDER (PUD_SHIFT - PAGE_SHIFT) + #ifndef __ASSEMBLY__ #ifdef CONFIG_MMU @@ -63,7 +66,6 @@ static inline unsigned long pte_index(unsigned long address) { return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); } -#define pte_index pte_index #ifndef pmd_index static inline unsigned long pmd_index(unsigned long address) @@ -99,7 +101,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) ((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address))) #define pte_unmap(pte) do { \ kunmap_local((pte)); \ - /* rcu_read_unlock() to be added later */ \ + rcu_read_unlock(); \ } while (0) #else static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address) @@ -108,10 +110,12 @@ static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address) } static inline void pte_unmap(pte_t *pte) { - /* rcu_read_unlock() to be added later */ + rcu_read_unlock(); } #endif +void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable); + /* Find an entry in the second-level page table.. */ #ifndef pmd_offset static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) @@ -180,6 +184,60 @@ static inline int pmd_young(pmd_t pmd) } #endif +/* + * A facility to provide lazy MMU batching. This allows PTE updates and + * page invalidations to be delayed until a call to leave lazy MMU mode + * is issued. Some architectures may benefit from doing this, and it is + * beneficial for both shadow and direct mode hypervisors, which may batch + * the PTE updates which happen during this window. Note that using this + * interface requires that read hazards be removed from the code. A read + * hazard could result in the direct mode hypervisor case, since the actual + * write to the page tables may not yet have taken place, so reads through + * a raw PTE pointer after it has been modified are not guaranteed to be + * up to date. This mode can only be entered and left under the protection of + * the page table locks for all page tables which may be modified. In the UP + * case, this is required so that preemption is disabled, and in the SMP case, + * it must synchronize the delayed page table writes properly on other CPUs. + */ +#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE +#define arch_enter_lazy_mmu_mode() do {} while (0) +#define arch_leave_lazy_mmu_mode() do {} while (0) +#define arch_flush_lazy_mmu_mode() do {} while (0) +#endif + +#ifndef set_ptes +/** + * set_ptes - Map consecutive pages to a contiguous range of addresses. + * @mm: Address space to map the pages into. + * @addr: Address to map the first page at. + * @ptep: Page table pointer for the first entry. + * @pte: Page table entry for the first page. + * @nr: Number of pages to map. + * + * May be overridden by the architecture, or the architecture can define + * set_pte() and PFN_PTE_SHIFT. + * + * Context: The caller holds the page table lock. The pages all belong + * to the same folio. The PTEs are all in the same PMD.
+ */ +static inline void set_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned int nr) +{ + page_table_check_ptes_set(mm, ptep, pte, nr); + + arch_enter_lazy_mmu_mode(); + for (;;) { + set_pte(ptep, pte); + if (--nr == 0) + break; + ptep++; + pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT)); + } + arch_leave_lazy_mmu_mode(); +} +#endif +#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1) + #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, @@ -320,7 +378,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, { pte_t pte = ptep_get(ptep); pte_clear(mm, address, ptep); - page_table_check_pte_clear(mm, address, pte); + page_table_check_pte_clear(mm, pte); return pte; } #endif @@ -390,6 +448,7 @@ static inline pmd_t pmdp_get_lockless(pmd_t *pmdp) return pmd; } #define pmdp_get_lockless pmdp_get_lockless +#define pmdp_get_lockless_sync() tlb_remove_table_sync_one() #endif /* CONFIG_PGTABLE_LEVELS > 2 */ #endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */ @@ -408,6 +467,9 @@ static inline pmd_t pmdp_get_lockless(pmd_t *pmdp) { return pmdp_get(pmdp); } +static inline void pmdp_get_lockless_sync(void) +{ +} #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -419,7 +481,7 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, pmd_t pmd = *pmdp; pmd_clear(pmdp); - page_table_check_pmd_clear(mm, address, pmd); + page_table_check_pmd_clear(mm, pmd); return pmd; } @@ -432,7 +494,7 @@ static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, pud_t pud = *pudp; pud_clear(pudp); - page_table_check_pud_clear(mm, address, pud); + page_table_check_pud_clear(mm, pud); return pud; } @@ -450,11 +512,11 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, #endif #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL -static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm, +static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, int full) { - return pudp_huge_get_and_clear(mm, address, pudp); + return pudp_huge_get_and_clear(vma->vm_mm, address, pudp); } #endif #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -558,6 +620,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, #endif #ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +#ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void pudp_set_wrprotect(struct mm_struct *mm, unsigned long address, pud_t *pudp) { @@ -571,6 +634,7 @@ static inline void pudp_set_wrprotect(struct mm_struct *mm, { BUILD_BUG(); } +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ #endif @@ -693,11 +757,14 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { return pmd_val(pmd_a) == pmd_val(pmd_b); } +#endif +#ifndef pud_same static inline int pud_same(pud_t pud_a, pud_t pud_b) { return pud_val(pud_a) == pud_val(pud_b); } +#define pud_same pud_same #endif #ifndef __HAVE_ARCH_P4D_SAME @@ -1041,27 +1108,6 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) #endif /* - * A facility to provide lazy MMU batching. This allows PTE updates and - * page invalidations to be delayed until a call to leave lazy MMU mode - * is issued. Some architectures may benefit from doing this, and it is - * beneficial for both shadow and direct mode hypervisors, which may batch - * the PTE updates which happen during this window. 
Note that using this - * interface requires that read hazards be removed from the code. A read - * hazard could result in the direct mode hypervisor case, since the actual - * write to the page tables may not yet have taken place, so reads though - * a raw PTE pointer after it has been modified are not guaranteed to be - * up to date. This mode can only be entered and left under the protection of - * the page table locks for all page tables which may be modified. In the UP - * case, this is required so that preemption is disabled, and in the SMP case, - * it must synchronize the delayed page table writes properly on other CPUs. - */ -#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE -#define arch_enter_lazy_mmu_mode() do {} while (0) -#define arch_leave_lazy_mmu_mode() do {} while (0) -#define arch_flush_lazy_mmu_mode() do {} while (0) -#endif - -/* * A facility to provide batching of the reload of page tables and * other process state with the actual context switch code for * paravirtualized guests. By convention, only one of the batched @@ -1322,12 +1368,16 @@ static inline int pud_trans_unstable(pud_t *pud) #ifndef CONFIG_NUMA_BALANCING /* - * Technically a PTE can be PROTNONE even when not doing NUMA balancing but - * the only case the kernel cares is for NUMA balancing and is only ever set - * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked - * _PAGE_PROTNONE so by default, implement the helper as "always no". It - * is the responsibility of the caller to distinguish between PROT_NONE - * protections and NUMA hinting fault protections. + * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is + * perfectly valid to indicate "no" in that case, which is why our default + * implementation defaults to "always no". + * + * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE + * page protection due to NUMA hinting. NUMA hinting faults only apply in + * accessible VMAs. + * + * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault, + * looking at the VMA accessibility is sufficient. */ static inline int pte_protnone(pte_t pte) { @@ -1499,6 +1549,9 @@ typedef unsigned int pgtbl_mod_mask; #define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE) #endif +#ifndef has_transparent_pud_hugepage +#define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) +#endif /* * On some architectures it depends on the mm if the p4d/pud or pmd * layer of the page table hierarchy is folded or not. 
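The new set_ptes() above replaces per-page set_pte_at() loops: the caller builds the PTE for the folio's first page and set_ptes() advances the PFN itself, which is also why an architecture that does not override it only has to supply set_pte() and PFN_PTE_SHIFT. A minimal caller-side sketch (illustrative only, not from this diff):

/*
 * Illustrative sketch: map every page of a folio with one set_ptes()
 * call. Assumes the page table lock is held and all PTEs land in the
 * same PMD, as the kernel-doc above requires; map_folio() is not a
 * kernel function.
 */
static void map_folio(struct vm_area_struct *vma, unsigned long addr,
		      pte_t *ptep, struct folio *folio, pgprot_t prot)
{
	pte_t pte = mk_pte(folio_page(folio, 0), prot);

	set_ptes(vma->vm_mm, addr, ptep, pte, folio_nr_pages(folio));
}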
diff --git a/include/linux/phy.h b/include/linux/phy.h index 11c1e91563d4..1351b802ffcf 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -110,6 +110,7 @@ extern const int phy_10gbit_features_array[1]; * @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface * @PHY_INTERFACE_MODE_XLGMII: 40 gigabit media-independent interface * @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax + * @PHY_INTERFACE_MODE_PSGMII: Penta SGMII * @PHY_INTERFACE_MODE_QSGMII: Quad SGMII * @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII * @PHY_INTERFACE_MODE_100BASEX: 100 BaseX @@ -147,6 +148,7 @@ typedef enum { PHY_INTERFACE_MODE_XGMII, PHY_INTERFACE_MODE_XLGMII, PHY_INTERFACE_MODE_MOCA, + PHY_INTERFACE_MODE_PSGMII, PHY_INTERFACE_MODE_QSGMII, PHY_INTERFACE_MODE_TRGMII, PHY_INTERFACE_MODE_100BASEX, @@ -254,6 +256,8 @@ static inline const char *phy_modes(phy_interface_t interface) return "xlgmii"; case PHY_INTERFACE_MODE_MOCA: return "moca"; + case PHY_INTERFACE_MODE_PSGMII: + return "psgmii"; case PHY_INTERFACE_MODE_QSGMII: return "qsgmii"; case PHY_INTERFACE_MODE_TRGMII: @@ -298,6 +302,7 @@ static inline const char *phy_modes(phy_interface_t interface) #define MII_BUS_ID_SIZE 61 struct device; +struct kernel_hwtstamp_config; struct phylink; struct sfp_bus; struct sfp_upstream_ops; @@ -1104,6 +1109,39 @@ struct phy_driver { int (*led_blink_set)(struct phy_device *dev, u8 index, unsigned long *delay_on, unsigned long *delay_off); + /** + * @led_hw_is_supported: Can the HW support the given rules. + * @dev: PHY device which has the LED + * @index: Which LED of the PHY device + * @rules: The core is interested in these rules + * + * Return 0 if yes, -EOPNOTSUPP if not, or an error code. + */ + int (*led_hw_is_supported)(struct phy_device *dev, u8 index, + unsigned long rules); + /** + * @led_hw_control_set: Set the HW to control the LED + * @dev: PHY device which has the LED + * @index: Which LED of the PHY device + * @rules: The rules used to control the LED + * + * Returns 0, or an error code. + */ + int (*led_hw_control_set)(struct phy_device *dev, u8 index, + unsigned long rules); + /** + * @led_hw_control_get: Get how the HW is controlling the LED + * @dev: PHY device which has the LED + * @index: Which LED of the PHY device + * @rules: Pointer to the rules used to control the LED + * + * Set *@rules to how the HW is currently blinking. Returns 0 + * on success, or an error code if the current blinking cannot + * be represented in rules, or some other error happens.
+ */ + int (*led_hw_control_get)(struct phy_device *dev, u8 index, + unsigned long *rules); + }; #define to_phy_driver(d) container_of(to_mdio_common_driver(d), \ struct phy_driver, mdiodrv) @@ -1731,10 +1769,6 @@ int phy_start_cable_test_tdr(struct phy_device *phydev, } #endif -int phy_cable_test_result(struct phy_device *phydev, u8 pair, u16 result); -int phy_cable_test_fault_length(struct phy_device *phydev, u8 pair, - u16 cm); - static inline void phy_device_reset(struct phy_device *phydev, int value) { mdio_device_reset(&phydev->mdio, value); @@ -1826,6 +1860,7 @@ int genphy_c45_an_config_aneg(struct phy_device *phydev); int genphy_c45_an_disable_aneg(struct phy_device *phydev); int genphy_c45_read_mdix(struct phy_device *phydev); int genphy_c45_pma_read_abilities(struct phy_device *phydev); +int genphy_c45_pma_baset1_read_abilities(struct phy_device *phydev); int genphy_c45_read_eee_abilities(struct phy_device *phydev); int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev); int genphy_c45_read_status(struct phy_device *phydev); @@ -1954,6 +1989,12 @@ int phy_ethtool_set_plca_cfg(struct phy_device *phydev, int phy_ethtool_get_plca_status(struct phy_device *phydev, struct phy_plca_status *plca_st); +int __phy_hwtstamp_get(struct phy_device *phydev, + struct kernel_hwtstamp_config *config); +int __phy_hwtstamp_set(struct phy_device *phydev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); + static inline int phy_package_read(struct phy_device *phydev, u32 regnum) { struct phy_package_shared *shared = phydev->shared; diff --git a/include/linux/phylib_stubs.h b/include/linux/phylib_stubs.h new file mode 100644 index 000000000000..1279f48c8a70 --- /dev/null +++ b/include/linux/phylib_stubs.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Stubs for the Network PHY library + */ + +#include <linux/rtnetlink.h> + +struct kernel_hwtstamp_config; +struct netlink_ext_ack; +struct phy_device; + +#if IS_ENABLED(CONFIG_PHYLIB) + +extern const struct phylib_stubs *phylib_stubs; + +struct phylib_stubs { + int (*hwtstamp_get)(struct phy_device *phydev, + struct kernel_hwtstamp_config *config); + int (*hwtstamp_set)(struct phy_device *phydev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); +}; + +static inline int phy_hwtstamp_get(struct phy_device *phydev, + struct kernel_hwtstamp_config *config) +{ + /* phylib_register_stubs() and phylib_unregister_stubs() + * also run under rtnl_lock(). + */ + ASSERT_RTNL(); + + if (!phylib_stubs) + return -EOPNOTSUPP; + + return phylib_stubs->hwtstamp_get(phydev, config); +} + +static inline int phy_hwtstamp_set(struct phy_device *phydev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + /* phylib_register_stubs() and phylib_unregister_stubs() + * also run under rtnl_lock(). 
+ */ + ASSERT_RTNL(); + + if (!phylib_stubs) + return -EOPNOTSUPP; + + return phylib_stubs->hwtstamp_set(phydev, config, extack); +} + +#else + +static inline int phy_hwtstamp_get(struct phy_device *phydev, + struct kernel_hwtstamp_config *config) +{ + return -EOPNOTSUPP; +} + +static inline int phy_hwtstamp_set(struct phy_device *phydev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} + +#endif diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 1817940a3418..7d07f8736431 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -9,6 +9,7 @@ struct device_node; struct ethtool_cmd; struct fwnode_handle; struct net_device; +struct phylink; enum { MLO_PAUSE_NONE, @@ -200,8 +201,6 @@ enum phylink_op_type { * struct phylink_config - PHYLINK configuration structure * @dev: a pointer to a struct device associated with the MAC * @type: operation type of PHYLINK instance - * @legacy_pre_march2020: driver has not been updated for March 2020 updates - * (See commit 7cceb599d15d ("net: phylink: avoid mac_config calls") * @poll_fixed_state: if true, starts link_poll, * if MAC link is at %MLO_AN_FIXED mode. * @mac_managed_pm: if true, indicate the MAC driver is responsible for PHY PM. @@ -215,7 +214,6 @@ enum phylink_op_type { struct phylink_config { struct device *dev; enum phylink_op_type type; - bool legacy_pre_march2020; bool poll_fixed_state; bool mac_managed_pm; bool ovr_an_inband; @@ -225,15 +223,15 @@ struct phylink_config { unsigned long mac_capabilities; }; +void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed); + /** * struct phylink_mac_ops - MAC operations structure. * @validate: Validate and update the link configuration. * @mac_select_pcs: Select a PCS for the interface mode. - * @mac_pcs_get_state: Read the current link state from the hardware. * @mac_prepare: prepare for a major reconfiguration of the interface. * @mac_config: configure the MAC for the selected mode and state. * @mac_finish: finish a major reconfiguration of the interface. - * @mac_an_restart: restart 802.3z BaseX autonegotiation. * @mac_link_down: take the link down. * @mac_link_up: allow the link to come up. * @@ -245,15 +243,12 @@ struct phylink_mac_ops { struct phylink_link_state *state); struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config, phy_interface_t interface); - void (*mac_pcs_get_state)(struct phylink_config *config, - struct phylink_link_state *state); int (*mac_prepare)(struct phylink_config *config, unsigned int mode, phy_interface_t iface); void (*mac_config)(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state); int (*mac_finish)(struct phylink_config *config, unsigned int mode, phy_interface_t iface); - void (*mac_an_restart)(struct phylink_config *config); void (*mac_link_down)(struct phylink_config *config, unsigned int mode, phy_interface_t interface); void (*mac_link_up)(struct phylink_config *config, @@ -314,25 +309,6 @@ struct phylink_pcs *mac_select_pcs(struct phylink_config *config, phy_interface_t interface); /** - * mac_pcs_get_state() - Read the current inband link state from the hardware - * @config: a pointer to a &struct phylink_config. - * @state: a pointer to a &struct phylink_link_state. 
- * - * Read the current inband link state from the MAC PCS, reporting the - * current speed in @state->speed, duplex mode in @state->duplex, pause - * mode in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits, - * negotiation completion state in @state->an_complete, and link up state - * in @state->link. If possible, @state->lp_advertising should also be - * populated. - * - * Note: This is a legacy method. This function will not be called unless - * legacy_pre_march2020 is set in &struct phylink_config and there is no - * PCS attached. - */ -void mac_pcs_get_state(struct phylink_config *config, - struct phylink_link_state *state); - -/** * mac_prepare() - prepare to change the PHY interface mode * @config: a pointer to a &struct phylink_config. * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. @@ -368,17 +344,9 @@ int mac_prepare(struct phylink_config *config, unsigned int mode, * guaranteed to be correct, and so any mac_config() implementation must * never reference these fields. * - * Note: For legacy March 2020 drivers (drivers with legacy_pre_march2020 set - * in their &phylnk_config and which don't have a PCS), this function will be - * called on each link up event, and to also change the in-band advert. For - * non-legacy drivers, it will only be called to reconfigure the MAC for a - * "major" change in e.g. interface mode. It will not be called for changes - * in speed, duplex or pause modes or to change the in-band advertisement. - * In any case, it is strongly preferred that speed, duplex and pause settings - * are handled in the mac_link_up() method and not in this method. - * - * (this requires a rewrite - please refer to mac_link_up() for situations - * where the PCS and MAC are not tightly integrated.) + * This will only be called to reconfigure the MAC for a "major" change in + * e.g. interface mode. It will not be called for changes in speed, duplex + * or pause modes or to change the in-band advertisement. * * In all negotiation modes, as defined by @mode, @state->pause indicates the * pause settings which should be applied as follows. If %MLO_PAUSE_AN is not @@ -410,7 +378,7 @@ int mac_prepare(struct phylink_config *config, unsigned int mode, * 1000base-X or Cisco SGMII mode depending on the @state->interface * mode). In both cases, link state management (whether the link * is up or not) is performed by the MAC, and reported via the - * mac_pcs_get_state() callback. Changes in link state must be made + * pcs_get_state() callback. Changes in link state must be made * by calling phylink_mac_change(). * * Interface mode specific details are mentioned below. @@ -459,16 +427,6 @@ int mac_finish(struct phylink_config *config, unsigned int mode, phy_interface_t iface); /** - * mac_an_restart() - restart 802.3z BaseX autonegotiation - * @config: a pointer to a &struct phylink_config. - * - * Note: This is a legacy method. This function will not be called unless - * legacy_pre_march2020 is set in &struct phylink_config and there is no - * PCS attached. - */ -void mac_an_restart(struct phylink_config *config); - -/** * mac_link_down() - take the link down * @config: a pointer to a &struct phylink_config. 
* @mode: link autonegotiation mode @@ -520,14 +478,19 @@ struct phylink_pcs_ops; /** * struct phylink_pcs - PHYLINK PCS instance * @ops: a pointer to the &struct phylink_pcs_ops structure + * @phylink: pointer to &struct phylink_config + * @neg_mode: provide PCS neg mode via "mode" argument * @poll: poll the PCS for link changes * * This structure is designed to be embedded within the PCS private data, * and will be passed between phylink and the PCS. + * + * The @phylink member is private to phylink and must not be touched by + * the PCS driver. */ struct phylink_pcs { const struct phylink_pcs_ops *ops; + struct phylink *phylink; bool neg_mode; bool poll; }; @@ -535,6 +498,10 @@ struct phylink_pcs { /** * struct phylink_pcs_ops - MAC PCS operations structure. * @pcs_validate: validate the link configuration. + * @pcs_enable: enable the PCS. + * @pcs_disable: disable the PCS. + * @pcs_pre_config: pre-mac_config method (for errata) + * @pcs_post_config: post-mac_config method (for errata) * @pcs_get_state: read the current MAC PCS link state from the hardware. * @pcs_config: configure the MAC PCS for the selected mode and state. * @pcs_an_restart: restart 802.3z BaseX autonegotiation. @@ -544,6 +511,12 @@ struct phylink_pcs { struct phylink_pcs_ops { int (*pcs_validate)(struct phylink_pcs *pcs, unsigned long *supported, const struct phylink_link_state *state); + int (*pcs_enable)(struct phylink_pcs *pcs); + void (*pcs_disable)(struct phylink_pcs *pcs); + void (*pcs_pre_config)(struct phylink_pcs *pcs, + phy_interface_t interface); + int (*pcs_post_config)(struct phylink_pcs *pcs, + phy_interface_t interface); void (*pcs_get_state)(struct phylink_pcs *pcs, struct phylink_link_state *state); int (*pcs_config)(struct phylink_pcs *pcs, unsigned int neg_mode, @@ -574,6 +547,18 @@ int pcs_validate(struct phylink_pcs *pcs, unsigned long *supported, const struct phylink_link_state *state); /** + * pcs_enable() - enable the PCS. + * @pcs: a pointer to a &struct phylink_pcs. + */ +int pcs_enable(struct phylink_pcs *pcs); + +/** + * pcs_disable() - disable the PCS. + * @pcs: a pointer to a &struct phylink_pcs. + */ +void pcs_disable(struct phylink_pcs *pcs); + +/** + * pcs_get_state() - Read the current inband link state from the hardware * @pcs: a pointer to a &struct phylink_pcs. * @state: a pointer to a &struct phylink_link_state. @@ -585,8 +570,8 @@ int pcs_validate(struct phylink_pcs *pcs, unsigned long *supported, * in @state->link. If possible, @state->lp_advertising should also be * populated. * - * When present, this overrides mac_pcs_get_state() in &struct - * phylink_mac_ops. + * When present, this overrides pcs_get_state() in &struct + * phylink_pcs_ops. */ void pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state); @@ -677,6 +662,7 @@ int phylink_fwnode_phy_connect(struct phylink *pl, void phylink_disconnect_phy(struct phylink *); void phylink_mac_change(struct phylink *, bool up); +void phylink_pcs_change(struct phylink_pcs *, bool up); void phylink_start(struct phylink *); void phylink_stop(struct phylink *); diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index c758809d5bcf..f9f9931e02d6 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -17,18 +17,10 @@ struct fs_pin; #if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE) -/* - * sysctl for vm.memfd_noexec - * 0: memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL - * acts like MFD_EXEC was set.
- * 1: memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL - * acts like MFD_NOEXEC_SEAL was set. - * 2: memfd_create() without MFD_NOEXEC_SEAL will be - * rejected. - */ -#define MEMFD_NOEXEC_SCOPE_EXEC 0 -#define MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL 1 -#define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED 2 +/* modes for vm.memfd_noexec sysctl */ +#define MEMFD_NOEXEC_SCOPE_EXEC 0 /* MFD_EXEC implied if unset */ +#define MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL 1 /* MFD_NOEXEC_SEAL implied if unset */ +#define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED 2 /* same as 1, except MFD_EXEC rejected */ #endif struct pid_namespace { @@ -47,7 +39,6 @@ struct pid_namespace { int reboot; /* group exit code if this pidns was rebooted */ struct ns_common ns; #if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE) - /* sysctl for vm.memfd_noexec */ int memfd_noexec_scope; #endif } __randomize_layout; @@ -64,6 +55,23 @@ static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns) return ns; } +#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE) +static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns) +{ + int scope = MEMFD_NOEXEC_SCOPE_EXEC; + + for (; ns; ns = ns->parent) + scope = max(scope, READ_ONCE(ns->memfd_noexec_scope)); + + return scope; +} +#else +static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns) +{ + return 0; +} +#endif + extern struct pid_namespace *copy_pid_ns(unsigned long flags, struct user_namespace *user_ns, struct pid_namespace *ns); extern void zap_pid_ns_processes(struct pid_namespace *pid_ns); @@ -78,6 +86,11 @@ static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns) return ns; } +static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns) +{ + return 0; +} + static inline struct pid_namespace *copy_pid_ns(unsigned long flags, struct user_namespace *user_ns, struct pid_namespace *ns) { diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 02e0086b10f6..608a9eb86bff 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -269,10 +269,10 @@ bool pipe_is_unprivileged_user(void); /* for F_SETPIPE_SZ and F_GETPIPE_SZ */ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots); -long pipe_fcntl(struct file *, unsigned int, unsigned long arg); +long pipe_fcntl(struct file *, unsigned int, unsigned int arg); struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice); int create_pipe_files(struct file **, int); -unsigned int round_pipe_size(unsigned long size); +unsigned int round_pipe_size(unsigned int size); #endif diff --git a/include/linux/platform_data/hirschmann-hellcreek.h b/include/linux/platform_data/hirschmann-hellcreek.h index 6a000df5541f..8748680e9e3c 100644 --- a/include/linux/platform_data/hirschmann-hellcreek.h +++ b/include/linux/platform_data/hirschmann-hellcreek.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 or MIT) */ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ /* * Hirschmann Hellcreek TSN switch platform data. 
* diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index dc1fb5890792..91f87d7e807c 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -103,7 +103,7 @@ int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *su unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp); -unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); +unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index); unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp); @@ -121,17 +121,29 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev); struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available); + +struct dev_pm_opp * +dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq, + u32 index, bool available); + struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq); +struct dev_pm_opp *dev_pm_opp_find_freq_floor_indexed(struct device *dev, + unsigned long *freq, u32 index); + +struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, + unsigned long *freq); + +struct dev_pm_opp *dev_pm_opp_find_freq_ceil_indexed(struct device *dev, + unsigned long *freq, u32 index); + struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, unsigned int level); + struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, unsigned int *level); -struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, - unsigned long *freq); - struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw, int index); @@ -200,7 +212,7 @@ static inline unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp) return 0; } -static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) +static inline unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index) { return 0; } @@ -247,26 +259,27 @@ static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) return 0; } -static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, - unsigned int level) +static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, + unsigned long freq, bool available) { return ERR_PTR(-EOPNOTSUPP); } -static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, - unsigned int *level) +static inline struct dev_pm_opp * +dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq, + u32 index, bool available) { return ERR_PTR(-EOPNOTSUPP); } -static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, - unsigned long freq, bool available) +static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, + unsigned long *freq) { return ERR_PTR(-EOPNOTSUPP); } -static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, - unsigned long *freq) +static inline struct dev_pm_opp * +dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq, u32 index) { return ERR_PTR(-EOPNOTSUPP); } @@ -277,6 +290,24 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, return ERR_PTR(-EOPNOTSUPP); } +static inline struct dev_pm_opp * +dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq, u32 index) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, + unsigned int level) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct dev_pm_opp 
*dev_pm_opp_find_level_ceil(struct device *dev, + unsigned int *level) +{ + return ERR_PTR(-EOPNOTSUPP); +} + static inline struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw, int index) { @@ -631,4 +662,9 @@ static inline void dev_pm_opp_put_prop_name(int token) dev_pm_opp_clear_config(token); } +static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) +{ + return dev_pm_opp_get_freq_indexed(opp, 0); +} + #endif /* __LINUX_OPP_H__ */ diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 9a8151a2bdea..7c9b35448563 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -85,8 +85,6 @@ extern void pm_runtime_irq_safe(struct device *dev); extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); extern u64 pm_runtime_autosuspend_expiration(struct device *dev); -extern void pm_runtime_update_max_time_suspended(struct device *dev, - s64 delta_ns); extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); extern void pm_runtime_get_suppliers(struct device *dev); extern void pm_runtime_put_suppliers(struct device *dev); diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 77f4849e3418..6eb9adaef52b 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -194,6 +194,16 @@ static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec, #endif /* !CONFIG_PM_SLEEP */ +static inline bool device_awake_path(struct device *dev) +{ + return device_wakeup_path(dev); +} + +static inline void device_set_awake_path(struct device *dev) +{ + device_set_wakeup_path(dev); +} + static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) { return pm_wakeup_ws_event(ws, msec, false); } diff --git a/include/linux/power/power_on_reason.h b/include/linux/power/power_on_reason.h new file mode 100644 index 000000000000..95a1ec0c403c --- /dev/null +++ b/include/linux/power/power_on_reason.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Author: Kamel Bouhara <kamel.bouhara@bootlin.com> + */ + +#ifndef POWER_ON_REASON_H +#define POWER_ON_REASON_H + +#define POWER_ON_REASON_REGULAR "regular power-up" +#define POWER_ON_REASON_RTC "RTC wakeup" +#define POWER_ON_REASON_WATCHDOG "watchdog timeout" +#define POWER_ON_REASON_SOFTWARE "software reset" +#define POWER_ON_REASON_RST_BTN "reset button action" +#define POWER_ON_REASON_CPU_CLK_FAIL "CPU clock failure" +#define POWER_ON_REASON_XTAL_FAIL "crystal oscillator failure" +#define POWER_ON_REASON_BROWN_OUT "brown-out reset" +#define POWER_ON_REASON_UNKNOWN "unknown reason" + +#endif /* POWER_ON_REASON_H */ diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h index b83a3f944f28..b068e2e60939 100644 --- a/include/linux/prefetch.h +++ b/include/linux/prefetch.h @@ -25,11 +25,10 @@ struct page; prefetch() should be defined by the architecture, if not, the #define below provides a no-op define. - There are 3 prefetch() macros: + There are 2 prefetch() macros: prefetch(x) - prefetches the cacheline at "x" for read prefetchw(x) - prefetches the cacheline at "x" for write - spin_lock_prefetch(x) - prefetches the spinlock *x for taking there is also PREFETCH_STRIDE which is the architecture-preferred "lookahead" size for prefetching streamed operations.
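With spin_lock_prefetch() gone, prefetch() and prefetchw() are the only remaining hints. A short sketch of the streamed-read pattern the comment above describes (illustrative; walk() is not from this diff):

#include <linux/list.h>
#include <linux/prefetch.h>

static void walk(struct list_head *head)
{
	struct list_head *pos;

	list_for_each(pos, head) {
		/* Pull in the next node's cacheline while this one is used. */
		prefetch(pos->next);
		/* ... process the entry embedding "pos" ... */
	}
}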
@@ -44,10 +43,6 @@ struct page; #define prefetchw(x) __builtin_prefetch(x,1) #endif -#ifndef ARCH_HAS_SPINLOCK_PREFETCH -#define spin_lock_prefetch(x) prefetchw(x) -#endif - #ifndef PREFETCH_STRIDE #define PREFETCH_STRIDE (4*L1_CACHE_BYTES) #endif diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h index 75da8f5f7ad8..c1dc87fc536b 100644 --- a/include/linux/psp-platform-access.h +++ b/include/linux/psp-platform-access.h @@ -8,6 +8,10 @@ enum psp_platform_access_msg { PSP_CMD_NONE = 0x0, PSP_I2C_REQ_BUS_CMD = 0x64, + PSP_DYNAMIC_BOOST_GET_NONCE, + PSP_DYNAMIC_BOOST_SET_UID, + PSP_DYNAMIC_BOOST_GET_PARAMETER, + PSP_DYNAMIC_BOOST_SET_PARAMETER, }; struct psp_req_buffer_hdr { diff --git a/include/linux/ptp_mock.h b/include/linux/ptp_mock.h new file mode 100644 index 000000000000..72eb401034d9 --- /dev/null +++ b/include/linux/ptp_mock.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Mock-up PTP Hardware Clock driver for virtual network devices + * + * Copyright 2023 NXP + */ + +#ifndef _PTP_MOCK_H_ +#define _PTP_MOCK_H_ + +struct device; +struct mock_phc; + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK_MOCK) + +struct mock_phc *mock_phc_create(struct device *dev); +void mock_phc_destroy(struct mock_phc *phc); +int mock_phc_index(struct mock_phc *phc); + +#else + +static inline struct mock_phc *mock_phc_create(struct device *dev) +{ + return NULL; +} + +static inline void mock_phc_destroy(struct mock_phc *phc) +{ +} + +static inline int mock_phc_index(struct mock_phc *phc) +{ + return -1; +} + +#endif + +#endif /* _PTP_MOCK_H_ */ diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h index 90e3045b2dcb..0d3b6ed21628 100644 --- a/include/linux/qed/qed_fcoe_if.h +++ b/include/linux/qed/qed_fcoe_if.h @@ -67,9 +67,6 @@ struct qed_fcoe_cb_ops { u32 (*get_login_failures)(void *cookie); }; -void qed_fcoe_set_pf_params(struct qed_dev *cdev, - struct qed_fcoe_pf_params *params); - /** * struct qed_fcoe_ops - qed FCoE operations. * @common: common operations pointer diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h index 6a9b177d5c41..e50416ba9cd9 100644 --- a/include/linux/raid_class.h +++ b/include/linux/raid_class.h @@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state) struct raid_template *raid_class_attach(struct raid_function_template *); void raid_class_release(struct raid_template *); - -int __must_check raid_component_add(struct raid_template *, struct device *, - struct device *); - diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 7ee7ed5de722..6dbc5a1bf6a8 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -60,6 +60,32 @@ rb_insert_augmented_cached(struct rb_node *node, rb_insert_augmented(node, &root->rb_root, augment); } +static __always_inline struct rb_node * +rb_add_augmented_cached(struct rb_node *node, struct rb_root_cached *tree, + bool (*less)(struct rb_node *, const struct rb_node *), + const struct rb_augment_callbacks *augment) +{ + struct rb_node **link = &tree->rb_root.rb_node; + struct rb_node *parent = NULL; + bool leftmost = true; + + while (*link) { + parent = *link; + if (less(node, parent)) { + link = &parent->rb_left; + } else { + link = &parent->rb_right; + leftmost = false; + } + } + + rb_link_node(node, parent, link); + augment->propagate(parent, NULL); /* suboptimal */ + rb_insert_augmented_cached(node, tree, leftmost, augment); + + return leftmost ? 
node : NULL; +} + /* * Template for declaring augmented rbtree callbacks (generic case) * diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index ba4c00dd8005..89186c499dd4 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -101,7 +101,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, { struct hlist_nulls_node *first = h->first; - n->next = first; + WRITE_ONCE(n->next, first); WRITE_ONCE(n->pprev, &h->first); rcu_assign_pointer(hlist_nulls_first_rcu(h), n); if (!is_a_nulls(first)) @@ -137,7 +137,7 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, last = i; if (last) { - n->next = last->next; + WRITE_ONCE(n->next, last->next); n->pprev = &last->next; rcu_assign_pointer(hlist_nulls_next_rcu(last), n); } else { diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h index 9bc8cbb33340..eda493200663 100644 --- a/include/linux/rcupdate_trace.h +++ b/include/linux/rcupdate_trace.h @@ -87,6 +87,7 @@ static inline void rcu_read_unlock_trace(void) void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); void synchronize_rcu_tasks_trace(void); void rcu_barrier_tasks_trace(void); +struct task_struct *get_rcu_tasks_trace_gp_kthread(void); #else /* * The BPF JIT forms these addresses even when it doesn't call these diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h index 699b938358bf..5e0f74f2f8ca 100644 --- a/include/linux/rcupdate_wait.h +++ b/include/linux/rcupdate_wait.h @@ -42,6 +42,11 @@ do { \ * call_srcu() function, with this wrapper supplying the pointer to the * corresponding srcu_struct. * + * Note that call_rcu_hurry() should be used instead of call_rcu() + * because in kernels built with CONFIG_RCU_LAZY=y the delay between the + * invocation of call_rcu() and that of the corresponding RCU callback + * can be multiple seconds. + * * The first argument tells Tiny RCU's _wait_rcu_gp() not to * bother waiting for RCU. The reason for this is because anywhere * synchronize_rcu_mult() can be called is automatically already a full diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 7f17acf29dda..7b949292908a 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -138,6 +138,8 @@ static inline int rcu_needs_cpu(void) return 0; } +static inline void rcu_request_urgent_qs_task(struct task_struct *t) { } + /* * Take advantage of the fact that there is only one CPU, which * allows us to ignore virtualization-based context switches. diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 56bccb5a8fde..126f6b418f6a 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -21,6 +21,7 @@ void rcu_softirq_qs(void); void rcu_note_context_switch(bool preempt); int rcu_needs_cpu(void); void rcu_cpu_stall_reset(void); +void rcu_request_urgent_qs_task(struct task_struct *t); /* * Note a virtualization-based context switch. 
This is simply a diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 8fc0b3ebce44..c9182a47736e 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1287,6 +1287,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min, void regcache_cache_only(struct regmap *map, bool enable); void regcache_cache_bypass(struct regmap *map, bool enable); void regcache_mark_dirty(struct regmap *map); +bool regcache_reg_cached(struct regmap *map, unsigned int reg); bool regmap_check_range_table(struct regmap *map, unsigned int reg, const struct regmap_access_table *table); diff --git a/include/linux/regulator/db8500-prcmu.h b/include/linux/regulator/db8500-prcmu.h index f90df9ee703e..d58ff273157e 100644 --- a/include/linux/regulator/db8500-prcmu.h +++ b/include/linux/regulator/db8500-prcmu.h @@ -35,10 +35,4 @@ enum db8500_regulator_id { DB8500_NUM_REGULATORS }; -/* - * Exported interface for CPUIdle only. This function is called with all - * interrupts turned off. - */ -int power_state_active_is_enabled(void); - #endif diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index c6ef7d68eb9a..4b7eceb3828b 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -292,11 +292,12 @@ enum regulator_type { * @ramp_delay: Time to settle down after voltage change (unit: uV/us) * @min_dropout_uV: The minimum dropout voltage this regulator can handle * @linear_ranges: A constant table of possible voltage ranges. - * @linear_range_selectors: A constant table of voltage range selectors. - * If pickable ranges are used each range must - * have corresponding selector here. + * @linear_range_selectors_bitfield: A constant table of voltage range + * selectors as bitfield values. If + * pickable ranges are used each range + * must have corresponding selector here. * @n_linear_ranges: Number of entries in the @linear_ranges (and in - * linear_range_selectors if used) table(s). + * linear_range_selectors_bitfield if used) table(s). * @volt_table: Voltage mapping table (if table based mapping) * @curr_table: Current limit mapping table (if table based mapping) * @@ -384,7 +385,7 @@ struct regulator_desc { int min_dropout_uV; const struct linear_range *linear_ranges; - const unsigned int *linear_range_selectors; + const unsigned int *linear_range_selectors_bitfield; int n_linear_ranges; diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b87d01660412..a3825ce81102 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -198,6 +198,8 @@ void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); void page_add_file_rmap(struct page *, struct vm_area_struct *, bool compound); +void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr, + struct vm_area_struct *, bool compound); void page_remove_rmap(struct page *, struct vm_area_struct *, bool compound); diff --git a/include/linux/sched.h b/include/linux/sched.h index 609bde814cb0..177b3f3676ef 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -75,14 +75,14 @@ struct user_event_mm; * Task state bitmask. NOTE! These bits are also * encoded in fs/proc/array.c: get_task_state(). * - * We have two separate sets of flags: task->state + * We have two separate sets of flags: task->__state * is about runnability, while task->exit_state are * about the task exiting. Confusing, but this way * modifying one set can't modify the other one by * mistake. 
*/ -/* Used in tsk->state: */ +/* Used in tsk->__state: */ #define TASK_RUNNING 0x00000000 #define TASK_INTERRUPTIBLE 0x00000001 #define TASK_UNINTERRUPTIBLE 0x00000002 @@ -92,7 +92,7 @@ struct user_event_mm; #define EXIT_DEAD 0x00000010 #define EXIT_ZOMBIE 0x00000020 #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) -/* Used in tsk->state again: */ +/* Used in tsk->__state again: */ #define TASK_PARKED 0x00000040 #define TASK_DEAD 0x00000080 #define TASK_WAKEKILL 0x00000100 @@ -173,7 +173,7 @@ struct user_event_mm; #endif /* - * set_current_state() includes a barrier so that the write of current->state + * set_current_state() includes a barrier so that the write of current->__state * is correctly serialised wrt the caller's subsequent test of whether to * actually sleep: * @@ -196,9 +196,9 @@ struct user_event_mm; * wake_up_state(p, TASK_UNINTERRUPTIBLE); * * where wake_up_state()/try_to_wake_up() executes a full memory barrier before - * accessing p->state. + * accessing p->__state. * - * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, + * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is, * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). * @@ -549,13 +549,18 @@ struct sched_entity { /* For load-balancing: */ struct load_weight load; struct rb_node run_node; + u64 deadline; + u64 min_deadline; + struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; - u64 vruntime; u64 prev_sum_exec_runtime; + u64 vruntime; + s64 vlag; + u64 slice; u64 nr_migrations; @@ -2433,9 +2438,11 @@ extern void sched_core_free(struct task_struct *tsk); extern void sched_core_fork(struct task_struct *p); extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type, unsigned long uaddr); +extern int sched_core_idle_cpu(int cpu); #else static inline void sched_core_free(struct task_struct *tsk) { } static inline void sched_core_fork(struct task_struct *p) { } +static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } #endif extern void sched_set_stop_task(int cpu, struct task_struct *stop); diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index dd35ce28bb90..a23af225c898 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -118,11 +118,47 @@ static inline struct task_struct *get_task_struct(struct task_struct *t) } extern void __put_task_struct(struct task_struct *t); +extern void __put_task_struct_rcu_cb(struct rcu_head *rhp); static inline void put_task_struct(struct task_struct *t) { - if (refcount_dec_and_test(&t->usage)) + if (!refcount_dec_and_test(&t->usage)) + return; + + /* + * In !RT, it is always safe to call __put_task_struct(). + * Under RT, we can only call it in preemptible context. + */ + if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) { + static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP); + + lock_map_acquire_try(&put_task_map); __put_task_struct(t); + lock_map_release(&put_task_map); + return; + } + + /* + * under PREEMPT_RT, we can't call put_task_struct + * in atomic context because it will indirectly + * acquire sleeping locks. + * + * call_rcu() will schedule delayed_put_task_struct_rcu() + * to be called in process context. + * + * __put_task_struct() is called when + * refcount_dec_and_test(&t->usage) succeeds. 
+ * + * This means that it can't "conflict" with + * put_task_struct_rcu_user() which abuses ->rcu the same + * way; rcu_users has a reference so task->usage can't be + * zero after rcu_users 1 -> 0 transition. + * + * delayed_free_task() also uses ->rcu, but it is only called + * when it fails to fork a process. Therefore, there is no + * way it can conflict with put_task_struct(). + */ + call_rcu(&t->rcu, __put_task_struct_rcu_cb); } DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T)) diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h index 988528b5da43..35f3a4a8ceb1 100644 --- a/include/linux/secretmem.h +++ b/include/linux/secretmem.h @@ -6,24 +6,23 @@ extern const struct address_space_operations secretmem_aops; -static inline bool page_is_secretmem(struct page *page) +static inline bool folio_is_secretmem(struct folio *folio) { struct address_space *mapping; /* - * Using page_mapping() is quite slow because of the actual call - * instruction and repeated compound_head(page) inside the - * page_mapping() function. + * Using folio_mapping() is quite slow because of the actual call + * instruction. * We know that secretmem pages are not compound and LRU so we can * save a couple of cycles here. */ - if (PageCompound(page) || !PageLRU(page)) + if (folio_test_large(folio) || !folio_test_lru(folio)) return false; mapping = (struct address_space *) - ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); + ((unsigned long)folio->mapping & ~PAGE_MAPPING_FLAGS); - if (!mapping || mapping != page->mapping) + if (!mapping || mapping != folio->mapping) return false; return mapping->a_ops == &secretmem_aops; @@ -39,7 +38,7 @@ static inline bool vma_is_secretmem(struct vm_area_struct *vma) return false; } -static inline bool page_is_secretmem(struct page *page) +static inline bool folio_is_secretmem(struct folio *folio) { return false; } diff --git a/include/linux/security.h b/include/linux/security.h index 32828502f09e..b2c38bfe5647 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -293,6 +293,7 @@ int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file); int security_bprm_check(struct linux_binprm *bprm); void security_bprm_committing_creds(struct linux_binprm *bprm); void security_bprm_committed_creds(struct linux_binprm *bprm); +int security_fs_context_submount(struct fs_context *fc, struct super_block *reference); int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc); int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param); int security_sb_alloc(struct super_block *sb); @@ -629,6 +630,11 @@ static inline void security_bprm_committed_creds(struct linux_binprm *bprm) { } +static inline int security_fs_context_submount(struct fs_context *fc, + struct super_block *reference) +{ + return 0; +} static inline int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc) { @@ -1439,7 +1445,8 @@ int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u int security_sk_alloc(struct sock *sk, int family, gfp_t priority); void security_sk_free(struct sock *sk); void security_sk_clone(const struct sock *sk, struct sock *newsk); -void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic); +void security_sk_classify_flow(const struct sock *sk, + struct flowi_common *flic); void security_req_classify_flow(const struct request_sock *req, struct flowi_common *flic); void security_sock_graft(struct sock*sk, struct socket 
*parent); @@ -1597,7 +1604,7 @@ static inline void security_sk_clone(const struct sock *sk, struct sock *newsk) { } -static inline void security_sk_classify_flow(struct sock *sk, +static inline void security_sk_classify_flow(const struct sock *sk, struct flowi_common *flic) { } diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index bd023dd38ae6..386ab580b839 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -249,18 +249,19 @@ static inline void seq_show_option(struct seq_file *m, const char *name, /** * seq_show_option_n - display mount options with appropriate escapes - * where @value must be a specific length. + * where @value must be a specific length (i.e. + * not NUL-terminated). * @m: the seq_file handle * @name: the mount option name * @value: the mount option name's value, cannot be NULL - * @length: the length of @value to display + * @length: the exact length of @value to display, must be constant expression * * This is a macro since this uses "length" to define the size of the * stack buffer. */ #define seq_show_option_n(m, name, value, length) { \ char val_buf[length + 1]; \ - strncpy(val_buf, value, length); \ + memcpy(val_buf, value, length); \ val_buf[length] = '\0'; \ seq_show_option(m, name, val_buf); \ } diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 6d58c57acdaa..a156d2ed8d9e 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -459,7 +459,8 @@ struct uart_port { struct serial_rs485 *rs485); int (*iso7816_config)(struct uart_port *, struct serial_iso7816 *iso7816); - int ctrl_id; /* optional serial core controller id */ + unsigned int ctrl_id; /* optional serial core controller id */ + unsigned int port_id; /* optional serial core port id */ unsigned int irq; /* irq number */ unsigned long irqflags; /* irq flags */ unsigned int uartclk; /* base uart clock */ diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 9029abd29b1c..6b0c626620f5 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -13,6 +13,10 @@ /* inode in-kernel data */ +#ifdef CONFIG_TMPFS_QUOTA +#define SHMEM_MAXQUOTAS 2 +#endif + struct shmem_inode_info { spinlock_t lock; unsigned int seals; /* shmem seals */ @@ -27,6 +31,10 @@ struct shmem_inode_info { atomic_t stop_eviction; /* hold when working on inode */ struct timespec64 i_crtime; /* file creation time */ unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */ +#ifdef CONFIG_TMPFS_QUOTA + struct dquot *i_dquot[MAXQUOTAS]; +#endif + struct offset_ctx dir_offsets; /* stable entry offsets */ struct inode vfs_inode; }; @@ -35,11 +43,18 @@ struct shmem_inode_info { (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL) #define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL) +struct shmem_quota_limits { + qsize_t usrquota_bhardlimit; /* Default user quota block hard limit */ + qsize_t usrquota_ihardlimit; /* Default user quota inode hard limit */ + qsize_t grpquota_bhardlimit; /* Default group quota block hard limit */ + qsize_t grpquota_ihardlimit; /* Default group quota inode hard limit */ +}; + struct shmem_sb_info { unsigned long max_blocks; /* How many blocks are allowed */ struct percpu_counter used_blocks; /* How many are allocated */ unsigned long max_inodes; /* How many inodes are allowed */ - unsigned long free_inodes; /* How many are left for allocation */ + unsigned long free_ispace; /* How much ispace left for allocation */ raw_spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ umode_t mode; /* 
Mount mode for root directory */ unsigned char huge; /* Whether to try for hugepages */ @@ -53,6 +68,7 @@ struct shmem_sb_info { spinlock_t shrinklist_lock; /* Protects shrinklist */ struct list_head shrinklist; /* List of shinkable inodes */ unsigned long shrinklist_len; /* Length of shrinklist */ + struct shmem_quota_limits qlimits; /* Default quota limits */ }; static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) @@ -172,4 +188,17 @@ extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd, #endif /* CONFIG_SHMEM */ #endif /* CONFIG_USERFAULTFD */ +/* + * Used space is stored as unsigned 64-bit value in bytes but + * quota core supports only signed 64-bit values so use that + * as a limit + */ +#define SHMEM_QUOTA_MAX_SPC_LIMIT 0x7fffffffffffffffLL /* 2^63-1 */ +#define SHMEM_QUOTA_MAX_INO_LIMIT 0x7fffffffffffffffLL + +#ifdef CONFIG_TMPFS_QUOTA +extern const struct dquot_operations shmem_quota_operations; +extern struct quota_format_type shmem_quota_format; +#endif /* CONFIG_TMPFS_QUOTA */ + #endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 91ed66952580..4174c4b82d13 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -32,7 +32,6 @@ #include <linux/if_packet.h> #include <linux/llist.h> #include <net/flow.h> -#include <net/page_pool.h> #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <linux/netfilter/nf_conntrack_common.h> #endif @@ -441,8 +440,6 @@ static inline bool skb_frag_must_loop(struct page *p) copied += p_len, p++, p_off = 0, \ p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \ -#define HAVE_HW_TIME_STAMP - /** * struct skb_shared_hwtstamps - hardware time stamps * @hwtstamp: hardware time stamp transformed into duration @@ -944,7 +941,7 @@ struct sk_buff { __u8 __mono_tc_offset[0]; /* public: */ __u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */ -#ifdef CONFIG_NET_CLS_ACT +#ifdef CONFIG_NET_XGRESS __u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */ __u8 tc_skip_classify:1; #endif @@ -993,7 +990,7 @@ struct sk_buff { __u8 csum_not_inet:1; #endif -#ifdef CONFIG_NET_SCHED +#if defined(CONFIG_NET_SCHED) || defined(CONFIG_NET_XGRESS) __u16 tc_index; /* traffic control index */ #endif @@ -3152,22 +3149,38 @@ static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask) } /** - * __skb_queue_purge - empty a list + * __skb_queue_purge_reason - empty a list * @list: list to empty + * @reason: drop reason * * Delete all buffers on an &sk_buff list. Each buffer is removed from * the list and one reference dropped. This function does not take the * list lock and the caller must hold the relevant locks to use it. 
*/ -static inline void __skb_queue_purge(struct sk_buff_head *list) +static inline void __skb_queue_purge_reason(struct sk_buff_head *list, + enum skb_drop_reason reason) { struct sk_buff *skb; + while ((skb = __skb_dequeue(list)) != NULL) - kfree_skb(skb); + kfree_skb_reason(skb, reason); +} + +static inline void __skb_queue_purge(struct sk_buff_head *list) +{ + __skb_queue_purge_reason(list, SKB_DROP_REASON_QUEUE_PURGE); +} + +void skb_queue_purge_reason(struct sk_buff_head *list, + enum skb_drop_reason reason); + +static inline void skb_queue_purge(struct sk_buff_head *list) +{ + skb_queue_purge_reason(list, SKB_DROP_REASON_QUEUE_PURGE); } -void skb_queue_purge(struct sk_buff_head *list); unsigned int skb_rbtree_purge(struct rb_root *root); +void skb_errqueue_purge(struct sk_buff_head *list); void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask); @@ -3423,13 +3436,15 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f) __skb_frag_ref(&skb_shinfo(skb)->frags[f]); } +bool napi_pp_put_page(struct page *page, bool napi_safe); + static inline void napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe) { struct page *page = skb_frag_page(frag); #ifdef CONFIG_PAGE_POOL - if (recycle && page_pool_return_skb_page(page, napi_safe)) + if (recycle && napi_pp_put_page(page, napi_safe)) return; #endif put_page(page); @@ -4023,7 +4038,7 @@ __skb_header_pointer(const struct sk_buff *skb, int offset, int len, if (likely(hlen - offset >= len)) return (void *)data + offset; - if (!skb || !buffer || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0)) + if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0)) return NULL; return buffer; @@ -4036,6 +4051,14 @@ skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) skb_headlen(skb), buffer); } +static inline void * __must_check +skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len) +{ + if (likely(skb_headlen(skb) - offset >= len)) + return skb->data + offset; + return NULL; +} + /** * skb_needs_linearize - check if we need to linearize a given skb * depending on the given device features. diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 054d7911bfc9..c1637515a8a4 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -62,6 +62,7 @@ struct sk_psock_progs { enum sk_psock_state_bits { SK_PSOCK_TX_ENABLED, + SK_PSOCK_RX_STRP_ENABLED, }; struct sk_psock_link { diff --git a/include/linux/slab.h b/include/linux/slab.h index 848c7c82ad5a..8228d1276a2f 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -19,6 +19,7 @@ #include <linux/workqueue.h> #include <linux/percpu-refcount.h> #include <linux/cleanup.h> +#include <linux/hash.h> /* @@ -345,6 +346,12 @@ static inline unsigned int arch_slab_minalign(void) #define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \ (KMALLOC_MIN_SIZE) : 16) +#ifdef CONFIG_RANDOM_KMALLOC_CACHES +#define RANDOM_KMALLOC_CACHES_NR 15 // # of cache copies +#else +#define RANDOM_KMALLOC_CACHES_NR 0 +#endif + /* * Whenever changing this, take care of that kmalloc_type() and * create_kmalloc_caches() still work as intended. 
@@ -361,6 +368,8 @@ enum kmalloc_cache_type { #ifndef CONFIG_MEMCG_KMEM KMALLOC_CGROUP = KMALLOC_NORMAL, #endif + KMALLOC_RANDOM_START = KMALLOC_NORMAL, + KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR, #ifdef CONFIG_SLUB_TINY KMALLOC_RECLAIM = KMALLOC_NORMAL, #else @@ -386,14 +395,22 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1]; (IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \ (IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0)) -static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags) +extern unsigned long random_kmalloc_seed; + +static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller) { /* * The most common case is KMALLOC_NORMAL, so test for it * with a single branch for all the relevant flags. */ if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0)) +#ifdef CONFIG_RANDOM_KMALLOC_CACHES + /* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */ + return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed, + ilog2(RANDOM_KMALLOC_CACHES_NR + 1)); +#else return KMALLOC_NORMAL; +#endif /* * At least one of the flags has to be set. Their priorities in @@ -580,7 +597,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags) index = kmalloc_index(size); return kmalloc_trace( - kmalloc_caches[kmalloc_type(flags)][index], + kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index], flags, size); } return __kmalloc(size, flags); @@ -596,7 +613,7 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla index = kmalloc_index(size); return kmalloc_node_trace( - kmalloc_caches[kmalloc_type(flags)][index], + kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index], flags, node, size); } return __kmalloc_node(size, flags, node); diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h index e1c88627755a..1a6a851d2cf8 100644 --- a/include/linux/smscphy.h +++ b/include/linux/smscphy.h @@ -38,4 +38,38 @@ int smsc_phy_set_tunable(struct phy_device *phydev, struct ethtool_tunable *tuna, const void *data); int smsc_phy_probe(struct phy_device *phydev); +#define MII_LAN874X_PHY_MMD_WOL_WUCSR 0x8010 +#define MII_LAN874X_PHY_MMD_WOL_WUF_CFGA 0x8011 +#define MII_LAN874X_PHY_MMD_WOL_WUF_CFGB 0x8012 +#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK0 0x8021 +#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK1 0x8022 +#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK2 0x8023 +#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK3 0x8024 +#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK4 0x8025 +#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK5 0x8026 +#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK6 0x8027 +#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK7 0x8028 +#define MII_LAN874X_PHY_MMD_WOL_RX_ADDRA 0x8061 +#define MII_LAN874X_PHY_MMD_WOL_RX_ADDRB 0x8062 +#define MII_LAN874X_PHY_MMD_WOL_RX_ADDRC 0x8063 +#define MII_LAN874X_PHY_MMD_MCFGR 0x8064 + +#define MII_LAN874X_PHY_PME1_SET (2 << 13) +#define MII_LAN874X_PHY_PME2_SET (2 << 11) +#define MII_LAN874X_PHY_PME_SELF_CLEAR BIT(9) +#define MII_LAN874X_PHY_WOL_PFDA_FR BIT(7) +#define MII_LAN874X_PHY_WOL_WUFR BIT(6) +#define MII_LAN874X_PHY_WOL_MPR BIT(5) +#define MII_LAN874X_PHY_WOL_BCAST_FR BIT(4) +#define MII_LAN874X_PHY_WOL_PFDAEN BIT(3) +#define MII_LAN874X_PHY_WOL_WUEN BIT(2) +#define MII_LAN874X_PHY_WOL_MPEN BIT(1) +#define MII_LAN874X_PHY_WOL_BCSTEN BIT(0) + +#define MII_LAN874X_PHY_WOL_FILTER_EN BIT(15) +#define MII_LAN874X_PHY_WOL_FILTER_MCASTTEN BIT(9) +#define MII_LAN874X_PHY_WOL_FILTER_BCSTEN BIT(8) + +#define MII_LAN874X_PHY_PME_SELF_CLEAR_DELAY 0x1000 /* 81 
milliseconds */ + #endif /* __LINUX_SMSCPHY_H__ */ diff --git a/include/linux/soc/qcom/geni-se.h b/include/linux/soc/qcom/geni-se.h index 821a19135bb6..29e06905bc1f 100644 --- a/include/linux/soc/qcom/geni-se.h +++ b/include/linux/soc/qcom/geni-se.h @@ -35,6 +35,7 @@ enum geni_se_protocol_type { GENI_SE_UART, GENI_SE_I2C, GENI_SE_I3C, + GENI_SE_SPI_SLAVE, }; struct geni_wrapper; @@ -73,12 +74,14 @@ struct geni_se { /* Common SE registers */ #define GENI_FORCE_DEFAULT_REG 0x20 +#define GENI_OUTPUT_CTRL 0x24 #define SE_GENI_STATUS 0x40 #define GENI_SER_M_CLK_CFG 0x48 #define GENI_SER_S_CLK_CFG 0x4c #define GENI_IF_DISABLE_RO 0x64 #define GENI_FW_REVISION_RO 0x68 #define SE_GENI_CLK_SEL 0x7c +#define SE_GENI_CFG_SEQ_START 0x84 #define SE_GENI_DMA_MODE_EN 0x258 #define SE_GENI_M_CMD0 0x600 #define SE_GENI_M_CMD_CTRL_REG 0x604 @@ -111,6 +114,9 @@ struct geni_se { /* GENI_FORCE_DEFAULT_REG fields */ #define FORCE_DEFAULT BIT(0) +/* GENI_OUTPUT_CTRL fields */ +#define GENI_IO_MUX_0_EN BIT(0) + /* GENI_STATUS fields */ #define M_GENI_CMD_ACTIVE BIT(0) #define S_GENI_CMD_ACTIVE BIT(12) @@ -130,6 +136,9 @@ struct geni_se { /* GENI_CLK_SEL fields */ #define CLK_SEL_MSK GENMASK(2, 0) +/* SE_GENI_CFG_SEQ_START fields */ +#define START_TRIGGER BIT(0) + /* SE_GENI_DMA_MODE_EN */ #define GENI_DMA_MODE_EN BIT(0) diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index f523ceabd059..8923387a7405 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -6,6 +6,8 @@ #include <linux/bug.h> #include <linux/lockdep_types.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/mod_devicetable.h> #include <linux/bitfield.h> @@ -370,6 +372,7 @@ struct sdw_dpn_prop { * @clock_reg_supported: the Peripheral implements the clock base and scale * registers introduced with the SoundWire 1.2 specification. SDCA devices * do not need to set this boolean property as the registers are required. 
+ * @use_domain_irq: call actual IRQ handler on slave, as well as callback */ struct sdw_slave_prop { u32 mipi_revision; @@ -394,6 +397,7 @@ struct sdw_slave_prop { u8 scp_int1_mask; u32 quirks; bool clock_reg_supported; + bool use_domain_irq; }; #define SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY BIT(0) @@ -641,6 +645,7 @@ struct sdw_slave_ops { * struct sdw_slave - SoundWire Slave * @id: MIPI device ID * @dev: Linux device + * @irq: IRQ number * @status: Status reported by the Slave * @bus: Bus handle * @prop: Slave properties @@ -670,6 +675,7 @@ struct sdw_slave_ops { struct sdw_slave { struct sdw_slave_id id; struct device dev; + int irq; enum sdw_slave_status status; struct sdw_bus *bus; struct sdw_slave_prop prop; @@ -885,6 +891,7 @@ struct sdw_master_ops { * is used to compute and program bus bandwidth, clock, frame shape, * transport and port parameters * @debugfs: Bus debugfs + * @domain: IRQ domain * @defer_msg: Defer message * @clk_stop_timeout: Clock stop timeout computed * @bank_switch_timeout: Bank switch timeout computed @@ -920,6 +927,8 @@ struct sdw_bus { #ifdef CONFIG_DEBUG_FS struct dentry *debugfs; #endif + struct irq_chip irq_chip; + struct irq_domain *domain; struct sdw_defer defer_msg; unsigned int clk_stop_timeout; u32 bank_switch_timeout; diff --git a/include/linux/spi/corgi_lcd.h b/include/linux/spi/corgi_lcd.h index 0b857616919c..fc6c1515dc54 100644 --- a/include/linux/spi/corgi_lcd.h +++ b/include/linux/spi/corgi_lcd.h @@ -15,4 +15,6 @@ struct corgi_lcd_platform_data { void (*kick_battery)(void); }; +void corgi_lcd_limit_intensity(int limit); + #endif /* __LINUX_SPI_CORGI_LCD_H */ diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index 4658e7801b42..0916cb9bcb0a 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -19,7 +19,7 @@ struct pxa2xx_spi_controller { u16 num_chipselect; u8 enable_dma; u8 dma_burst_size; - bool is_slave; + bool is_target; /* DMA engine specific config */ bool (*dma_filter)(struct dma_chan *chan, void *param); @@ -31,7 +31,7 @@ struct pxa2xx_spi_controller { }; /* - * The controller specific data for SPI slave devices + * The controller specific data for SPI target devices * (resides in spi_board_info.controller_data), * copied to spi_device.platform_data ... mostly for * DMA tuning. 
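[Editor's note] Stepping back to the soundwire/sdw.h hunks above: each sdw_bus now carries an irq_chip and an IRQ domain, sdw_slave gains an irq number, and a peripheral can set use_domain_irq in its properties to have the bus raise a real per-device interrupt in addition to the usual interrupt_callback. A hedged sketch of how a peripheral driver might opt in, assuming the bus has already mapped slave->irq by the time the handler is requested; all foo_* names are hypothetical.

#include <linux/interrupt.h>
#include <linux/soundwire/sdw.h>

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	struct sdw_slave *slave = dev_id;

	/* Device-specific interrupt servicing would go here. */
	dev_dbg(&slave->dev, "serviced IRQ %d\n", irq);
	return IRQ_HANDLED;
}

static int foo_sdw_probe(struct sdw_slave *slave,
			 const struct sdw_device_id *id)
{
	/* Opt in: the bus dispatches slave->irq from its IRQ domain. */
	slave->prop.use_domain_irq = true;

	return devm_request_threaded_irq(&slave->dev, slave->irq, NULL,
					 foo_irq_handler, IRQF_ONESHOT,
					 dev_name(&slave->dev), slave);
}

As the @use_domain_irq kernel-doc above notes, the existing callback path still fires, so drivers can migrate to domain IRQs incrementally.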
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h index dc2a0cbd210d..f950d280461b 100644 --- a/include/linux/spi/sh_msiof.h +++ b/include/linux/spi/sh_msiof.h @@ -3,8 +3,8 @@ #define __SPI_SH_MSIOF_H__ enum { - MSIOF_SPI_MASTER, - MSIOF_SPI_SLAVE, + MSIOF_SPI_HOST, + MSIOF_SPI_TARGET, }; struct sh_msiof_spi_info { diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 8e984d75f5b6..6b0a7dc48a4b 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -101,6 +101,7 @@ struct spi_mem_op { u8 nbytes; u8 buswidth; u8 dtr : 1; + u8 __pad : 7; u16 opcode; } cmd; @@ -108,6 +109,7 @@ struct spi_mem_op { u8 nbytes; u8 buswidth; u8 dtr : 1; + u8 __pad : 7; u64 val; } addr; @@ -115,12 +117,14 @@ struct spi_mem_op { u8 nbytes; u8 buswidth; u8 dtr : 1; + u8 __pad : 7; } dummy; struct { u8 buswidth; u8 dtr : 1; u8 ecc : 1; + u8 __pad : 6; enum spi_mem_data_dir dir; unsigned int nbytes; union { diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 32c94eae8926..7f8b478fdeb3 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -6,18 +6,19 @@ #ifndef __LINUX_SPI_H #define __LINUX_SPI_H +#include <linux/acpi.h> #include <linux/bits.h> +#include <linux/completion.h> #include <linux/device.h> -#include <linux/mod_devicetable.h> -#include <linux/slab.h> +#include <linux/gpio/consumer.h> #include <linux/kthread.h> -#include <linux/completion.h> +#include <linux/mod_devicetable.h> +#include <linux/overflow.h> #include <linux/scatterlist.h> -#include <linux/gpio/consumer.h> +#include <linux/slab.h> +#include <linux/u64_stats_sync.h> #include <uapi/linux/spi/spi.h> -#include <linux/acpi.h> -#include <linux/u64_stats_sync.h> struct dma_chan; struct software_node; @@ -36,7 +37,7 @@ extern struct bus_type spi_bus_type; /** * struct spi_statistics - statistics for spi transfers - * @syncp: seqcount to protect members in this struct for per-cpu udate + * @syncp: seqcount to protect members in this struct for per-cpu update * on 32-bit systems * * @messages: number of spi-messages handled @@ -55,7 +56,7 @@ extern struct bus_type spi_bus_type; * @bytes_rx: number of bytes received from device * * @transfer_bytes_histo: - * transfer bytes histogramm + * transfer bytes histogram * * @transfers_split_maxsize: * number of transfers that have been split because of @@ -156,7 +157,7 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg, * the device will bind to the named driver and only the named driver. * Do not set directly, because core frees it; use driver_set_override() to * set or clear it. 
- * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when + * @cs_gpiod: GPIO descriptor of the chipselect line (optional, NULL when * not using a GPIO line) * @word_delay: delay to be inserted between consecutive * words of a transfer @@ -212,7 +213,7 @@ struct spi_device { void *controller_data; char modalias[SPI_NAME_SIZE]; const char *driver_override; - struct gpio_desc *cs_gpiod; /* Chip select gpio desc */ + struct gpio_desc *cs_gpiod; /* Chip select GPIO descriptor */ struct spi_delay word_delay; /* Inter-word delay */ /* CS delays */ struct spi_delay cs_setup; @@ -223,7 +224,7 @@ struct spi_device { struct spi_statistics __percpu *pcpu_statistics; /* - * likely need more hooks for more protocol options affecting how + * Likely need more hooks for more protocol options affecting how * the controller talks to each chip, like: * - memory packing (12 bit samples into low bits, others zeroed) * - priority @@ -299,11 +300,11 @@ static inline void spi_set_csgpiod(struct spi_device *spi, u8 idx, struct gpio_d /** * struct spi_driver - Host side "protocol" driver * @id_table: List of SPI devices supported by this driver - * @probe: Binds this driver to the spi device. Drivers can verify + * @probe: Binds this driver to the SPI device. Drivers can verify * that the device is actually present, and may need to configure * characteristics (such as bits_per_word) which weren't needed for * the initial configuration done during system setup. - * @remove: Unbinds this driver from the spi device + * @remove: Unbinds this driver from the SPI device * @shutdown: Standard shutdown callback used during system state * transitions such as powerdown/halt and kexec * @driver: SPI device drivers should initialize the name and owner @@ -415,7 +416,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @queued: whether this controller is providing an internal message queue * @kworker: pointer to thread struct for message pump * @pump_messages: work struct for scheduling work to the message pump - * @queue_lock: spinlock to syncronise access to message queue + * @queue_lock: spinlock to synchronise access to message queue * @queue: message queue * @cur_msg: the currently in-flight message * @cur_msg_completion: a completion for the current in-flight message @@ -473,7 +474,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @unprepare_message: undo any work done by prepare_message(). * @slave_abort: abort the ongoing transfer request on an SPI slave controller * @target_abort: abort the ongoing transfer request on an SPI target controller - * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS + * @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per CS * number. Any individual value may be NULL for CS lines that * are not GPIOs (driven by the SPI controller itself). * @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab @@ -500,7 +501,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * If the driver does not set this, the SPI core takes the snapshot as * close to the driver hand-over as possible. * @irq_flags: Interrupt enable state during PTP system timestamping - * @fallback: fallback to pio if dma transfer return failure with + * @fallback: fallback to PIO if DMA transfer return failure with * SPI_TRANS_FAIL_NO_START. * @queue_empty: signal green light for opportunistically skipping the queue * for spi_sync transfers. 
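[Editor's note] Since the spi_driver kernel-doc cleaned up above describes @probe and @remove only in the abstract, a minimal protocol-driver skeleton may help as orientation. This is a generic sketch rather than code from this diff, the bar_* names are hypothetical, and note that @remove returns void in this era of the SPI core.

#include <linux/module.h>
#include <linux/spi/spi.h>

static int bar_probe(struct spi_device *spi)
{
	/* Verify the device and configure per-chip characteristics
	 * that board setup did not cover, e.g. the word size. */
	spi->bits_per_word = 8;
	return spi_setup(spi);
}

static void bar_remove(struct spi_device *spi)
{
	/* Undo any probe-time setup here. */
}

static struct spi_driver bar_driver = {
	.driver = { .name = "bar" },
	.probe  = bar_probe,
	.remove = bar_remove,
};
module_spi_driver(bar_driver);

MODULE_LICENSE("GPL");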
@@ -522,15 +523,17 @@ struct spi_controller { struct list_head list; - /* Other than negative (== assign one dynamically), bus_num is fully - * board-specific. usually that simplifies to being SOC-specific. - * example: one SOC has three SPI controllers, numbered 0..2, - * and one board's schematics might show it using SPI-2. software + /* + * Other than negative (== assign one dynamically), bus_num is fully + * board-specific. Usually that simplifies to being SoC-specific. + * example: one SoC has three SPI controllers, numbered 0..2, + * and one board's schematics might show it using SPI-2. Software * would normally use bus_num=2 for that controller. */ s16 bus_num; - /* chipselects will be integral to many controllers; some others + /* + * Chipselects will be integral to many controllers; some others * might use board-specific GPIOs. */ u16 num_chipselect; @@ -562,8 +565,7 @@ struct spi_controller { #define SPI_CONTROLLER_NO_TX BIT(2) /* Can't do buffer write */ #define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */ #define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */ - -#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ +#define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select slave */ /* Flag indicating if the allocation of this struct is devres-managed */ bool devm_allocated; @@ -576,8 +578,8 @@ struct spi_controller { }; /* - * on some hardware transfer / message size may be constrained - * the limit may depend on device transfer settings + * On some hardware transfer / message size may be constrained + * the limit may depend on device transfer settings. */ size_t (*max_transfer_size)(struct spi_device *spi); size_t (*max_message_size)(struct spi_device *spi); @@ -595,7 +597,8 @@ struct spi_controller { /* Flag indicating that the SPI bus is locked for exclusive use */ bool bus_lock_flag; - /* Setup mode and clock, etc (spi driver may call many times). + /* + * Setup mode and clock, etc (SPI driver may call many times). * * IMPORTANT: this may be called when transfers to another * device are active. DO NOT UPDATE SHARED REGISTERS in ways @@ -613,18 +616,19 @@ struct spi_controller { */ int (*set_cs_timing)(struct spi_device *spi); - /* Bidirectional bulk transfers + /* + * Bidirectional bulk transfers * * + The transfer() method may not sleep; its main role is * just to add the message to the queue. 
* + For now there's no remove-from-queue operation, or * any other request management - * + To a given spi_device, message queueing is pure fifo + * + To a given spi_device, message queueing is pure FIFO * * + The controller's main job is to process its message queue, * selecting a chip (for masters), then transferring data * + If there are multiple spi_device children, the i/o queue - * arbitration algorithm is unspecified (round robin, fifo, + * arbitration algorithm is unspecified (round robin, FIFO, * priority, reservations, preemption, etc) * * + Chipselect stays active during the entire message @@ -705,7 +709,7 @@ struct spi_controller { const struct spi_controller_mem_ops *mem_ops; const struct spi_controller_mem_caps *mem_caps; - /* gpio chip select */ + /* GPIO chip select */ struct gpio_desc **cs_gpiods; bool use_gpio_descriptors; s8 unused_native_cs; @@ -789,7 +793,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr, struct spi_transfer *xfer, size_t progress, bool irqs_off); -/* The spi driver core manages memory for the spi_controller classdev */ +/* The SPI driver core manages memory for the spi_controller classdev */ extern struct spi_controller *__spi_alloc_controller(struct device *host, unsigned int size, bool slave); @@ -878,13 +882,13 @@ typedef void (*spi_res_release_t)(struct spi_controller *ctlr, void *res); /** - * struct spi_res - spi resource management structure + * struct spi_res - SPI resource management structure * @entry: list entry * @release: release code called prior to freeing this resource * @data: extra data allocated for the specific use-case * - * this is based on ideas from devres, but focused on life-cycle - * management during spi_message processing + * This is based on ideas from devres, but focused on life-cycle + * management during spi_message processing. */ struct spi_res { struct list_head entry; @@ -902,7 +906,7 @@ struct spi_res { * * The spi_messages themselves consist of a series of read+write transfer * segments. Those segments always read the same number of bits as they - * write; but one or the other is easily ignored by passing a null buffer + * write; but one or the other is easily ignored by passing a NULL buffer * pointer. (This is unlike most types of I/O API, because SPI hardware * is full duplex.) * @@ -913,8 +917,8 @@ struct spi_res { /** * struct spi_transfer - a read/write buffer pair - * @tx_buf: data to be written (dma-safe memory), or NULL - * @rx_buf: data to be read (dma-safe memory), or NULL + * @tx_buf: data to be written (DMA-safe memory), or NULL + * @rx_buf: data to be read (DMA-safe memory), or NULL * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped * @tx_nbits: number of bits used for writing. If 0 the default @@ -937,7 +941,7 @@ struct spi_res { * @word_delay: inter word delay to be introduced after each word size * (set by bits_per_word) transmission. * @effective_speed_hz: the effective SCK-speed that was used to - * transfer this transfer. Set to 0 if the spi bus driver does + * transfer this transfer. Set to 0 if the SPI bus driver does * not support it. * @transfer_list: transfers are sequenced through @spi_message.transfers * @tx_sg: Scatterlist for transmit, currently not for client use @@ -966,16 +970,16 @@ struct spi_res { * transmitting the "pre" word, and the "post" timestamp after receiving * transmit confirmation from the controller for the "post" word. 
* @timestamped: true if the transfer has been timestamped - * @error: Error status logged by spi controller driver. + * @error: Error status logged by SPI controller driver. * * SPI transfers always write the same number of bytes as they read. * Protocol drivers should always provide @rx_buf and/or @tx_buf. * In some cases, they may also want to provide DMA addresses for * the data being transferred; that may reduce overhead, when the - * underlying driver uses dma. + * underlying driver uses DMA. * - * If the transmit buffer is null, zeroes will be shifted out - * while filling @rx_buf. If the receive buffer is null, the data + * If the transmit buffer is NULL, zeroes will be shifted out + * while filling @rx_buf. If the receive buffer is NULL, the data * shifted in will be discarded. Only "len" bytes shift out (or in). * It's an error to try to shift out a partial word. (For example, by * shifting out three bytes with word size of sixteen or twenty bits; @@ -1009,7 +1013,7 @@ struct spi_res { * Some devices need protocol transactions to be built from a series of * spi_message submissions, where the content of one message is determined * by the results of previous messages and where the whole transaction - * ends when the chipselect goes intactive. + * ends when the chipselect goes inactive. * * When SPI can transfer in 1x,2x or 4x. It can get this transfer information * from device through @tx_nbits and @rx_nbits. In Bi-direction, these @@ -1023,10 +1027,11 @@ struct spi_res { * and its transfers, ignore them until its completion callback. */ struct spi_transfer { - /* It's ok if tx_buf == rx_buf (right?) - * for MicroWire, one buffer must be null - * buffers must work with dma_*map_single() calls, unless - * spi_message.is_dma_mapped reports a pre-existing mapping + /* + * It's okay if tx_buf == rx_buf (right?). + * For MicroWire, one buffer must be NULL. + * Buffers must work with dma_*map_single() calls, unless + * spi_message.is_dma_mapped reports a pre-existing mapping. 
*/ const void *tx_buf; void *rx_buf; @@ -1046,9 +1051,9 @@ struct spi_transfer { unsigned tx_nbits:3; unsigned rx_nbits:3; unsigned timestamped:1; -#define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */ -#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */ -#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */ +#define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */ +#define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */ +#define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */ u8 bits_per_word; struct spi_delay delay; struct spi_delay cs_change_delay; @@ -1069,7 +1074,7 @@ struct spi_transfer { * struct spi_message - one multi-segment SPI transaction * @transfers: list of transfer segments in this transaction * @spi: SPI device to which the transaction is queued - * @is_dma_mapped: if true, the caller provided both dma and cpu virtual + * @is_dma_mapped: if true, the caller provided both DMA and CPU virtual * addresses for each transfer buffer * @complete: called to report transaction completions * @context: the argument to complete() when it's called @@ -1079,8 +1084,10 @@ struct spi_transfer { * @status: zero for success, else negative errno * @queue: for use by whichever driver currently owns the message * @state: for use by whichever driver currently owns the message - * @resources: for resource management when the spi message is processed + * @resources: for resource management when the SPI message is processed * @prepared: spi_prepare_message was called for the this message + * @t: for use with spi_message_alloc() when message and transfers have + * been allocated together * * A @spi_message is used to execute an atomic sequence of data transfers, * each represented by a struct spi_transfer. The sequence is "atomic" @@ -1106,7 +1113,8 @@ struct spi_message { /* spi_prepare_message() was called for this message */ bool prepared; - /* REVISIT: we might want a flag affecting the behavior of the + /* + * REVISIT: we might want a flag affecting the behavior of the * last transfer ... allowing things like "read 16 bit length L" * immediately followed by "read L bytes". Basically imposing * a specific message scheduling algorithm. @@ -1124,15 +1132,19 @@ struct spi_message { unsigned frame_length; unsigned actual_length; - /* For optional use by whatever driver currently owns the + /* + * For optional use by whatever driver currently owns the * spi_message ... between calls to spi_async and then later * complete(), that's the spi_controller controller driver. 
*/ struct list_head queue; void *state; - /* List of spi_res reources when the spi message is processed */ + /* List of spi_res resources when the SPI message is processed */ struct list_head resources; + + /* For embedding transfers into the memory of the message */ + struct spi_transfer t[]; }; static inline void spi_message_init_no_memset(struct spi_message *m) @@ -1168,7 +1180,7 @@ spi_transfer_delay_exec(struct spi_transfer *t) /** * spi_message_init_with_transfers - Initialize spi_message and append transfers * @m: spi_message to be initialized - * @xfers: An array of spi transfers + * @xfers: An array of SPI transfers * @num_xfers: Number of items in the xfer array * * This function initializes the given spi_message and adds each spi_transfer in @@ -1185,24 +1197,21 @@ struct spi_transfer *xfers, unsigned int num_xfers) spi_message_add_tail(&xfers[i], m); } -/* It's fine to embed message and transaction structures in other data +/* + * It's fine to embed message and transaction structures in other data * structures so long as you don't free them while they're in use. */ - static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags) { struct spi_message *m; - m = kzalloc(sizeof(struct spi_message) - + ntrans * sizeof(struct spi_transfer), - flags); + m = kzalloc(struct_size(m, t, ntrans), flags); if (m) { unsigned i; - struct spi_transfer *t = (struct spi_transfer *)(m + 1); spi_message_init_no_memset(m); - for (i = 0; i < ntrans; i++, t++) - spi_message_add_tail(t, m); + for (i = 0; i < ntrans; i++) + spi_message_add_tail(&m->t[i], m); } return m; } @@ -1291,7 +1300,7 @@ typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr, * replacements that have occurred * so that they can get reverted * @release: some extra release code to get executed prior to - * relasing this structure + * releasing this structure * @extradata: pointer to some extra data if requested or NULL * @replaced_transfers: transfers that have been replaced and which need * to get restored @@ -1301,9 +1310,9 @@ typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr, * @inserted_transfers: array of spi_transfers of array-size @inserted, * that have been replacing replaced_transfers * - * note: that @extradata will point to @inserted_transfers[@inserted] + * Note: that @extradata will point to @inserted_transfers[@inserted] * if some extra allocation is requested, so alignment will be the same - * as for spi_transfers + * as for spi_transfers. */ struct spi_replaced_transfers { spi_replaced_release_t release; @@ -1329,7 +1338,8 @@ extern int spi_split_transfers_maxwords(struct spi_controller *ctlr, /*---------------------------------------------------------------------------*/ -/* All these synchronous SPI transfer routines are utilities layered +/* + * All these synchronous SPI transfer routines are utilities layered * over the core async transfer primitive. Here, "synchronous" means * they will sleep uninterruptibly until the async transfer completes. */ @@ -1472,7 +1482,7 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) * * Callable only from contexts that can sleep. * - * Return: the (unsigned) sixteen bit number returned by the device in cpu + * Return: the (unsigned) sixteen bit number returned by the device in CPU * endianness, or else a negative error code. 
*/ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) @@ -1500,7 +1510,7 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) * As a rule, SPI devices can't be probed. Instead, board init code * provides a table listing the devices which are present, with enough * information to bind and set up the device's driver. There's basic - * support for nonstatic configurations too; enough to handle adding + * support for non-static configurations too; enough to handle adding * parport adapters, or microcontrollers acting as USB-to-SPI bridges. */ @@ -1537,12 +1547,13 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) * are active in some dynamic board configuration models. */ struct spi_board_info { - /* The device name and module name are coupled, like platform_bus; + /* + * The device name and module name are coupled, like platform_bus; * "modalias" is normally the driver name. * * platform_data goes to spi_device.dev.platform_data, * controller_data goes to spi_device.controller_data, - * irq is copied too + * IRQ is copied too. */ char modalias[SPI_NAME_SIZE]; const void *platform_data; @@ -1554,7 +1565,8 @@ struct spi_board_info { u32 max_speed_hz; - /* bus_num is board specific and matches the bus_num of some + /* + * bus_num is board specific and matches the bus_num of some * spi_controller that will probably be registered later. * * chip_select reflects how this chip is wired to that master; @@ -1563,12 +1575,14 @@ struct spi_board_info { u16 bus_num; u16 chip_select; - /* mode becomes spi_device.mode, and is essential for chips + /* + * mode becomes spi_device.mode, and is essential for chips * where the default of SPI_CS_HIGH = 0 is wrong. */ u32 mode; - /* ... may need additional spi_device chip config data here. + /* + * ... may need additional spi_device chip config data here. * avoid stuff protocol drivers can set; but include stuff * needed to behave without being bound to a driver: * - quirks like clock rate mattering when not selected @@ -1585,7 +1599,8 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n) { return 0; } #endif -/* If you're hotplugging an adapter with devices (parport, usb, etc) +/* + * If you're hotplugging an adapter with devices (parport, USB, etc) * use spi_new_device() to describe each device. You can also call * spi_unregister_device() to start making that device vanish, but * normally that would be handled by spi_unregister_controller(). @@ -1623,10 +1638,6 @@ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer) #define spi_master spi_controller #define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX -#define SPI_MASTER_NO_RX SPI_CONTROLLER_NO_RX -#define SPI_MASTER_NO_TX SPI_CONTROLLER_NO_TX -#define SPI_MASTER_MUST_RX SPI_CONTROLLER_MUST_RX -#define SPI_MASTER_MUST_TX SPI_CONTROLLER_MUST_TX #define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr) #define spi_master_set_devdata(_ctlr, _data) \ diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index ebd72491af99..447133171d95 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -48,6 +48,10 @@ void srcu_drive_gp(struct work_struct *wp); #define DEFINE_STATIC_SRCU(name) \ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name) +// Dummy structure for srcu_notifier_head. 
+struct srcu_usage { }; +#define __SRCU_USAGE_INIT(name) { } + void synchronize_srcu(struct srcu_struct *ssp); /* diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 06090538fe2d..ce89cc3e4913 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -76,6 +76,8 @@ | DMA_AXI_BLEN_32 | DMA_AXI_BLEN_64 \ | DMA_AXI_BLEN_128 | DMA_AXI_BLEN_256) +struct stmmac_priv; + /* Platfrom data for platform device structure's platform_data field */ struct stmmac_mdio_bus_data { @@ -204,14 +206,41 @@ struct dwmac4_addrs { u32 mtl_low_cred_offset; }; +#define STMMAC_FLAG_HAS_INTEGRATED_PCS BIT(0) +#define STMMAC_FLAG_SPH_DISABLE BIT(1) +#define STMMAC_FLAG_USE_PHY_WOL BIT(2) +#define STMMAC_FLAG_HAS_SUN8I BIT(3) +#define STMMAC_FLAG_TSO_EN BIT(4) +#define STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP BIT(5) +#define STMMAC_FLAG_VLAN_FAIL_Q_EN BIT(6) +#define STMMAC_FLAG_MULTI_MSI_EN BIT(7) +#define STMMAC_FLAG_EXT_SNAPSHOT_EN BIT(8) +#define STMMAC_FLAG_INT_SNAPSHOT_EN BIT(9) +#define STMMAC_FLAG_RX_CLK_RUNS_IN_LPI BIT(10) +#define STMMAC_FLAG_EN_TX_LPI_CLOCKGATING BIT(11) +#define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY BIT(12) + struct plat_stmmacenet_data { int bus_id; int phy_addr; - int interface; + /* MAC ----- optional PCS ----- SerDes ----- optional PHY ----- Media + * ^ ^ + * mac_interface phy_interface + * + * mac_interface is the MAC-side interface, which may be the same + * as phy_interface if there is no intervening PCS. If there is a + * PCS, then mac_interface describes the interface mode between the + * MAC and PCS, and phy_interface describes the interface mode + * between the PCS and PHY. + */ + phy_interface_t mac_interface; + /* phy_interface is the PHY-side interface - the interface used by + * an attached PHY. + */ phy_interface_t phy_interface; struct stmmac_mdio_bus_data *mdio_bus_data; struct device_node *phy_node; - struct device_node *phylink_node; + struct fwnode_handle *port_node; struct device_node *mdio_node; struct stmmac_dma_cfg *dma_cfg; struct stmmac_est *est; @@ -240,12 +269,12 @@ struct plat_stmmacenet_data { u8 tx_sched_algorithm; struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES]; struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES]; - void (*fix_mac_speed)(void *priv, unsigned int speed); + void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode); int (*fix_soc_reset)(void *priv, void __iomem *ioaddr); int (*serdes_powerup)(struct net_device *ndev, void *priv); void (*serdes_powerdown)(struct net_device *ndev, void *priv); void (*speed_mode_2500)(struct net_device *ndev, void *priv); - void (*ptp_clk_freq_config)(void *priv); + void (*ptp_clk_freq_config)(struct stmmac_priv *priv); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); struct mac_device_info *(*setup)(void *priv); @@ -266,22 +295,14 @@ struct plat_stmmacenet_data { struct reset_control *stmmac_ahb_rst; struct stmmac_axi *axi; int has_gmac4; - bool has_sun8i; - bool tso_en; int rss_en; int mac_port_sel_speed; - bool en_tx_lpi_clockgating; - bool rx_clk_runs_in_lpi; int has_xgmac; - bool vlan_fail_q_en; u8 vlan_fail_q; unsigned int eee_usecs_rate; struct pci_dev *pdev; int int_snapshot_num; int ext_snapshot_num; - bool int_snapshot_en; - bool ext_snapshot_en; - bool multi_msi_en; int msi_mac_vec; int msi_wol_vec; int msi_lpi_vec; @@ -289,10 +310,7 @@ struct plat_stmmacenet_data { int msi_sfty_ue_vec; int msi_rx_base_vec; int msi_tx_base_vec; - bool use_phy_wol; - bool sph_disable; - bool 
serdes_up_after_phy_linkup; const struct dwmac4_addrs *dwmac4_addrs; - bool has_integrated_pcs; + unsigned int flags; }; #endif diff --git a/include/linux/swait.h b/include/linux/swait.h index 6a8c22b8c2a5..d324419482a0 100644 --- a/include/linux/swait.h +++ b/include/linux/swait.h @@ -146,7 +146,7 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq) extern void swake_up_one(struct swait_queue_head *q); extern void swake_up_all(struct swait_queue_head *q); -extern void swake_up_locked(struct swait_queue_head *q); +extern void swake_up_locked(struct swait_queue_head *q, int wake_flags); extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state); extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); diff --git a/include/linux/swap.h b/include/linux/swap.h index 456546443f1f..493487ed7c38 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -302,10 +302,6 @@ struct swap_info_struct { struct file *swap_file; /* seldom referenced */ unsigned int old_block_size; /* seldom referenced */ struct completion comp; /* seldom referenced */ -#ifdef CONFIG_FRONTSWAP - unsigned long *frontswap_map; /* frontswap in-use, one bit per page */ - atomic_t frontswap_pages; /* frontswap pages in-use counter */ -#endif spinlock_t lock; /* * protect map scan related fields like * swap_map, lowest_bit, highest_bit, @@ -337,15 +333,13 @@ struct swap_info_struct { */ }; -static inline swp_entry_t folio_swap_entry(struct folio *folio) +static inline swp_entry_t page_swap_entry(struct page *page) { - swp_entry_t entry = { .val = page_private(&folio->page) }; - return entry; -} + struct folio *folio = page_folio(page); + swp_entry_t entry = folio->swap; -static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry) -{ - folio->private = (void *)entry.val; + entry.val += folio_page_idx(folio, page); + return entry; } /* linux/mm/workingset.c */ @@ -630,11 +624,6 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) } #endif -#ifdef CONFIG_ZSWAP -extern u64 zswap_pool_total_size; -extern atomic_t zswap_stored_pages; -#endif - #if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp); static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp) diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h index 7ed529a77c5b..99e3ed469e88 100644 --- a/include/linux/swapfile.h +++ b/include/linux/swapfile.h @@ -2,11 +2,6 @@ #ifndef _LINUX_SWAPFILE_H #define _LINUX_SWAPFILE_H -/* - * these were static in swapfile.c but frontswap.c needs them and we don't - * want to expose them to the dozens of source files that include swap.h - */ -extern struct swap_info_struct *swap_info[]; extern unsigned long generic_max_swapfile_size(void); unsigned long arch_max_swapfile_size(void); diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 4c932cb45e0b..bff1e8d97de0 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -393,7 +393,12 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry) typedef unsigned long pte_marker; #define PTE_MARKER_UFFD_WP BIT(0) -#define PTE_MARKER_SWAPIN_ERROR BIT(1) +/* + * "Poisoned" here is meant in the very general sense of "future accesses are + * invalid", instead of referring very specifically to hardware memory errors. + * This marker is meant to represent any of various different causes of this. 
+ */ +#define PTE_MARKER_POISONED BIT(1) #define PTE_MARKER_MASK (BIT(2) - 1) static inline swp_entry_t make_pte_marker_entry(pte_marker marker) @@ -421,15 +426,15 @@ static inline pte_t make_pte_marker(pte_marker marker) return swp_entry_to_pte(make_pte_marker_entry(marker)); } -static inline swp_entry_t make_swapin_error_entry(void) +static inline swp_entry_t make_poisoned_swp_entry(void) { - return make_pte_marker_entry(PTE_MARKER_SWAPIN_ERROR); + return make_pte_marker_entry(PTE_MARKER_POISONED); } -static inline int is_swapin_error_entry(swp_entry_t entry) +static inline int is_poisoned_swp_entry(swp_entry_t entry) { return is_pte_marker_entry(entry) && - (pte_marker_get(entry) & PTE_MARKER_SWAPIN_ERROR); + (pte_marker_get(entry) & PTE_MARKER_POISONED); } /* diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 03e3d0121d5e..c0cb22cd607d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -284,22 +284,6 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event) #endif /* - * Called before coming back to user-mode. Returning to user-mode with an - * address limit different than USER_DS can allow to overwrite kernel memory. - */ -static inline void addr_limit_user_check(void) -{ -#ifdef TIF_FSCHECK - if (!test_thread_flag(TIF_FSCHECK)) - return; -#endif - -#ifdef TIF_FSCHECK - clear_thread_flag(TIF_FSCHECK); -#endif -} - -/* * These syscall function prototypes are kept in the same order as * include/uapi/asm-generic/unistd.h. Architecture specific entries go below, * followed by deprecated or obsolete system calls. @@ -438,8 +422,10 @@ asmlinkage long sys_chdir(const char __user *filename); asmlinkage long sys_fchdir(unsigned int fd); asmlinkage long sys_chroot(const char __user *filename); asmlinkage long sys_fchmod(unsigned int fd, umode_t mode); -asmlinkage long sys_fchmodat(int dfd, const char __user * filename, +asmlinkage long sys_fchmodat(int dfd, const char __user *filename, umode_t mode); +asmlinkage long sys_fchmodat2(int dfd, const char __user *filename, + umode_t mode, unsigned int flags); asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group, int flag); asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group); diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 91a37c99ba66..3c5efeeb024f 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -172,6 +172,8 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) return (struct tcp_request_sock *)req; } +#define TCP_RMEM_TO_WIN_SCALE 8 + struct tcp_sock { /* inet_connection_sock has to be the first member of tcp_sock */ struct inet_connection_sock inet_conn; @@ -238,7 +240,7 @@ struct tcp_sock { u32 window_clamp; /* Maximal window to advertise */ u32 rcv_ssthresh; /* Current window clamp */ - + u8 scaling_ratio; /* see tcp_win_from_space() */ /* Information of the most recently (s)acked skb */ struct tcp_rack { u64 mstamp; /* (Re)sent time of the skb */ @@ -562,6 +564,6 @@ void __tcp_sock_set_nodelay(struct sock *sk, bool on); void tcp_sock_set_nodelay(struct sock *sk); void tcp_sock_set_quickack(struct sock *sk, int val); int tcp_sock_set_syncnt(struct sock *sk, int val); -void tcp_sock_set_user_timeout(struct sock *sk, u32 val); +int tcp_sock_set_user_timeout(struct sock *sk, int val); #endif /* _LINUX_TCP_H */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index dee66ade89a0..b449a46766f5 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ 
-81,11 +81,13 @@ struct thermal_zone_device_ops { * @temperature: temperature value in miliCelsius * @hysteresis: relative hysteresis in miliCelsius * @type: trip point type + * @priv: pointer to driver data associated with this trip */ struct thermal_trip { int temperature; int hysteresis; enum thermal_trip_type type; + void *priv; }; struct thermal_cooling_device_ops { @@ -287,6 +289,9 @@ int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id, int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id, const struct thermal_trip *trip); +int for_each_thermal_trip(struct thermal_zone_device *tz, + int (*cb)(struct thermal_trip *, void *), + void *data); int thermal_zone_get_num_trips(struct thermal_zone_device *tz); int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp); @@ -323,6 +328,10 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); void thermal_zone_device_update(struct thermal_zone_device *, enum thermal_notify_event); +void thermal_zone_device_exec(struct thermal_zone_device *tz, + void (*cb)(struct thermal_zone_device *, + unsigned long), + unsigned long data); struct thermal_cooling_device *thermal_cooling_device_register(const char *, void *, const struct thermal_cooling_device_ops *); diff --git a/include/linux/torture.h b/include/linux/torture.h index 7038104463e4..bb466eec01e4 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -108,12 +108,15 @@ bool torture_must_stop(void); bool torture_must_stop_irq(void); void torture_kthread_stopping(char *title); int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m, - char *f, struct task_struct **tp); + char *f, struct task_struct **tp, void (*cbf)(struct task_struct *tp)); void _torture_stop_kthread(char *m, struct task_struct **tp); #define torture_create_kthread(n, arg, tp) \ _torture_create_kthread(n, (arg), #n, "Creating " #n " task", \ - "Failed to create " #n, &(tp)) + "Failed to create " #n, &(tp), NULL) +#define torture_create_kthread_cb(n, arg, tp, cbf) \ + _torture_create_kthread(n, (arg), #n, "Creating " #n " task", \ + "Failed to create " #n, &(tp), cbf) #define torture_stop_kthread(n, tp) \ _torture_stop_kthread("Stopping " #n " task", &(tp)) diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 6a1e8f157255..4ee9d13749ad 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -283,6 +283,7 @@ enum tpm_chip_flags { TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = BIT(6), TPM_CHIP_FLAG_FIRMWARE_UPGRADE = BIT(7), TPM_CHIP_FLAG_SUSPENDED = BIT(8), + TPM_CHIP_FLAG_HWRNG_DISABLED = BIT(9), }; #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 3930e676436c..c1a0a19d80fb 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -59,6 +59,17 @@ int trace_raw_output_prep(struct trace_iterator *iter, extern __printf(2, 3) void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...); +/* Used to find the offset and length of dynamic fields in trace events */ +struct trace_dynamic_info { +#ifdef CONFIG_CPU_BIG_ENDIAN + u16 offset; + u16 len; +#else + u16 len; + u16 offset; +#endif +}; + /* * The trace entry - the most basic unit of tracing. 
This is what * is printed in the end as a single line in the trace output, such as: @@ -752,6 +763,7 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, u64 *probe_addr); int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); #else static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) { @@ -798,6 +810,11 @@ bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) { return -EOPNOTSUPP; } +static inline int +bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} #endif enum { @@ -867,7 +884,8 @@ extern int perf_uprobe_init(struct perf_event *event, extern void perf_uprobe_destroy(struct perf_event *event); extern int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type, const char **filename, - u64 *probe_offset, bool perf_type_tracepoint); + u64 *probe_offset, u64 *probe_addr, + bool perf_type_tracepoint); #endif extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); diff --git a/include/linux/uio.h b/include/linux/uio.h index ff81e5ccaef2..42bce38a8e87 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -163,7 +163,7 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs) return ret; } -size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, +size_t copy_page_from_iter_atomic(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); void iov_iter_advance(struct iov_iter *i, size_t bytes); void iov_iter_revert(struct iov_iter *i, size_t bytes); @@ -184,6 +184,13 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset, { return copy_page_to_iter(&folio->page, offset, bytes, i); } + +static inline size_t copy_folio_from_iter_atomic(struct folio *folio, + size_t offset, size_t bytes, struct iov_iter *i) +{ + return copy_page_from_iter_atomic(&folio->page, offset, bytes, i); +} + size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes, struct iov_iter *i); diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h index 20d88b1defc3..287e9d83fb8b 100644 --- a/include/linux/usb/r8152.h +++ b/include/linux/usb/r8152.h @@ -29,6 +29,7 @@ #define VENDOR_ID_LINKSYS 0x13b1 #define VENDOR_ID_NVIDIA 0x0955 #define VENDOR_ID_TPLINK 0x2357 +#define VENDOR_ID_DLINK 0x2001 #if IS_REACHABLE(CONFIG_USB_RTL8152) extern u8 rtl8152_get_version(struct usb_interface *intf); diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index ac7b0c96d351..ac8c6854097c 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -46,6 +46,7 @@ enum mfill_atomic_mode { MFILL_ATOMIC_COPY, MFILL_ATOMIC_ZEROPAGE, MFILL_ATOMIC_CONTINUE, + MFILL_ATOMIC_POISON, NR_MFILL_ATOMIC_MODES, }; @@ -83,6 +84,9 @@ extern ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm, extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long len, atomic_t *mmap_changing, uffd_flags_t flags); +extern ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start, + unsigned long len, atomic_t *mmap_changing, + uffd_flags_t flags); extern int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, unsigned long len, bool enable_wp, atomic_t *mmap_changing); diff --git 
a/include/linux/virtio_net.h b/include/linux/virtio_net.h index bdf8de2cdd93..7b4dd69555e4 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -155,6 +155,10 @@ retry: if (gso_type & SKB_GSO_UDP) nh_off -= thlen; + /* Kernel has a special handling for GSO_BY_FRAGS. */ + if (gso_size == GSO_BY_FRAGS) + return -EINVAL; + /* Too small packets are not really GSO ones. */ if (skb->len - nh_off > gso_size) { shinfo->gso_size = gso_size; diff --git a/include/linux/wait.h b/include/linux/wait.h index a0307b516b09..5ec7739400f4 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -210,6 +210,7 @@ __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq } int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); +void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head, unsigned int mode, void *key, wait_queue_entry_t *bookmark); @@ -237,6 +238,8 @@ void __wake_up_pollfree(struct wait_queue_head *wq_head); #define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m)) #define wake_up_poll(x, m) \ __wake_up(x, TASK_NORMAL, 1, poll_to_key(m)) +#define wake_up_poll_on_current_cpu(x, m) \ + __wake_up_on_current_cpu(x, TASK_NORMAL, poll_to_key(m)) #define wake_up_locked_poll(x, m) \ __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m)) #define wake_up_interruptible_poll(x, m) \ diff --git a/include/linux/writeback.h b/include/linux/writeback.h index fba937999fbf..083387c00f0c 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -375,11 +375,6 @@ void tag_pages_for_writeback(struct address_space *mapping, pgoff_t start, pgoff_t end); bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio); -void folio_account_redirty(struct folio *folio); -static inline void account_page_redirty(struct page *page) -{ - folio_account_redirty(page_folio(page)); -} bool folio_redirty_for_writepage(struct writeback_control *, struct folio *); bool redirty_page_for_writepage(struct writeback_control *, struct page *); diff --git a/include/linux/xattr.h b/include/linux/xattr.h index d591ef59aa98..d20051865800 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -114,13 +114,15 @@ struct simple_xattr { }; void simple_xattrs_init(struct simple_xattrs *xattrs); -void simple_xattrs_free(struct simple_xattrs *xattrs); +void simple_xattrs_free(struct simple_xattrs *xattrs, size_t *freed_space); +size_t simple_xattr_space(const char *name, size_t size); struct simple_xattr *simple_xattr_alloc(const void *value, size_t size); +void simple_xattr_free(struct simple_xattr *xattr); int simple_xattr_get(struct simple_xattrs *xattrs, const char *name, void *buffer, size_t size); -int simple_xattr_set(struct simple_xattrs *xattrs, const char *name, - const void *value, size_t size, int flags, - ssize_t *removed_size); +struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs, + const char *name, const void *value, + size_t size, int flags); ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, char *buffer, size_t size); void simple_xattr_add(struct simple_xattrs *xattrs, diff --git a/include/linux/zswap.h b/include/linux/zswap.h new file mode 100644 index 000000000000..2a60ce39cfde --- /dev/null +++ b/include/linux/zswap.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 
*/ +#ifndef _LINUX_ZSWAP_H +#define _LINUX_ZSWAP_H + +#include <linux/types.h> +#include <linux/mm_types.h> + +extern u64 zswap_pool_total_size; +extern atomic_t zswap_stored_pages; + +#ifdef CONFIG_ZSWAP + +bool zswap_store(struct folio *folio); +bool zswap_load(struct folio *folio); +void zswap_invalidate(int type, pgoff_t offset); +void zswap_swapon(int type); +void zswap_swapoff(int type); + +#else + +static inline bool zswap_store(struct folio *folio) +{ + return false; +} + +static inline bool zswap_load(struct folio *folio) +{ + return false; +} + +static inline void zswap_invalidate(int type, pgoff_t offset) {} +static inline void zswap_swapon(int type) {} +static inline void zswap_swapoff(int type) {} + +#endif + +#endif /* _LINUX_ZSWAP_H */ diff --git a/include/net/Space.h b/include/net/Space.h index 08ca9cef0213..c29f3d51c078 100644 --- a/include/net/Space.h +++ b/include/net/Space.h @@ -3,18 +3,11 @@ * ethernet adaptor have the name "eth[0123...]". */ -struct net_device *hp100_probe(int unit); struct net_device *ultra_probe(int unit); struct net_device *wd_probe(int unit); struct net_device *ne_probe(int unit); -struct net_device *fmv18x_probe(int unit); -struct net_device *ni65_probe(int unit); -struct net_device *sonic_probe(int unit); struct net_device *smc_init(int unit); struct net_device *cs89x0_probe(int unit); struct net_device *tc515_probe(int unit); struct net_device *lance_probe(int unit); struct net_device *cops_probe(int unit); - -/* Fibre Channel adapters */ -int iph5526_probe(struct net_device *dev); diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index 0e7504a42925..b01cf9ac2437 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -201,7 +201,6 @@ static inline bool __vsock_in_connected_table(struct vsock_sock *vsk) return !list_empty(&vsk->connected_table); } -void vsock_release_pending(struct sock *pending); void vsock_add_pending(struct sock *listener, struct sock *pending); void vsock_remove_pending(struct sock *listener, struct sock *pending); void vsock_enqueue_accept(struct sock *listener, struct sock *connected); @@ -225,7 +224,6 @@ struct vsock_tap { struct list_head list; }; -int vsock_init_tap(void); int vsock_add_tap(struct vsock_tap *vt); int vsock_remove_tap(struct vsock_tap *vt); void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque); diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index af729859385e..aa90adc3b2a4 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -386,6 +386,7 @@ struct bt_sock { enum { BT_SK_DEFER_SETUP, BT_SK_SUSPEND, + BT_SK_PKT_STATUS }; struct bt_sock_list { @@ -400,6 +401,8 @@ int bt_sock_register(int proto, const struct net_proto_family *ops); void bt_sock_unregister(int proto); void bt_sock_link(struct bt_sock_list *l, struct sock *s); void bt_sock_unlink(struct bt_sock_list *l, struct sock *s); +struct sock *bt_sock_alloc(struct net *net, struct socket *sock, + struct proto *prot, int proto, gfp_t prio, int kern); int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags); int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, @@ -430,10 +433,6 @@ struct l2cap_ctrl { struct l2cap_chan *chan; }; -struct sco_ctrl { - u8 pkt_status; -}; - struct hci_dev; typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode); @@ -464,16 +463,18 @@ struct bt_skb_cb { u8 force_active; u16 expect; u8 incoming:1; + u8 pkt_status:2; union { struct l2cap_ctrl 
l2cap; - struct sco_ctrl sco; struct hci_ctrl hci; struct mgmt_ctrl mgmt; + struct scm_creds creds; }; }; #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb)) #define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type +#define hci_skb_pkt_status(skb) bt_cb((skb))->pkt_status #define hci_skb_expect(skb) bt_cb((skb))->expect #define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode #define hci_skb_event(skb) bt_cb((skb))->hci.req_event diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 872dcb91a540..87d92accc26e 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -309,6 +309,26 @@ enum { * to support it. */ HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, + + /* When this quirk is set, MSFT extension monitor tracking by + * address filter is supported. Since tracking quantity of each + * pattern is limited, this feature supports tracking multiple + * devices concurrently if controller supports multiple + * address filters. + * + * This quirk must be set before hci_register_dev is called. + */ + HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, + + /* + * When this quirk is set, LE Coded PHY shall not be used. This is + * required for some Intel controllers which erroneously claim to + * support it but it causes problems with extended scanning. + * + * This quirk can be set before hci_register_dev is called or + * during the hdev->setup vendor callback. + */ + HCI_QUIRK_BROKEN_LE_CODED, }; /* HCI device flags */ @@ -577,6 +597,7 @@ enum { #define HCI_LE_CIS_CENTRAL 0x10 #define HCI_LE_CIS_PERIPHERAL 0x20 #define HCI_LE_ISO_BROADCASTER 0x40 +#define HCI_LE_ISO_SYNC_RECEIVER 0x80 /* Connection modes */ #define HCI_CM_ACTIVE 0x0000 @@ -2760,6 +2781,17 @@ struct hci_ev_le_enh_conn_complete { __u8 clk_accurancy; } __packed; +#define HCI_EV_LE_PER_ADV_REPORT 0x0f +struct hci_ev_le_per_adv_report { + __le16 sync_handle; + __u8 tx_power; + __u8 rssi; + __u8 cte_type; + __u8 data_status; + __u8 length; + __u8 data[]; +} __packed; + #define HCI_EV_LE_EXT_ADV_SET_TERM 0x12 struct hci_evt_le_ext_adv_set_term { __u8 status; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index e01d52cb668c..e6359f7346f1 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -83,7 +83,7 @@ struct discovery_state { u8 last_adv_addr_type; s8 last_adv_rssi; u32 last_adv_flags; - u8 last_adv_data[HCI_MAX_AD_LENGTH]; + u8 last_adv_data[HCI_MAX_EXT_AD_LENGTH]; u8 last_adv_data_len; bool report_invalid_rssi; bool result_filtering; @@ -290,7 +290,7 @@ struct adv_pattern { __u8 ad_type; __u8 offset; __u8 length; - __u8 value[HCI_MAX_AD_LENGTH]; + __u8 value[HCI_MAX_EXT_AD_LENGTH]; }; struct adv_rssi_thresholds { @@ -321,8 +321,8 @@ struct adv_monitor { #define HCI_MAX_SHORT_NAME_LENGTH 10 -#define HCI_CONN_HANDLE_UNSET 0xffff #define HCI_CONN_HANDLE_MAX 0x0eff +#define HCI_CONN_HANDLE_UNSET(_handle) (_handle > HCI_CONN_HANDLE_MAX) /* Min encryption key size to match with SMP */ #define HCI_MIN_ENC_KEY_SIZE 7 @@ -726,7 +726,7 @@ struct hci_conn { __u16 le_conn_interval; __u16 le_conn_latency; __u16 le_supv_timeout; - __u8 le_adv_data[HCI_MAX_AD_LENGTH]; + __u8 le_adv_data[HCI_MAX_EXT_AD_LENGTH]; __u8 le_adv_data_len; __u8 le_per_adv_data[HCI_MAX_PER_AD_LENGTH]; __u8 le_per_adv_data_len; @@ -739,6 +739,7 @@ struct hci_conn { unsigned long flags; enum conn_reasons conn_reason; + __u8 abort_reason; __u32 clock; __u16 clock_accuracy; @@ -758,7 +759,6 @@ struct hci_conn { struct delayed_work auto_accept_work; struct delayed_work idle_work; struct 
delayed_work le_conn_timeout; - struct work_struct le_scan_cleanup; struct device dev; struct dentry *debugfs; @@ -974,6 +974,12 @@ enum { HCI_CONN_SCANNING, HCI_CONN_AUTH_FAILURE, HCI_CONN_PER_ADV, + HCI_CONN_BIG_CREATED, + HCI_CONN_CREATE_CIS, + HCI_CONN_BIG_SYNC, + HCI_CONN_BIG_SYNC_FAILED, + HCI_CONN_PA_SYNC, + HCI_CONN_PA_SYNC_FAILED, }; static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) @@ -1093,8 +1099,7 @@ static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle) } static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev, - bdaddr_t *ba, - __u8 big, __u8 bis) + bdaddr_t *ba, __u8 bis) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; @@ -1105,7 +1110,33 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev, if (bacmp(&c->dst, ba) || c->type != ISO_LINK) continue; - if (c->iso_qos.bcast.big == big && c->iso_qos.bcast.bis == bis) { + if (c->iso_qos.bcast.bis == bis) { + rcu_read_unlock(); + return c; + } + } + rcu_read_unlock(); + + return NULL; +} + +static inline struct hci_conn * +hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev, + bdaddr_t *ba, + __u8 big, __u8 bis) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (bacmp(&c->dst, ba) || c->type != ISO_LINK || + !test_bit(HCI_CONN_PER_ADV, &c->flags)) + continue; + + if (c->iso_qos.bcast.big == big && + c->iso_qos.bcast.bis == bis) { rcu_read_unlock(); return c; } @@ -1190,7 +1221,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev, rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != ISO_LINK) + if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY)) continue; /* Match CIG ID if set */ @@ -1222,7 +1253,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev, rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != ISO_LINK) + if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY)) continue; if (handle == c->iso_qos.ucast.cig) { @@ -1259,6 +1290,52 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev, return NULL; } +static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev *hdev, + __u8 handle) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type != ISO_LINK) + continue; + + if (handle != BT_ISO_QOS_BIG_UNSET && handle == c->iso_qos.bcast.big) { + rcu_read_unlock(); + return c; + } + } + + rcu_read_unlock(); + + return NULL; +} + +static inline struct hci_conn * +hci_conn_hash_lookup_pa_sync(struct hci_dev *hdev, __u8 big) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type != ISO_LINK || + !test_bit(HCI_CONN_PA_SYNC, &c->flags)) + continue; + + if (c->iso_qos.bcast.big == big) { + rcu_read_unlock(); + return c; + } + } + rcu_read_unlock(); + + return NULL; +} + static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, __u8 type, __u16 state) { @@ -1320,11 +1397,33 @@ static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev) return NULL; } +/* Returns true if an le connection is in the scanning state */ +static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + 
rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type == LE_LINK && c->state == BT_CONNECT && + test_bit(HCI_CONN_SCANNING, &c->flags)) { + rcu_read_unlock(); + return true; + } + } + + rcu_read_unlock(); + + return false; +} + int hci_disconnect(struct hci_conn *conn, __u8 reason); bool hci_setup_sync(struct hci_conn *conn, __u16 handle); void hci_sco_setup(struct hci_conn *conn, __u8 status); bool hci_iso_setup_path(struct hci_conn *conn); -int hci_le_create_cis(struct hci_conn *conn); +int hci_le_create_cis_pending(struct hci_dev *hdev); +int hci_conn_check_create_cis(struct hci_conn *conn); struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, u8 role); @@ -1351,6 +1450,9 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, __u16 setting, struct bt_codec *codec); struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos); +struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, + struct bt_iso_qos *qos, + __u8 base_len, __u8 *base); struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos); struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, @@ -1358,7 +1460,8 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 data_len, __u8 *data); int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, __u8 sid, struct bt_iso_qos *qos); -int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos, +int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, + struct bt_iso_qos *qos, __u16 sync_handle, __u8 num_bis, __u8 bis[]); int hci_conn_check_link_mode(struct hci_conn *conn); int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); @@ -1369,6 +1472,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role); void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active); void hci_conn_failed(struct hci_conn *conn, u8 status); +u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle); /* * hci_conn_get() and hci_conn_put() are used to control the life-time of an @@ -1713,7 +1817,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M)) -#define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED)) +#define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED) && \ + !test_bit(HCI_QUIRK_BROKEN_LE_CODED, \ + &(dev)->quirks)) #define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) @@ -1745,6 +1851,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn); /* Extended advertising support */ #define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV)) +/* Maximum advertising length */ +#define max_adv_len(dev) \ + (ext_adv_capable(dev) ? 
HCI_MAX_EXT_AD_LENGTH : HCI_MAX_AD_LENGTH) + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789: * * C24: Mandatory if the LE Controller supports Connection State and either @@ -1765,6 +1875,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define cis_peripheral_capable(dev) \ ((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL) #define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER) +#define sync_recv_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER) #define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \ (!test_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &(dev)->quirks))) diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h index 2495be4d8b82..57eeb07aeb25 100644 --- a/include/net/bluetooth/hci_sync.h +++ b/include/net/bluetooth/hci_sync.h @@ -5,6 +5,9 @@ * Copyright (C) 2021 Intel Corporation */ +#define UINT_PTR(_handle) ((void *)((uintptr_t)_handle)) +#define PTR_UINT(_ptr) ((uintptr_t)((void *)_ptr)) + typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data); typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data, int err); @@ -124,7 +127,7 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason); int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn); -int hci_le_create_cis_sync(struct hci_dev *hdev, struct hci_conn *conn); +int hci_le_create_cis_sync(struct hci_dev *hdev); int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle); diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 5e68b3dd4422..d382679efd2b 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -111,6 +111,8 @@ struct mgmt_rp_read_index_list { #define MGMT_SETTING_WIDEBAND_SPEECH BIT(17) #define MGMT_SETTING_CIS_CENTRAL BIT(18) #define MGMT_SETTING_CIS_PERIPHERAL BIT(19) +#define MGMT_SETTING_ISO_BROADCASTER BIT(20) +#define MGMT_SETTING_ISO_SYNC_RECEIVER BIT(21) #define MGMT_OP_READ_INFO 0x0004 #define MGMT_READ_INFO_SIZE 0 diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h index 1aa2e14b6c94..f40ddb4264fc 100644 --- a/include/net/bluetooth/sco.h +++ b/include/net/bluetooth/sco.h @@ -46,6 +46,4 @@ struct sco_conninfo { __u8 dev_class[3]; }; -#define SCO_CMSG_PKT_STATUS 0x01 - #endif /* __SCO_H */ diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index a016f275cb01..c5e57c6bd873 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -301,7 +301,6 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond, int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); int bond_3ad_set_carrier(struct bonding *bond); -void bond_3ad_update_lacp_active(struct bonding *bond); void bond_3ad_update_lacp_rate(struct bonding *bond); void bond_3ad_update_ad_actor_settings(struct bonding *bond); int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats); diff --git a/include/net/bonding.h b/include/net/bonding.h index 30ac427cf0c6..5b8b1b644a2d 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -722,23 +722,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond, } /* Caller must hold rcu_read_lock() for read */ -static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac) +static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac) { struct list_head *iter; struct slave *tmp; - struct netdev_hw_addr *ha; bond_for_each_slave_rcu(bond, 
tmp, iter) if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) return true; - - if (netdev_uc_empty(bond->dev)) - return false; - - netdev_for_each_uc_addr(ha, bond->dev) - if (ether_addr_equal_64bits(mac, ha->addr)) - return true; - return false; } diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index f90f0021f5f2..4dabeb6c76d3 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -16,6 +16,7 @@ #include <linux/sched/clock.h> #include <linux/sched/signal.h> #include <net/ip.h> +#include <net/xdp.h> /* 0 - Reserved to indicate value not set * 1..NR_CPUS - Reserved for sender_cpu diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h index bd5440977f7f..5ee7b322e18b 100644 --- a/include/net/caif/cfsrvl.h +++ b/include/net/caif/cfsrvl.h @@ -33,9 +33,6 @@ struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info, int mtu_size); struct cflayer *cfdbgl_create(u8 linkid, struct dev_info *dev_info); -void cfsrvl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, - int phyid); - bool cfsrvl_phyid_match(struct cflayer *layer, int phyid); void cfsrvl_init(struct cfsrvl *service, diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 7c7d03aa9d06..3a4b684f89bf 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -263,7 +263,7 @@ enum ieee80211_privacy { * are only for driver use when pointers to this structure are * passed around. * - * @flags: rate-specific flags + * @flags: rate-specific flags from &enum ieee80211_rate_flags * @bitrate: bitrate in units of 100 Kbps * @hw_value: driver/hardware value for this rate * @hw_value_short: driver/hardware value for this rate when @@ -562,6 +562,9 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, if (WARN_ON(iftype >= NL80211_IFTYPE_MAX)) return NULL; + if (iftype == NL80211_IFTYPE_AP_VLAN) + iftype = NL80211_IFTYPE_AP; + for (i = 0; i < sband->n_iftype_data; i++) { const struct ieee80211_sband_iftype_data *data = &sband->iftype_data[i]; @@ -808,7 +811,7 @@ struct cfg80211_tid_cfg { struct cfg80211_tid_config { const u8 *peer; u32 n_tid_conf; - struct cfg80211_tid_cfg tid_conf[]; + struct cfg80211_tid_cfg tid_conf[] __counted_by(n_tid_conf); }; /** @@ -1184,7 +1187,7 @@ struct cfg80211_mbssid_elems { struct { const u8 *data; size_t len; - } elem[]; + } elem[] __counted_by(cnt); }; /** @@ -1201,7 +1204,7 @@ struct cfg80211_rnr_elems { struct { const u8 *data; size_t len; - } elem[]; + } elem[] __counted_by(cnt); }; /** @@ -1279,7 +1282,7 @@ struct cfg80211_acl_data { int n_acl_entries; /* Keep it last */ - struct mac_address mac_addrs[]; + struct mac_address mac_addrs[] __counted_by(n_acl_entries); }; /** @@ -1350,7 +1353,7 @@ struct cfg80211_unsol_bcast_probe_resp { * @twt_responder: Enable Target Wait Time * @he_required: stations must support HE * @sae_h2e_required: stations must support direct H2E technique in SAE - * @flags: flags, as defined in enum cfg80211_ap_settings_flags + * @flags: flags, as defined in &enum nl80211_ap_settings_flags * @he_obss_pd: OBSS Packet Detection settings * @he_oper: HE operation IE (or %NULL if HE isn't enabled) * @fils_discovery: FILS discovery transmission parameters @@ -1479,7 +1482,6 @@ struct iface_combination_params { * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp) * @STATION_PARAM_APPLY_CAPABILITY: apply new capability * @STATION_PARAM_APPLY_PLINK_STATE: apply new plink state - * @STATION_PARAM_APPLY_STA_TXPOWER: apply tx power for STA * * Not all station parameters have 
in-band "no change" signalling, * for those that don't, these flags will be used. */ @@ -2153,7 +2155,7 @@ enum mpath_info_flags { * @sn: target sequence number * @metric: metric (cost) of this mesh path * @exptime: expiration time for the mesh path from now, in msecs - * @flags: mesh path flags + * @flags: mesh path flags from &enum mesh_path_flags * @discovery_timeout: total mesh path discovery timeout, in msecs * @discovery_retries: mesh path discovery retries * @generation: generation number for nl80211 dumps. @@ -2493,7 +2495,7 @@ struct cfg80211_scan_6ghz_params { * the actual dwell time may be shorter. * @duration_mandatory: if set, the scan duration must be as specified by the * %duration field. - * @flags: bit field of flags controlling operation + * @flags: control flags from &enum nl80211_scan_flags * @rates: bitmap of rates to advertise for each band * @wiphy: the wiphy this was for * @scan_start: time (in jiffies) when the scan started @@ -2541,7 +2543,7 @@ struct cfg80211_scan_request { struct cfg80211_scan_6ghz_params *scan_6ghz_params; /* keep last */ - struct ieee80211_channel *channels[]; + struct ieee80211_channel *channels[] __counted_by(n_channels); }; static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask) @@ -2613,7 +2615,7 @@ struct cfg80211_bss_select_adjust { * @scan_width: channel width for scanning * @ie: optional information element(s) to add into Probe Request or %NULL * @ie_len: length of ie in octets - * @flags: bit field of flags controlling operation + * @flags: control flags from &enum nl80211_scan_flags * @match_sets: sets of parameters to be matched for a scan result * entry to be considered valid and to be passed to the host * (others are filtered out). @@ -3945,7 +3947,7 @@ struct cfg80211_pmsr_request { struct list_head list; - struct cfg80211_pmsr_request_peer peers[]; + struct cfg80211_pmsr_request_peer peers[] __counted_by(n_peers); }; /** @@ -8115,7 +8117,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, * @link_id: the ID of the link the frame was received on * @buf: Management frame (header + body) * @len: length of the frame data - * @flags: flags, as defined in enum nl80211_rxmgmt_flags + * @flags: flags, as defined in &enum nl80211_rxmgmt_flags * @rx_tstamp: Hardware timestamp of frame RX in nanoseconds * @ack_tstamp: Hardware timestamp of ack TX in nanoseconds */ diff --git a/include/net/datalink.h b/include/net/datalink.h index c837ffc7ebf8..6c529a40e00d 100644 --- a/include/net/datalink.h +++ b/include/net/datalink.h @@ -23,6 +23,4 @@ struct datalink_proto { struct list_head node; }; -struct datalink_proto *make_EII_client(void); -void destroy_EII_client(struct datalink_proto *dl); #endif diff --git a/include/net/devlink.h b/include/net/devlink.h index 0cdb4b16e5b5..29fd1b4ee654 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1583,6 +1583,24 @@ void devlink_free(struct devlink *devlink); * Should be used by device drivers to set * the admin state of a function managed * by the devlink port. + * @port_fn_ipsec_crypto_get: Callback used to get port function's ipsec_crypto + * capability. Should be used by device drivers + * to report the current state of ipsec_crypto + * capability of a function managed by the devlink + * port. + * @port_fn_ipsec_crypto_set: Callback used to set port function's ipsec_crypto + * capability. Should be used by device drivers to + * enable/disable ipsec_crypto capability of a + * function managed by the devlink port.
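To make the new callbacks above concrete, here is a minimal sketch of a driver wiring them up. The foo_* names and the driver-private state are hypothetical; only struct devlink_port_ops and the callback signatures come from this patch.

#include <net/devlink.h>

/* Hypothetical driver-private port state; not part of this patch. */
struct foo_port {
	struct devlink_port dl_port;
	bool ipsec_crypto_enabled;
};

static int foo_ipsec_crypto_get(struct devlink_port *port, bool *is_enable,
				struct netlink_ext_ack *extack)
{
	struct foo_port *fp = container_of(port, struct foo_port, dl_port);

	*is_enable = fp->ipsec_crypto_enabled;
	return 0;
}

static int foo_ipsec_crypto_set(struct devlink_port *port, bool enable,
				struct netlink_ext_ack *extack)
{
	struct foo_port *fp = container_of(port, struct foo_port, dl_port);

	/* A real driver would program the NIC here before flipping the flag. */
	fp->ipsec_crypto_enabled = enable;
	return 0;
}

static const struct devlink_port_ops foo_port_ops = {
	.port_fn_ipsec_crypto_get = foo_ipsec_crypto_get,
	.port_fn_ipsec_crypto_set = foo_ipsec_crypto_set,
};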
+ * @port_fn_ipsec_packet_get: Callback used to get port function's ipsec_packet + * capability. Should be used by device drivers + * to report the current state of ipsec_packet + * capability of a function managed by the devlink + * port. + * @port_fn_ipsec_packet_set: Callback used to set port function's ipsec_packet + * capability. Should be used by device drivers to + * enable/disable ipsec_packet capability of a + * function managed by the devlink port. * * Note: Driver should return -EOPNOTSUPP if it doesn't support * port function (@port_fn_*) handling for a particular port. @@ -1620,6 +1638,18 @@ struct devlink_port_ops { int (*port_fn_state_set)(struct devlink_port *port, enum devlink_port_fn_state state, struct netlink_ext_ack *extack); + int (*port_fn_ipsec_crypto_get)(struct devlink_port *devlink_port, + bool *is_enable, + struct netlink_ext_ack *extack); + int (*port_fn_ipsec_crypto_set)(struct devlink_port *devlink_port, + bool enable, + struct netlink_ext_ack *extack); + int (*port_fn_ipsec_packet_get)(struct devlink_port *devlink_port, + bool *is_enable, + struct netlink_ext_ack *extack); + int (*port_fn_ipsec_packet_set)(struct devlink_port *devlink_port, + bool enable, + struct netlink_ext_ack *extack); }; void devlink_port_init(struct devlink *devlink, @@ -1743,9 +1773,6 @@ int devl_resource_size_get(struct devlink *devlink, int devl_dpipe_table_resource_set(struct devlink *devlink, const char *table_name, u64 resource_id, u64 resource_units); -int devlink_dpipe_table_resource_set(struct devlink *devlink, - const char *table_name, u64 resource_id, - u64 resource_units); void devl_resource_occ_get_register(struct devlink *devlink, u64 resource_id, devlink_resource_occ_get_t *occ_get, @@ -1790,8 +1817,6 @@ devlink_port_region_create(struct devlink_port *port, u32 region_max_snapshots, u64 region_size); void devl_region_destroy(struct devlink_region *region); void devlink_region_destroy(struct devlink_region *region); -void devlink_port_region_destroy(struct devlink_region *region); - int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id); void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id); int devlink_region_snapshot_create(struct devlink_region *region, diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h index a2b953b57689..a587e83fc169 100644 --- a/include/net/dropreason-core.h +++ b/include/net/dropreason-core.h @@ -30,6 +30,7 @@ FN(TCP_OVERWINDOW) \ FN(TCP_OFOMERGE) \ FN(TCP_RFC7323_PAWS) \ + FN(TCP_OLD_SEQUENCE) \ FN(TCP_INVALID_SEQUENCE) \ FN(TCP_RESET) \ FN(TCP_INVALID_SYN) \ @@ -78,6 +79,7 @@ FN(IPV6_NDISC_BAD_CODE) \ FN(IPV6_NDISC_BAD_OPTIONS) \ FN(IPV6_NDISC_NS_OTHERHOST) \ + FN(QUEUE_PURGE) \ FNe(MAX) /** @@ -188,6 +190,8 @@ enum skb_drop_reason { * LINUX_MIB_PAWSESTABREJECTED */ SKB_DROP_REASON_TCP_RFC7323_PAWS, + /** @SKB_DROP_REASON_TCP_OLD_SEQUENCE: Old SEQ field (duplicate packet) */ + SKB_DROP_REASON_TCP_OLD_SEQUENCE, /** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */ SKB_DROP_REASON_TCP_INVALID_SEQUENCE, /** @SKB_DROP_REASON_TCP_RESET: Invalid RST packet */ @@ -339,6 +343,8 @@ enum skb_drop_reason { * for another host. */ SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST, + /** @SKB_DROP_REASON_QUEUE_PURGE: bulk free. 
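For context, reason codes such as the new SKB_DROP_REASON_TCP_OLD_SEQUENCE are reported via kfree_skb_reason(). A minimal sketch; the surrounding check is illustrative, not the actual TCP validation code.

#include <linux/skbuff.h>
#include <net/dropreason.h>

/* Illustrative only: tag a drop with the new TCP_OLD_SEQUENCE reason. */
static void example_drop(struct sk_buff *skb, bool seq_too_old)
{
	if (seq_too_old) {
		kfree_skb_reason(skb, SKB_DROP_REASON_TCP_OLD_SEQUENCE);
		return;
	}
	consume_skb(skb);	/* not a drop: the packet was handled */
}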
*/ + SKB_DROP_REASON_QUEUE_PURGE, /** * @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which * shouldn't be used as a real 'reason' - only for tracing code gen diff --git a/include/net/dropreason.h b/include/net/dropreason.h index 685fb37df8e8..56cb7be92244 100644 --- a/include/net/dropreason.h +++ b/include/net/dropreason.h @@ -23,6 +23,12 @@ enum skb_drop_reason_subsys { */ SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR, + /** + * @SKB_DROP_REASON_SUBSYS_OPENVSWITCH: openvswitch drop reasons, + * see net/openvswitch/drop.h + */ + SKB_DROP_REASON_SUBSYS_OPENVSWITCH, + /** @SKB_DROP_REASON_SUBSYS_NUM: number of subsystems defined */ SKB_DROP_REASON_SUBSYS_NUM }; diff --git a/include/net/dsa.h b/include/net/dsa.h index d309ee7ed04b..0b9c6aa27047 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -873,8 +873,6 @@ struct dsa_switch_ops { struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds, int port, phy_interface_t iface); - int (*phylink_mac_link_state)(struct dsa_switch *ds, int port, - struct phylink_link_state *state); int (*phylink_mac_prepare)(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface); @@ -884,7 +882,6 @@ struct dsa_switch_ops { int (*phylink_mac_finish)(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface); - void (*phylink_mac_an_restart)(struct dsa_switch *ds, int port); void (*phylink_mac_link_down)(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface); diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 632086b2f644..6d1c8541183d 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -23,7 +23,7 @@ struct dst_ops { u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, - struct net_device *dev, int how); + struct net_device *dev); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *dst, struct sock *sk, diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 8664ed4fbbdf..1a7131d6cb0e 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -302,6 +302,14 @@ struct flow_dissector_key_l2tpv3 { }; /** + * struct flow_dissector_key_ipsec: + * @spi: identifier for an ipsec connection + */ +struct flow_dissector_key_ipsec { + __be32 spi; +}; + +/** * struct flow_dissector_key_cfm * @mdl_ver: maintenance domain level (mdl) and cfm protocol version * @opcode: code specifying a type of cfm protocol packet @@ -354,6 +362,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_PPPOE, /* struct flow_dissector_key_pppoe */ FLOW_DISSECTOR_KEY_L2TPV3, /* struct flow_dissector_key_l2tpv3 */ FLOW_DISSECTOR_KEY_CFM, /* struct flow_dissector_key_cfm */ + FLOW_DISSECTOR_KEY_IPSEC, /* struct flow_dissector_key_ipsec */ FLOW_DISSECTOR_KEY_MAX, }; @@ -370,7 +379,8 @@ struct flow_dissector_key { }; struct flow_dissector { - unsigned int used_keys; /* each bit repesents presence of one key id */ + unsigned long long used_keys; + /* each bit represents presence of one key id */ unsigned short int offset[FLOW_DISSECTOR_KEY_MAX]; }; @@ -430,7 +440,7 @@ void skb_flow_get_icmp_tci(const struct sk_buff *skb, static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector, enum flow_dissector_key_id key_id) { - return flow_dissector->used_keys & (1 << key_id); + return flow_dissector->used_keys & (1ULL << key_id); } static inline void
*skb_flow_dissector_target(struct flow_dissector *flow_dissector, diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 118082eae48c..9efa9a59e81f 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -64,6 +64,10 @@ struct flow_match_tcp { struct flow_dissector_key_tcp *key, *mask; }; +struct flow_match_ipsec { + struct flow_dissector_key_ipsec *key, *mask; +}; + struct flow_match_mpls { struct flow_dissector_key_mpls *key, *mask; }; @@ -116,6 +120,8 @@ void flow_rule_match_ports_range(const struct flow_rule *rule, struct flow_match_ports_range *out); void flow_rule_match_tcp(const struct flow_rule *rule, struct flow_match_tcp *out); +void flow_rule_match_ipsec(const struct flow_rule *rule, + struct flow_match_ipsec *out); void flow_rule_match_icmp(const struct flow_rule *rule, struct flow_match_icmp *out); void flow_rule_match_mpls(const struct flow_rule *rule, diff --git a/include/net/fq.h b/include/net/fq.h index 07b5aff6ec58..99fbe4127b95 100644 --- a/include/net/fq.h +++ b/include/net/fq.h @@ -98,9 +98,4 @@ typedef bool fq_skb_filter_t(struct fq *, struct sk_buff *, void *); -typedef struct fq_flow *fq_flow_get_default_t(struct fq *, - struct fq_tin *, - int idx, - struct sk_buff *); - #endif diff --git a/include/net/genetlink.h b/include/net/genetlink.h index ed4622dd4828..e18a4c0d69ee 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -93,9 +93,9 @@ struct genl_family { * struct genl_info - receiving information * @snd_seq: sending sequence number * @snd_portid: netlink portid of sender + * @family: generic netlink family * @nlhdr: netlink message header * @genlhdr: generic netlink message header - * @userhdr: user specific header * @attrs: netlink attributes * @_net: network namespace * @user_ptr: user pointers @@ -104,16 +104,16 @@ struct genl_family { struct genl_info { u32 snd_seq; u32 snd_portid; - struct nlmsghdr * nlhdr; + const struct genl_family *family; + const struct nlmsghdr * nlhdr; struct genlmsghdr * genlhdr; - void * userhdr; struct nlattr ** attrs; possible_net_t _net; void * user_ptr[2]; struct netlink_ext_ack *extack; }; -static inline struct net *genl_info_net(struct genl_info *info) +static inline struct net *genl_info_net(const struct genl_info *info) { return read_pnet(&info->_net); } @@ -123,6 +123,11 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net) write_pnet(&info->_net, net); } +static inline void *genl_info_userhdr(const struct genl_info *info) +{ + return (u8 *)info->genlhdr + GENL_HDRLEN; +} + #define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg) #define GENL_SET_ERR_MSG_FMT(info, msg, args...) 
\ @@ -244,14 +249,13 @@ struct genl_split_ops { /** * struct genl_dumpit_info - info that is available during dumpit op call - * @family: generic netlink family - for internal genl code usage * @op: generic netlink ops - for internal genl code usage * @attrs: netlink attributes + * @info: struct genl_info describing the request */ struct genl_dumpit_info { - const struct genl_family *family; struct genl_split_ops op; - struct nlattr **attrs; + struct genl_info info; }; static inline const struct genl_dumpit_info * @@ -260,6 +264,38 @@ genl_dumpit_info(struct netlink_callback *cb) return cb->data; } +static inline const struct genl_info * +genl_info_dump(struct netlink_callback *cb) +{ + return &genl_dumpit_info(cb)->info; +} + +/** + * genl_info_init_ntf() - initialize genl_info for notifications + * @info: genl_info struct to set up + * @family: pointer to the genetlink family + * @cmd: command to be used in the notification + * + * Initialize a locally declared struct genl_info to pass to various APIs. + * Intended to be used when creating notifications. + */ +static inline void +genl_info_init_ntf(struct genl_info *info, const struct genl_family *family, + u8 cmd) +{ + struct genlmsghdr *hdr = (void *) &info->user_ptr[0]; + + memset(info, 0, sizeof(*info)); + info->family = family; + info->genlhdr = hdr; + hdr->cmd = cmd; +} + +static inline bool genl_info_is_ntf(const struct genl_info *info) +{ + return !info->nlhdr; +} + int genl_register_family(struct genl_family *family); int genl_unregister_family(const struct genl_family *family); void genl_notify(const struct genl_family *family, struct sk_buff *skb, @@ -268,6 +304,32 @@ void genl_notify(const struct genl_family *family, struct sk_buff *skb, void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, const struct genl_family *family, int flags, u8 cmd); +static inline void * +__genlmsg_iput(struct sk_buff *skb, const struct genl_info *info, int flags) +{ + return genlmsg_put(skb, info->snd_portid, info->snd_seq, info->family, + flags, info->genlhdr->cmd); +} + +/** + * genlmsg_iput - start genetlink message based on genl_info + * @skb: skb in which message header will be placed + * @info: genl_info as provided to do/dump handlers + * + * Convenience wrapper which starts a genetlink message based on + * information in user request. @info should be either the struct passed + * by genetlink core to do/dump handlers (when constructing replies to + * such requests) or a struct initialized by genl_info_init_ntf() + * when constructing notifications. + * + * Returns pointer to new genetlink header. + */ +static inline void * +genlmsg_iput(struct sk_buff *skb, const struct genl_info *info) +{ + return __genlmsg_iput(skb, info, 0); +} + /** * genlmsg_nlhdr - Obtain netlink header from user specified header * @user_hdr: user header as returned from genlmsg_put() diff --git a/include/net/gro.h b/include/net/gro.h index 75efa6fb8441..88644b3ca660 100644 --- a/include/net/gro.h +++ b/include/net/gro.h @@ -452,6 +452,49 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, gro_normal_list(napi); } +/* This function is the alternative of 'inet_iif' and 'inet_sdif' + * functions in case we can not rely on fields of IPCB. + * + * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized. + * The caller must hold the RCU read lock. 
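Taken together, genl_info_init_ntf() and genlmsg_iput() above let a family build notifications with the same helper it uses for replies. A minimal sketch; my_build_ntf() and the MY_* command/attribute values are hypothetical:

#include <net/genetlink.h>

#define MY_CMD_NTF	1	/* hypothetical command */
#define MY_ATTR_STATUS	1	/* hypothetical attribute */

static struct sk_buff *my_build_ntf(const struct genl_family *family, u32 status)
{
	struct genl_info info;
	struct sk_buff *skb;
	void *hdr;

	/* Set up a local genl_info as if it came with a request. */
	genl_info_init_ntf(&info, family, MY_CMD_NTF);

	skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	hdr = genlmsg_iput(skb, &info);
	if (!hdr || nla_put_u32(skb, MY_ATTR_STATUS, status)) {
		nlmsg_free(skb);
		return NULL;
	}

	genlmsg_end(skb, hdr);
	return skb;	/* ready for genlmsg_multicast() etc. */
}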
+ */ +static inline void inet_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif) +{ + *iif = inet_iif(skb) ?: skb->dev->ifindex; + *sdif = 0; + +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + if (netif_is_l3_slave(skb->dev)) { + struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev); + + *sdif = *iif; + *iif = master ? master->ifindex : 0; + } +#endif +} + +/* This function is the alternative of 'inet6_iif' and 'inet6_sdif' + * functions in case we can not rely on fields of IP6CB. + * + * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized. + * The caller must hold the RCU read lock. + */ +static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif) +{ + /* using skb->dev->ifindex because skb_dst(skb) is not initialized */ + *iif = skb->dev->ifindex; + *sdif = 0; + +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + if (netif_is_l3_slave(skb->dev)) { + struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev); + + *sdif = *iif; + *iif = master ? master->ifindex : 0; + } +#endif +} + extern struct list_head offload_base; #endif /* _NET_IPV6_GRO_H */ diff --git a/include/net/handshake.h b/include/net/handshake.h index 2e26e436e85f..8ebd4f9ed26e 100644 --- a/include/net/handshake.h +++ b/include/net/handshake.h @@ -40,5 +40,10 @@ int tls_server_hello_x509(const struct tls_handshake_args *args, gfp_t flags); int tls_server_hello_psk(const struct tls_handshake_args *args, gfp_t flags); bool tls_handshake_cancel(struct sock *sk); +void tls_handshake_close(struct socket *sock); + +u8 tls_get_record_type(const struct sock *sk, const struct cmsghdr *msg); +void tls_alert_recv(const struct sock *sk, const struct msghdr *msg, + u8 *level, u8 *description); #endif /* _NET_HANDSHAKE_H */ diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index c4722a9963de..2338f8d2a8b3 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -21,7 +21,7 @@ #include <asm/unaligned.h> /** - * struct ieee82011_radiotap_header - base radiotap header + * struct ieee80211_radiotap_header - base radiotap header */ struct ieee80211_radiotap_header { /** @@ -575,6 +575,7 @@ enum ieee80211_radiotap_eht_usig_tb { /** * ieee80211_get_radiotap_len - get radiotap header length + * @data: pointer to the header */ static inline u16 ieee80211_get_radiotap_len(const char *data) { diff --git a/include/net/ila.h b/include/net/ila.h deleted file mode 100644 index 73ebe5eab272..000000000000 --- a/include/net/ila.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * ILA kernel interface - * - * Copyright (c) 2015 Tom Herbert <tom@herbertland.com> - */ - -#ifndef _NET_ILA_H -#define _NET_ILA_H - -struct sk_buff; - -int ila_xlat_outgoing(struct sk_buff *skb); -int ila_xlat_incoming(struct sk_buff *skb); - -#endif /* _NET_ILA_H */ diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 56f1286583d3..533a7337865a 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -48,6 +48,22 @@ struct sock *__inet6_lookup_established(struct net *net, const u16 hnum, const int dif, const int sdif); +typedef u32 (inet6_ehashfn_t)(const struct net *net, + const struct in6_addr *laddr, const u16 lport, + const struct in6_addr *faddr, const __be16 fport); + +inet6_ehashfn_t inet6_ehashfn; + +INDIRECT_CALLABLE_DECLARE(inet6_ehashfn_t udp6_ehashfn); + +struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk, + struct sk_buff 
*skb, int doff, + const struct in6_addr *saddr, + __be16 sport, + const struct in6_addr *daddr, + unsigned short hnum, + inet6_ehashfn_t *ehashfn); + struct sock *inet6_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, @@ -57,6 +73,15 @@ struct sock *inet6_lookup_listener(struct net *net, const unsigned short hnum, const int dif, const int sdif); +struct sock *inet6_lookup_run_sk_lookup(struct net *net, + int protocol, + struct sk_buff *skb, int doff, + const struct in6_addr *saddr, + const __be16 sport, + const struct in6_addr *daddr, + const u16 hnum, const int dif, + inet6_ehashfn_t *ehashfn); + static inline struct sock *__inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, @@ -78,6 +103,46 @@ static inline struct sock *__inet6_lookup(struct net *net, daddr, hnum, dif, sdif); } +static inline +struct sock *inet6_steal_sock(struct net *net, struct sk_buff *skb, int doff, + const struct in6_addr *saddr, const __be16 sport, + const struct in6_addr *daddr, const __be16 dport, + bool *refcounted, inet6_ehashfn_t *ehashfn) +{ + struct sock *sk, *reuse_sk; + bool prefetched; + + sk = skb_steal_sock(skb, refcounted, &prefetched); + if (!sk) + return NULL; + + if (!prefetched || !sk_fullsock(sk)) + return sk; + + if (sk->sk_protocol == IPPROTO_TCP) { + if (sk->sk_state != TCP_LISTEN) + return sk; + } else if (sk->sk_protocol == IPPROTO_UDP) { + if (sk->sk_state != TCP_CLOSE) + return sk; + } else { + return sk; + } + + reuse_sk = inet6_lookup_reuseport(net, sk, skb, doff, + saddr, sport, daddr, ntohs(dport), + ehashfn); + if (!reuse_sk) + return sk; + + /* We've chosen a new reuseport sock which is never refcounted. This + * implies that sk also isn't refcounted. + */ + WARN_ON_ONCE(*refcounted); + + return reuse_sk; +} + static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, const __be16 sport, @@ -85,14 +150,20 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, int iif, int sdif, bool *refcounted) { - struct sock *sk = skb_steal_sock(skb, refcounted); - + struct net *net = dev_net(skb_dst(skb)->dev); + const struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct sock *sk; + + sk = inet6_steal_sock(net, skb, doff, &ip6h->saddr, sport, &ip6h->daddr, dport, + refcounted, inet6_ehashfn); + if (IS_ERR(sk)) + return NULL; if (sk) return sk; - return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb, - doff, &ipv6_hdr(skb)->saddr, sport, - &ipv6_hdr(skb)->daddr, ntohs(dport), + return __inet6_lookup(net, hashinfo, skb, + doff, &ip6h->saddr, sport, + &ip6h->daddr, ntohs(dport), iif, sdif, refcounted); } diff --git a/include/net/inet_common.h b/include/net/inet_common.h index b86b8e21de7f..f50a644d87a9 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -40,8 +40,10 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); int inet_shutdown(struct socket *sock, int how); int inet_listen(struct socket *sock, int backlog); +int __inet_listen_sk(struct sock *sk, int backlog); void inet_sock_destruct(struct sock *sk); int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); +int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len); /* Don't allocate port at this moment, defer to connect. */ #define BIND_FORCE_ADDRESS_NO_PORT (1 << 0) /* Grab and release socket lock. 
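The inet6_steal_sock() pattern used by __inet6_lookup_skb() above can be reused by other IPv6 receive paths. A hedged sketch for a UDPv6-style demux; the function name is hypothetical, while udp6_ehashfn is the callback declared in this header:

#include <linux/err.h>
#include <linux/udp.h>
#include <net/inet6_hashtables.h>

/* Sketch only: take a BPF-prefetched socket if usable, mirroring the
 * __inet6_lookup_skb() pattern shown above.
 */
static struct sock *example_udp6_steal(struct net *net, struct sk_buff *skb,
				       __be16 sport, __be16 dport,
				       bool *refcounted)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct sock *sk;

	sk = inet6_steal_sock(net, skb, sizeof(struct udphdr),
			      &ip6h->saddr, sport, &ip6h->daddr, dport,
			      refcounted, udp6_ehashfn);
	if (IS_ERR(sk))
		return NULL;	/* treat a bad selection as a miss */
	return sk;		/* may be NULL: do the regular lookup next */
}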
*/ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index c2b15f7e5516..5d2fcc137b88 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -164,7 +164,8 @@ enum inet_csk_ack_state_t { ICSK_ACK_TIMER = 2, ICSK_ACK_PUSHED = 4, ICSK_ACK_PUSHED2 = 8, - ICSK_ACK_NOW = 16 /* Send the next ACK immediately (once) */ + ICSK_ACK_NOW = 16, /* Send the next ACK immediately (once) */ + ICSK_ACK_NOMEM = 32, }; void inet_csk_init_xmit_timers(struct sock *sk, @@ -341,9 +342,9 @@ static inline bool inet_csk_in_pingpong_mode(struct sock *sk) return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; } -static inline bool inet_csk_has_ulp(struct sock *sk) +static inline bool inet_csk_has_ulp(const struct sock *sk) { - return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops; + return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops; } #endif /* _INET_CONNECTION_SOCK_H */ diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 99bd823e97f6..3ecfeadbfa06 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -177,7 +177,7 @@ struct inet_hashinfo { struct inet_listen_hashbucket *lhash2; bool pernet; -}; +} ____cacheline_aligned_in_smp; static inline struct inet_hashinfo *tcp_or_dccp_get_hashinfo(const struct sock *sk) { @@ -379,6 +379,27 @@ struct sock *__inet_lookup_established(struct net *net, const __be32 daddr, const u16 hnum, const int dif, const int sdif); +typedef u32 (inet_ehashfn_t)(const struct net *net, + const __be32 laddr, const __u16 lport, + const __be32 faddr, const __be16 fport); + +inet_ehashfn_t inet_ehashfn; + +INDIRECT_CALLABLE_DECLARE(inet_ehashfn_t udp_ehashfn); + +struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk, + struct sk_buff *skb, int doff, + __be32 saddr, __be16 sport, + __be32 daddr, unsigned short hnum, + inet_ehashfn_t *ehashfn); + +struct sock *inet_lookup_run_sk_lookup(struct net *net, + int protocol, + struct sk_buff *skb, int doff, + __be32 saddr, __be16 sport, + __be32 daddr, u16 hnum, const int dif, + inet_ehashfn_t *ehashfn); + static inline struct sock * inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo, const __be32 saddr, const __be16 sport, @@ -428,6 +449,46 @@ static inline struct sock *inet_lookup(struct net *net, return sk; } +static inline +struct sock *inet_steal_sock(struct net *net, struct sk_buff *skb, int doff, + const __be32 saddr, const __be16 sport, + const __be32 daddr, const __be16 dport, + bool *refcounted, inet_ehashfn_t *ehashfn) +{ + struct sock *sk, *reuse_sk; + bool prefetched; + + sk = skb_steal_sock(skb, refcounted, &prefetched); + if (!sk) + return NULL; + + if (!prefetched || !sk_fullsock(sk)) + return sk; + + if (sk->sk_protocol == IPPROTO_TCP) { + if (sk->sk_state != TCP_LISTEN) + return sk; + } else if (sk->sk_protocol == IPPROTO_UDP) { + if (sk->sk_state != TCP_CLOSE) + return sk; + } else { + return sk; + } + + reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, + saddr, sport, daddr, ntohs(dport), + ehashfn); + if (!reuse_sk) + return sk; + + /* We've chosen a new reuseport sock which is never refcounted. This + * implies that sk also isn't refcounted. 
+ */ + WARN_ON_ONCE(*refcounted); + + return reuse_sk; +} + static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, @@ -436,22 +497,23 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, const int sdif, bool *refcounted) { - struct sock *sk = skb_steal_sock(skb, refcounted); + struct net *net = dev_net(skb_dst(skb)->dev); const struct iphdr *iph = ip_hdr(skb); + struct sock *sk; + sk = inet_steal_sock(net, skb, doff, iph->saddr, sport, iph->daddr, dport, + refcounted, inet_ehashfn); + if (IS_ERR(sk)) + return NULL; if (sk) return sk; - return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb, + return __inet_lookup(net, hashinfo, skb, doff, iph->saddr, sport, iph->daddr, dport, inet_iif(skb), sdif, refcounted); } -u32 inet6_ehashfn(const struct net *net, - const struct in6_addr *laddr, const u16 lport, - const struct in6_addr *faddr, const __be16 fport); - static inline void sk_daddr_set(struct sock *sk, __be32 addr) { sk->sk_daddr = addr; /* alias of inet_daddr */ diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index caa20a905531..2de0e4d4a027 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -107,11 +107,12 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) { - if (!sk->sk_mark && - READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)) + u32 mark = READ_ONCE(sk->sk_mark); + + if (!mark && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)) return skb->mark; - return sk->sk_mark; + return mark; } static inline int inet_request_bound_dev_if(const struct sock *sk, @@ -193,13 +194,13 @@ struct rtable; * @inet_rcv_saddr - Bound local IPv4 addr * @inet_dport - Destination port * @inet_num - Local port + * @inet_flags - various atomic flags * @inet_saddr - Sending source * @uc_ttl - Unicast TTL * @inet_sport - Source port * @inet_id - ID counter for DF pkts * @tos - TOS * @mc_ttl - Multicasting TTL - * @is_icsk - is this an inet_connection_sock? 
* @uc_index - Unicast outgoing device index * @mc_index - Multicast device index * @mc_list - Group array @@ -217,57 +218,88 @@ struct inet_sock { #define inet_dport sk.__sk_common.skc_dport #define inet_num sk.__sk_common.skc_num + unsigned long inet_flags; __be32 inet_saddr; __s16 uc_ttl; - __u16 cmsg_flags; - struct ip_options_rcu __rcu *inet_opt; __be16 inet_sport; - __u16 inet_id; + struct ip_options_rcu __rcu *inet_opt; + atomic_t inet_id; __u8 tos; __u8 min_ttl; __u8 mc_ttl; __u8 pmtudisc; - __u8 recverr:1, - is_icsk:1, - freebind:1, - hdrincl:1, - mc_loop:1, - transparent:1, - mc_all:1, - nodefrag:1; - __u8 bind_address_no_port:1, - recverr_rfc4884:1, - defer_connect:1; /* Indicates that fastopen_connect is set - * and cookie exists so we defer connect - * until first data frame is written - */ __u8 rcv_tos; __u8 convert_csum; int uc_index; int mc_index; __be32 mc_addr; - struct ip_mc_socklist __rcu *mc_list; - struct inet_cork_full cork; struct { __u16 lo; __u16 hi; } local_port_range; + + struct ip_mc_socklist __rcu *mc_list; + struct inet_cork_full cork; }; #define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */ #define IPCORK_ALLFRAG 2 /* always fragment (for ipv6 for now) */ +enum { + INET_FLAGS_PKTINFO = 0, + INET_FLAGS_TTL = 1, + INET_FLAGS_TOS = 2, + INET_FLAGS_RECVOPTS = 3, + INET_FLAGS_RETOPTS = 4, + INET_FLAGS_PASSSEC = 5, + INET_FLAGS_ORIGDSTADDR = 6, + INET_FLAGS_CHECKSUM = 7, + INET_FLAGS_RECVFRAGSIZE = 8, + + INET_FLAGS_RECVERR = 9, + INET_FLAGS_RECVERR_RFC4884 = 10, + INET_FLAGS_FREEBIND = 11, + INET_FLAGS_HDRINCL = 12, + INET_FLAGS_MC_LOOP = 13, + INET_FLAGS_MC_ALL = 14, + INET_FLAGS_TRANSPARENT = 15, + INET_FLAGS_IS_ICSK = 16, + INET_FLAGS_NODEFRAG = 17, + INET_FLAGS_BIND_ADDRESS_NO_PORT = 18, + INET_FLAGS_DEFER_CONNECT = 19, +}; + /* cmsg flags for inet */ -#define IP_CMSG_PKTINFO BIT(0) -#define IP_CMSG_TTL BIT(1) -#define IP_CMSG_TOS BIT(2) -#define IP_CMSG_RECVOPTS BIT(3) -#define IP_CMSG_RETOPTS BIT(4) -#define IP_CMSG_PASSSEC BIT(5) -#define IP_CMSG_ORIGDSTADDR BIT(6) -#define IP_CMSG_CHECKSUM BIT(7) -#define IP_CMSG_RECVFRAGSIZE BIT(8) +#define IP_CMSG_PKTINFO BIT(INET_FLAGS_PKTINFO) +#define IP_CMSG_TTL BIT(INET_FLAGS_TTL) +#define IP_CMSG_TOS BIT(INET_FLAGS_TOS) +#define IP_CMSG_RECVOPTS BIT(INET_FLAGS_RECVOPTS) +#define IP_CMSG_RETOPTS BIT(INET_FLAGS_RETOPTS) +#define IP_CMSG_PASSSEC BIT(INET_FLAGS_PASSSEC) +#define IP_CMSG_ORIGDSTADDR BIT(INET_FLAGS_ORIGDSTADDR) +#define IP_CMSG_CHECKSUM BIT(INET_FLAGS_CHECKSUM) +#define IP_CMSG_RECVFRAGSIZE BIT(INET_FLAGS_RECVFRAGSIZE) + +#define IP_CMSG_ALL (IP_CMSG_PKTINFO | IP_CMSG_TTL | \ + IP_CMSG_TOS | IP_CMSG_RECVOPTS | \ + IP_CMSG_RETOPTS | IP_CMSG_PASSSEC | \ + IP_CMSG_ORIGDSTADDR | IP_CMSG_CHECKSUM | \ + IP_CMSG_RECVFRAGSIZE) + +static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet) +{ + return READ_ONCE(inet->inet_flags) & IP_CMSG_ALL; +} + +#define inet_test_bit(nr, sk) \ + test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) +#define inet_set_bit(nr, sk) \ + set_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) +#define inet_clear_bit(nr, sk) \ + clear_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) +#define inet_assign_bit(nr, sk, val) \ + assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val) static inline bool sk_is_inet(struct sock *sk) { @@ -362,7 +394,7 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk) { __u8 flags = 0; - if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl) + if (inet_test_bit(TRANSPARENT, sk) || inet_test_bit(HDRINCL, sk)) flags |= 
FLOWI_FLAG_ANYSRC; return flags; } @@ -388,7 +420,8 @@ static inline bool inet_can_nonlocal_bind(struct net *net, struct inet_sock *inet) { return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) || - inet->freebind || inet->transparent; + test_bit(INET_FLAGS_FREEBIND, &inet->inet_flags) || + test_bit(INET_FLAGS_TRANSPARENT, &inet->inet_flags); } static inline bool inet_addr_valid_or_nonlocal(struct net *net, diff --git a/include/net/ip.h b/include/net/ip.h index 50d435855ae2..19adacd5ece0 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -93,7 +93,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm, { ipcm_init(ipcm); - ipcm->sockc.mark = inet->sk.sk_mark; + ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark); ipcm->sockc.tsflags = inet->sk.sk_tsflags; ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if); ipcm->addr = inet->inet_saddr; @@ -538,8 +538,19 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb, * generator as much as we can. */ if (sk && inet_sk(sk)->inet_daddr) { - iph->id = htons(inet_sk(sk)->inet_id); - inet_sk(sk)->inet_id += segs; + int val; + + /* avoid atomic operations for TCP, + * as we hold socket lock at this point. + */ + if (sk_is_tcp(sk)) { + sock_owned_by_me(sk); + val = atomic_read(&inet_sk(sk)->inet_id); + atomic_set(&inet_sk(sk)->inet_id, val + segs); + } else { + val = atomic_add_return(segs, &inet_sk(sk)->inet_id); + } + iph->id = htons(val); return; } if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) { diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 05e6f756feaf..c9ff23cf313e 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -179,6 +179,9 @@ struct fib6_info { refcount_t fib6_ref; unsigned long expires; + + struct hlist_node gc_link; + struct dst_metrics *fib6_metrics; #define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1] @@ -247,19 +250,6 @@ static inline bool fib6_requires_src(const struct fib6_info *rt) return rt->fib6_src.plen > 0; } -static inline void fib6_clean_expires(struct fib6_info *f6i) -{ - f6i->fib6_flags &= ~RTF_EXPIRES; - f6i->expires = 0; -} - -static inline void fib6_set_expires(struct fib6_info *f6i, - unsigned long expires) -{ - f6i->expires = expires; - f6i->fib6_flags |= RTF_EXPIRES; -} - static inline bool fib6_check_expired(const struct fib6_info *f6i) { if (f6i->fib6_flags & RTF_EXPIRES) @@ -267,6 +257,11 @@ static inline bool fib6_check_expired(const struct fib6_info *f6i) return false; } +static inline bool fib6_has_expires(const struct fib6_info *f6i) +{ + return f6i->fib6_flags & RTF_EXPIRES; +} + /* Function to safely get fn->fn_sernum for passed in rt * and store result in passed in cookie. * Return true if we can get cookie safely @@ -388,6 +383,7 @@ struct fib6_table { struct inet_peer_base tb6_peers; unsigned int flags; unsigned int fib_seq; + struct hlist_head tb6_gc_hlist; /* GC candidates */ #define RT6_TABLE_HAS_DFLT_ROUTER BIT(0) }; @@ -504,6 +500,48 @@ void fib6_gc_cleanup(void); int fib6_init(void); +/* fib6_info must be locked by the caller, and fib6_info->fib6_table can be + * NULL. + */ +static inline void fib6_set_expires_locked(struct fib6_info *f6i, + unsigned long expires) +{ + struct fib6_table *tb6; + + tb6 = f6i->fib6_table; + f6i->expires = expires; + if (tb6 && !fib6_has_expires(f6i)) + hlist_add_head(&f6i->gc_link, &tb6->tb6_gc_hlist); + f6i->fib6_flags |= RTF_EXPIRES; +} + +/* fib6_info must be locked by the caller, and fib6_info->fib6_table can be + * NULL. 
If fib6_table is NULL, the fib6_info will not be inserted into the + * list of GC candidates until it is inserted into a table. + */ +static inline void fib6_set_expires(struct fib6_info *f6i, + unsigned long expires) +{ + spin_lock_bh(&f6i->fib6_table->tb6_lock); + fib6_set_expires_locked(f6i, expires); + spin_unlock_bh(&f6i->fib6_table->tb6_lock); +} + +static inline void fib6_clean_expires_locked(struct fib6_info *f6i) +{ + if (fib6_has_expires(f6i)) + hlist_del_init(&f6i->gc_link); + f6i->fib6_flags &= ~RTF_EXPIRES; + f6i->expires = 0; +} + +static inline void fib6_clean_expires(struct fib6_info *f6i) +{ + spin_lock_bh(&f6i->fib6_table->tb6_lock); + fib6_clean_expires_locked(f6i); + spin_unlock_bh(&f6i->fib6_table->tb6_lock); +} + struct ipv6_route_iter { struct seq_net_private p; struct fib6_walker w; diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 3556595ce59a..b32539bb0fb0 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -156,7 +156,7 @@ void fib6_force_start_gc(struct net *net); struct fib6_info *addrconf_f6i_alloc(struct net *net, struct inet6_dev *idev, const struct in6_addr *addr, bool anycast, - gfp_t gfp_flags); + gfp_t gfp_flags, struct netlink_ext_ack *extack); struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev, int flags); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index ed4b6ad3fcac..e8750b4ef7e1 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -52,6 +52,7 @@ struct ip_tunnel_key { u8 tos; /* TOS for IPv4, TC for IPv6 */ u8 ttl; /* TTL for IPv4, HL for IPv6 */ __be32 label; /* Flow Label for IPv6 */ + u32 nhid; __be16 tp_src; __be16 tp_dst; __u8 flow_flags; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 2acc4c808d45..d40d8238d4c2 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -937,7 +937,8 @@ static inline bool ipv6_can_nonlocal_bind(struct net *net, struct inet_sock *inet) { return net->ipv6.sysctl.ip_nonlocal_bind || - inet->freebind || inet->transparent; + test_bit(INET_FLAGS_FREEBIND, &inet->inet_flags) || + test_bit(INET_FLAGS_TRANSPARENT, &inet->inet_flags); } /* Sysctl settings for net ipv6.auto_flowlabels */ @@ -1216,6 +1217,7 @@ void inet6_cleanup_sock(struct sock *sk); void inet6_sock_destruct(struct sock *sk); int inet6_release(struct socket *sock); int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); +int inet6_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len); int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int peer); int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index d2ea5863eedc..b2cf243ebe44 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -426,17 +426,10 @@ struct iw_public_data { /**************************** PROTOTYPES ****************************/ /* - * Functions part of the Wireless Extensions (defined in net/core/wireless.c). - * Those may be called only within the kernel. + * Functions part of the Wireless Extensions (defined in net/wireless/wext-core.c). + * Those may be called by driver modules.
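Returning to the fib6 expiry helpers earlier in this hunk: callers are expected to go through the (un)locked wrappers so the route is linked onto, or removed from, the per-table tb6_gc_hlist consistently. An illustrative sketch, assuming f6i is already linked to a table; the function itself is not from the patch:

#include <linux/jiffies.h>
#include <net/ip6_fib.h>

/* Illustrative: mark a route as expiring so the per-table GC can find it. */
static void example_route_lifetime(struct fib6_info *f6i, u32 lifetime_sec)
{
	if (lifetime_sec == 0xffffffff)
		fib6_clean_expires(f6i);	/* infinite: drop from GC list */
	else
		fib6_set_expires(f6i, jiffies + (unsigned long)lifetime_sec * HZ);
}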
*/ -/* First : function strictly used inside the kernel */ - -/* Handle /proc/net/wireless, called in net/code/dev.c */ -int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length); - -/* Second : functions that may be called by driver modules */ - /* Send a single event to user space */ void wireless_send_event(struct net_device *dev, unsigned int cmd, union iwreq_data *wrqu, const char *extra); diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h index 3e1f76786d7b..7620a9196922 100644 --- a/include/net/llc_c_ac.h +++ b/include/net/llc_c_ac.h @@ -175,7 +175,6 @@ int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb); int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb); int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb); int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb); -int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb); int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb); void llc_conn_busy_tmr_cb(struct timer_list *t); diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h index 3948cf111dd0..241889955157 100644 --- a/include/net/llc_c_ev.h +++ b/include/net/llc_c_ev.h @@ -158,7 +158,6 @@ int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb); int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb); int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb); int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb); -int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb); /* NOT_USED functions and their variations */ int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb); int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb); diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 6f15e6fa154e..53bd2d02a4f0 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -16,9 +16,12 @@ #define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1) #define LWTUNNEL_STATE_XMIT_REDIRECT BIT(2) +/* LWTUNNEL_XMIT_CONTINUE should be distinguishable from dst_output return + * values (NET_XMIT_xxx and NETDEV_TX_xxx in linux/netdevice.h) for safety. + */ enum { LWTUNNEL_XMIT_DONE, - LWTUNNEL_XMIT_CONTINUE, + LWTUNNEL_XMIT_CONTINUE = 0x100, }; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 3a8a2d2c58c3..7c707358d15c 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1192,9 +1192,11 @@ struct ieee80211_tx_info { u8 ampdu_ack_len; u8 ampdu_len; u8 antenna; + u8 pad; u16 tx_time; u8 flags; - void *status_driver_data[18 / sizeof(void *)]; + u8 pad2; + void *status_driver_data[16 / sizeof(void *)]; } status; struct { struct ieee80211_tx_rate driver_rates[ @@ -2259,6 +2261,7 @@ struct ieee80211_sta_aggregates { * @he_cap: HE capabilities of this STA * @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities * @eht_cap: EHT capabilities of this STA + * @agg: per-link data for multi-link aggregation * @bandwidth: current bandwidth the station can receive with * @rx_nss: in HT/VHT, the maximum number of spatial streams the * station can receive at the moment, changed by operating mode @@ -6612,6 +6615,7 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap, * marks frames marked in the bitmap as having been filtered. Afterwards, it * checks if any frames in the window starting from @ssn can now be released * (in case they were only waiting for frames that were filtered.) 
+ * (Only works correctly if @max_rx_aggregation_subframes <= 64 frames) */ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, u16 ssn, u64 filtered, diff --git a/include/net/macsec.h b/include/net/macsec.h index 441ed8fd4b5f..75a6f4863c83 100644 --- a/include/net/macsec.h +++ b/include/net/macsec.h @@ -312,6 +312,8 @@ static inline bool macsec_send_sci(const struct macsec_secy *secy) return tx_sc->send_sci || (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb); } +struct net_device *macsec_get_real_dev(const struct net_device *dev); +bool macsec_netdev_is_offloaded(struct net_device *dev); static inline void *macsec_netdev_priv(const struct net_device *dev) { diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index 96c120160f15..88b6ef7ce1a6 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -33,6 +33,7 @@ enum gdma_request_type { GDMA_DESTROY_PD = 30, GDMA_CREATE_MR = 31, GDMA_DESTROY_MR = 32, + GDMA_QUERY_HWC_TIMEOUT = 84, /* 0x54 */ }; #define GDMA_RESOURCE_DOORBELL_PAGE 27 @@ -57,6 +58,8 @@ enum gdma_eqe_type { GDMA_EQE_HWC_INIT_EQ_ID_DB = 129, GDMA_EQE_HWC_INIT_DATA = 130, GDMA_EQE_HWC_INIT_DONE = 131, + GDMA_EQE_HWC_SOC_RECONFIG = 132, + GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133, }; enum { @@ -531,10 +534,12 @@ enum { * so the driver is able to reliably support features like busy_poll. */ #define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2) +#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3) #define GDMA_DRV_CAP_FLAGS1 \ (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \ - GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX) + GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \ + GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) #define GDMA_DRV_CAP_FLAGS2 0 @@ -664,6 +669,19 @@ struct gdma_disable_queue_req { u32 alloc_res_id_on_creation; }; /* HW DATA */ +/* GDMA_QUERY_HWC_TIMEOUT */ +struct gdma_query_hwc_timeout_req { + struct gdma_req_hdr hdr; + u32 timeout_ms; + u32 reserved; +}; + +struct gdma_query_hwc_timeout_resp { + struct gdma_resp_hdr hdr; + u32 timeout_ms; + u32 reserved; +}; + enum atb_page_size { ATB_PAGE_SIZE_4K, ATB_PAGE_SIZE_8K, diff --git a/include/net/mana/hw_channel.h b/include/net/mana/hw_channel.h index 6a757a6e2732..3d3b5c881bc1 100644 --- a/include/net/mana/hw_channel.h +++ b/include/net/mana/hw_channel.h @@ -23,6 +23,10 @@ #define HWC_INIT_DATA_PF_DEST_RQ_ID 10 #define HWC_INIT_DATA_PF_DEST_CQ_ID 11 +#define HWC_DATA_CFG_HWC_TIMEOUT 1 + +#define HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS 30000 + /* Structures labeled with "HW DATA" are exchanged with the hardware. All of * them are naturally aligned and hence don't need __packed. */ @@ -182,6 +186,7 @@ struct hw_channel_context { u32 pf_dest_vrq_id; u32 pf_dest_vrcq_id; + u32 hwc_timeout; struct hwc_caller_ctx *caller_ctx; }; diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index 024ad8ddb27e..9f70b4332238 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -4,6 +4,8 @@ #ifndef _MANA_H #define _MANA_H +#include <net/xdp.h> + #include "gdma.h" #include "hw_channel.h" @@ -280,6 +282,7 @@ struct mana_recv_buf_oob { struct gdma_wqe_request wqe_req; void *buf_va; + bool from_pool; /* allocated from a page pool */ /* SGL of the buffer going to be sent has part of the work request. */ u32 num_sge; @@ -330,6 +333,8 @@ struct mana_rxq { bool xdp_flush; int xdp_rc; /* XDP redirect return code */ + struct page_pool *page_pool; + /* MUST BE THE LAST MEMBER: * Each receive buffer has an associated mana_recv_buf_oob.
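Returning to the LWTUNNEL_XMIT_CONTINUE change above: because the new value sits outside the low byte, an xmit handler's "continue" result can no longer be mistaken for a NET_XMIT_* or NETDEV_TX_* verdict. A minimal sketch (a hypothetical encap handler, not taken from this diff):

static int example_lwt_xmit(struct sk_buff *skb)
{
	if (!skb->dev)
		return NET_XMIT_DROP;	/* real verdicts fit in the low byte */

	/* 0x100 cannot collide with NET_XMIT_* or NETDEV_TX_* codes, so the
	 * caller can reliably fall through to the regular dst_output() path.
	 */
	return LWTUNNEL_XMIT_CONTINUE;
}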
*/ @@ -347,6 +352,13 @@ struct mana_tx_qp { struct mana_ethtool_stats { u64 stop_queue; u64 wake_queue; + u64 hc_tx_bytes; + u64 hc_tx_ucast_pkts; + u64 hc_tx_ucast_bytes; + u64 hc_tx_bcast_pkts; + u64 hc_tx_bcast_bytes; + u64 hc_tx_mcast_pkts; + u64 hc_tx_mcast_bytes; u64 tx_cqe_err; u64 tx_cqe_unknown_type; u64 rx_coalesced_err; @@ -437,6 +449,7 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq, struct bpf_prog *mana_xdp_get(struct mana_port_context *apc); void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog); int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf); +void mana_query_gf_stats(struct mana_port_context *apc); extern const struct ethtool_ops mana_ethtool_ops; @@ -578,6 +591,49 @@ struct mana_fence_rq_resp { struct gdma_resp_hdr hdr; }; /* HW DATA */ +/* Query stats RQ */ +struct mana_query_gf_stat_req { + struct gdma_req_hdr hdr; + u64 req_stats; +}; /* HW DATA */ + +struct mana_query_gf_stat_resp { + struct gdma_resp_hdr hdr; + u64 reported_stats; + /* rx errors/discards */ + u64 discard_rx_nowqe; + u64 err_rx_vport_disabled; + /* rx bytes/packets */ + u64 hc_rx_bytes; + u64 hc_rx_ucast_pkts; + u64 hc_rx_ucast_bytes; + u64 hc_rx_bcast_pkts; + u64 hc_rx_bcast_bytes; + u64 hc_rx_mcast_pkts; + u64 hc_rx_mcast_bytes; + /* tx errors */ + u64 err_tx_gf_disabled; + u64 err_tx_vport_disabled; + u64 err_tx_inval_vport_offset_pkt; + u64 err_tx_vlan_enforcement; + u64 err_tx_ethtype_enforcement; + u64 err_tx_SA_enforecement; + u64 err_tx_SQPDID_enforcement; + u64 err_tx_CQPDID_enforcement; + u64 err_tx_mtu_violation; + u64 err_tx_inval_oob; + /* tx bytes/packets */ + u64 hc_tx_bytes; + u64 hc_tx_ucast_pkts; + u64 hc_tx_ucast_bytes; + u64 hc_tx_bcast_pkts; + u64 hc_tx_bcast_bytes; + u64 hc_tx_mcast_pkts; + u64 hc_tx_mcast_bytes; + /* tx error */ + u64 err_tx_gdma; +}; /* HW DATA */ + /* Configure vPort Rx Steering */ struct mana_cfg_rx_steer_req_v2 { struct gdma_req_hdr hdr; @@ -657,6 +713,42 @@ struct mana_deregister_filter_resp { struct gdma_resp_hdr hdr; }; /* HW DATA */ +/* Requested GF stats Flags */ +/* Rx discards/Errors */ +#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE 0x0000000000000001 +#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED 0x0000000000000002 +/* Rx bytes/pkts */ +#define STATISTICS_FLAGS_HC_RX_BYTES 0x0000000000000004 +#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS 0x0000000000000008 +#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES 0x0000000000000010 +#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS 0x0000000000000020 +#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES 0x0000000000000040 +#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS 0x0000000000000080 +#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES 0x0000000000000100 +/* Tx errors */ +#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED 0x0000000000000200 +#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED 0x0000000000000400 +#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \ + 0x0000000000000800 +#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT 0x0000000000001000 +#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \ + 0x0000000000002000 +#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT 0x0000000000004000 +#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT 0x0000000000008000 +#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT 0x0000000000010000 +#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION 0x0000000000020000 +#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB 0x0000000000040000 +/* Tx bytes/pkts */ +#define STATISTICS_FLAGS_HC_TX_BYTES 0x0000000000080000 +#define 
STATISTICS_FLAGS_HC_TX_UCAST_PACKETS 0x0000000000100000 +#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES 0x0000000000200000 +#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS 0x0000000000400000 +#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES 0x0000000000800000 +#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS 0x0000000001000000 +#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES 0x0000000002000000 +/* Tx error */ +#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR 0x0000000004000000 + #define MANA_MAX_NUM_QUEUES 64 #define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1) diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 3c5c68618fcc..fb996124b3d5 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -96,6 +96,27 @@ struct mptcp_out_options { #endif }; +#define MPTCP_SCHED_NAME_MAX 16 +#define MPTCP_SUBFLOWS_MAX 8 + +struct mptcp_sched_data { + bool reinject; + u8 subflows; + struct mptcp_subflow_context *contexts[MPTCP_SUBFLOWS_MAX]; +}; + +struct mptcp_sched_ops { + int (*get_subflow)(struct mptcp_sock *msk, + struct mptcp_sched_data *data); + + char name[MPTCP_SCHED_NAME_MAX]; + struct module *owner; + struct list_head list; + + void (*init)(struct mptcp_sock *msk); + void (*release)(struct mptcp_sock *msk); +} ____cacheline_aligned_in_smp; + #ifdef CONFIG_MPTCP void mptcp_init(void); diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 52eae0943433..9bbdf6eaa942 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -488,9 +488,6 @@ void igmp6_event_report(struct sk_buff *skb); #ifdef CONFIG_SYSCTL int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos); -int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl, - void __user *oldval, size_t __user *oldlenp, - void __user *newval, size_t newlen); #endif void inet6_ifinfo_notify(int event, struct inet6_dev *idev); diff --git a/include/net/neighbour.h b/include/net/neighbour.h index f6a8ecc6b1fa..6da68886fabb 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -394,8 +394,6 @@ void neigh_for_each(struct neigh_table *tbl, void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *)); int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *); -void pneigh_for_each(struct neigh_table *tbl, - void (*cb)(struct pneigh_entry *)); struct neigh_seq_state { struct seq_net_private p; diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 78beaa765c73..9f6add96de2d 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -42,6 +42,7 @@ #include <linux/idr.h> #include <linux/skbuff.h> #include <linux/notifier.h> +#include <linux/xarray.h> struct user_namespace; struct proc_dir_entry; @@ -69,7 +70,7 @@ struct net { atomic_t dev_unreg_count; unsigned int dev_base_seq; /* protected by rtnl_mutex */ - int ifindex; + u32 ifindex; spinlock_t nsid_lock; atomic_t fnhe_genid; @@ -110,6 +111,7 @@ struct net { struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; + struct xarray dev_by_index; struct raw_notifier_head netdev_chain; /* Note that @hash_mix can be read millions times per second, diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h new file mode 100644 index 000000000000..cdcafb30d437 --- /dev/null +++ b/include/net/netdev_rx_queue.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_NETDEV_RX_QUEUE_H +#define _LINUX_NETDEV_RX_QUEUE_H + +#include <linux/kobject.h> +#include <linux/netdevice.h> +#include <linux/sysfs.h> +#include <net/xdp.h> + 
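One consequence of the struct net changes above (ifindex widened to u32 plus the new dev_by_index xarray) is that ifindex-keyed lookups can use the xarray API directly; a minimal sketch with a hypothetical accessor name and the usual RCU caveats:

static struct net_device *example_dev_by_index(struct net *net, u32 ifindex)
{
	/* xa_load() is safe under rcu_read_lock(); the caller must keep
	 * holding RCU (or the RTNL) for as long as it uses the pointer.
	 */
	return xa_load(&net->dev_by_index, ifindex);
}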
+/* This structure contains an instance of an RX queue. */ +struct netdev_rx_queue { + struct xdp_rxq_info xdp_rxq; +#ifdef CONFIG_RPS + struct rps_map __rcu *rps_map; + struct rps_dev_flow_table __rcu *rps_flow_table; +#endif + struct kobject kobj; + struct net_device *dev; + netdevice_tracker dev_tracker; + +#ifdef CONFIG_XDP_SOCKETS + struct xsk_buff_pool *pool; +#endif +} ____cacheline_aligned_in_smp; + +/* + * RX queue sysfs structures and functions. + */ +struct rx_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_rx_queue *queue, char *buf); + ssize_t (*store)(struct netdev_rx_queue *queue, + const char *buf, size_t len); +}; + +static inline struct netdev_rx_queue * +__netif_get_rx_queue(struct net_device *dev, unsigned int rxq) +{ + return dev->_rx + rxq; +} + +#ifdef CONFIG_SYSFS +static inline unsigned int +get_netdev_rx_queue_index(struct netdev_rx_queue *queue) +{ + struct net_device *dev = queue->dev; + int index = queue - dev->_rx; + + BUG_ON(index >= dev->num_rx_queues); + return index; +} +#endif +#endif diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index a72028dbef0c..4085765c3370 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -190,10 +190,6 @@ static inline void nf_ct_put(struct nf_conn *ct) nf_ct_destroy(&ct->ct_general); } -/* Protocol module loading */ -int nf_ct_l3proto_try_module_get(unsigned short l3proto); -void nf_ct_l3proto_module_put(unsigned short l3proto); - /* load module; enable/disable conntrack in this namespace */ int nf_ct_netns_get(struct net *net, u8 nfproto); void nf_ct_netns_put(struct net *net, u8 nfproto); diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h index 4b2b7f8914ea..a120685cac93 100644 --- a/include/net/netfilter/nf_conntrack_acct.h +++ b/include/net/netfilter/nf_conntrack_acct.h @@ -78,6 +78,4 @@ static inline void nf_ct_acct_update(struct nf_conn *ct, u32 dir, void nf_conntrack_acct_pernet_init(struct net *net); -void nf_conntrack_acct_fini(void); - #endif /* _NF_CONNTRACK_ACCT_H */ diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index cf0d81be5a96..165e7a03b8e9 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h @@ -100,7 +100,7 @@ nf_ct_expect_find_get(struct net *net, struct nf_conntrack_expect * nf_ct_find_expectation(struct net *net, const struct nf_conntrack_zone *zone, - const struct nf_conntrack_tuple *tuple); + const struct nf_conntrack_tuple *tuple, bool unlink); void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, u32 portid, int report); diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index f30b1694b690..de2f956abf34 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h @@ -136,8 +136,6 @@ static inline void *nfct_help_data(const struct nf_conn *ct) return (void *)help->data; } -void nf_conntrack_helper_pernet_init(struct net *net); - int nf_conntrack_helper_init(void); void nf_conntrack_helper_fini(void); @@ -182,5 +180,4 @@ void nf_nat_helper_unregister(struct nf_conntrack_nat_helper *nat); int nf_nat_helper_try_module_get(const char *name, u16 l3num, u8 protonum); void nf_nat_helper_put(struct nf_conntrack_helper *helper); -void nf_ct_set_auto_assign_helper_warned(struct net *net); #endif /*_NF_CONNTRACK_HELPER_H*/ diff --git 
a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h index 66bab6c60d12..fcb19a4e8f2b 100644 --- a/include/net/netfilter/nf_conntrack_labels.h +++ b/include/net/netfilter/nf_conntrack_labels.h @@ -52,7 +52,6 @@ int nf_connlabels_replace(struct nf_conn *ct, const u32 *data, const u32 *mask, unsigned int words); #ifdef CONFIG_NF_CONNTRACK_LABELS -int nf_conntrack_labels_init(void); int nf_connlabels_get(struct net *net, unsigned int bit); void nf_connlabels_put(struct net *net); #else diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 640441a2f926..dd40c75011d2 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -512,6 +512,7 @@ struct nft_set_elem_expr { * * @list: table set list node * @bindings: list of set bindings + * @refs: internal refcounting for async set destruction * @table: table this set belongs to * @net: netnamespace this set belongs to * @name: name of the set @@ -533,6 +534,7 @@ struct nft_set_elem_expr { * @expr: stateful expression * @ops: set ops * @flags: set flags + * @dead: set will be freed, never cleared * @genmask: generation mask * @klen: key length * @dlen: data length @@ -541,6 +543,7 @@ struct nft_set_elem_expr { struct nft_set { struct list_head list; struct list_head bindings; + refcount_t refs; struct nft_table *table; possible_net_t net; char *name; @@ -562,7 +565,8 @@ struct nft_set { struct list_head pending_update; /* runtime data below here */ const struct nft_set_ops *ops ____cacheline_aligned; - u16 flags:14, + u16 flags:13, + dead:1, genmask:2; u8 klen; u8 dlen; @@ -583,6 +587,11 @@ static inline void *nft_set_priv(const struct nft_set *set) return (void *)set->data; } +static inline bool nft_set_gc_is_pending(const struct nft_set *s) +{ + return refcount_read(&s->refs) != 1; +} + static inline struct nft_set *nft_set_container_of(const void *priv) { return (void *)priv - offsetof(struct nft_set, data); @@ -596,7 +605,6 @@ struct nft_set *nft_set_lookup_global(const struct net *net, struct nft_set_ext *nft_set_catchall_lookup(const struct net *net, const struct nft_set *set); -void *nft_set_catchall_gc(const struct nft_set *set); static inline unsigned long nft_set_gc_interval(const struct nft_set *set) { @@ -813,62 +821,6 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem, void nf_tables_set_elem_destroy(const struct nft_ctx *ctx, const struct nft_set *set, void *elem); -/** - * struct nft_set_gc_batch_head - nf_tables set garbage collection batch - * - * @rcu: rcu head - * @set: set the elements belong to - * @cnt: count of elements - */ -struct nft_set_gc_batch_head { - struct rcu_head rcu; - const struct nft_set *set; - unsigned int cnt; -}; - -#define NFT_SET_GC_BATCH_SIZE ((PAGE_SIZE - \ - sizeof(struct nft_set_gc_batch_head)) / \ - sizeof(void *)) - -/** - * struct nft_set_gc_batch - nf_tables set garbage collection batch - * - * @head: GC batch head - * @elems: garbage collection elements - */ -struct nft_set_gc_batch { - struct nft_set_gc_batch_head head; - void *elems[NFT_SET_GC_BATCH_SIZE]; -}; - -struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set, - gfp_t gfp); -void nft_set_gc_batch_release(struct rcu_head *rcu); - -static inline void nft_set_gc_batch_complete(struct nft_set_gc_batch *gcb) -{ - if (gcb != NULL) - call_rcu(&gcb->head.rcu, nft_set_gc_batch_release); -} - -static inline struct nft_set_gc_batch * -nft_set_gc_batch_check(const struct nft_set *set, struct nft_set_gc_batch 
*gcb, - gfp_t gfp) -{ - if (gcb != NULL) { - if (gcb->head.cnt + 1 < ARRAY_SIZE(gcb->elems)) - return gcb; - nft_set_gc_batch_complete(gcb); - } - return nft_set_gc_batch_alloc(set, gfp); -} - -static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb, - void *elem) -{ - gcb->elems[gcb->head.cnt++] = elem; -} - struct nft_expr_ops; /** * struct nft_expr_type - nf_tables expression type @@ -1557,39 +1509,30 @@ static inline void nft_set_elem_change_active(const struct net *net, #endif /* IS_ENABLED(CONFIG_NF_TABLES) */ -/* - * We use a free bit in the genmask field to indicate the element - * is busy, meaning it is currently being processed either by - * the netlink API or GC. - * - * Even though the genmask is only a single byte wide, this works - * because the extension structure if fully constant once initialized, - * so there are no non-atomic write accesses unless it is already - * marked busy. - */ -#define NFT_SET_ELEM_BUSY_MASK (1 << 2) +#define NFT_SET_ELEM_DEAD_MASK (1 << 2) #if defined(__LITTLE_ENDIAN_BITFIELD) -#define NFT_SET_ELEM_BUSY_BIT 2 +#define NFT_SET_ELEM_DEAD_BIT 2 #elif defined(__BIG_ENDIAN_BITFIELD) -#define NFT_SET_ELEM_BUSY_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2) +#define NFT_SET_ELEM_DEAD_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2) #else #error #endif -static inline int nft_set_elem_mark_busy(struct nft_set_ext *ext) +static inline void nft_set_elem_dead(struct nft_set_ext *ext) { unsigned long *word = (unsigned long *)ext; BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0); - return test_and_set_bit(NFT_SET_ELEM_BUSY_BIT, word); + set_bit(NFT_SET_ELEM_DEAD_BIT, word); } -static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext) +static inline int nft_set_elem_is_dead(const struct nft_set_ext *ext) { unsigned long *word = (unsigned long *)ext; - clear_bit(NFT_SET_ELEM_BUSY_BIT, word); + BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0); + return test_bit(NFT_SET_ELEM_DEAD_BIT, word); } /** @@ -1732,6 +1675,38 @@ struct nft_trans_flowtable { #define nft_trans_flowtable_flags(trans) \ (((struct nft_trans_flowtable *)trans->data)->flags) +#define NFT_TRANS_GC_BATCHCOUNT 256 + +struct nft_trans_gc { + struct list_head list; + struct net *net; + struct nft_set *set; + u32 seq; + u8 count; + void *priv[NFT_TRANS_GC_BATCHCOUNT]; + struct rcu_head rcu; +}; + +struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set, + unsigned int gc_seq, gfp_t gfp); +void nft_trans_gc_destroy(struct nft_trans_gc *trans); + +struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc, + unsigned int gc_seq, gfp_t gfp); +void nft_trans_gc_queue_async_done(struct nft_trans_gc *gc); + +struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp); +void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans); + +void nft_trans_gc_elem_add(struct nft_trans_gc *gc, void *priv); + +struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc, + unsigned int gc_seq); + +void nft_setelem_data_deactivate(const struct net *net, + const struct nft_set *set, + struct nft_set_elem *elem); + int __init nft_chain_filter_init(void); void nft_chain_filter_fini(void); @@ -1758,6 +1733,8 @@ struct nftables_pernet { struct mutex commit_mutex; u64 table_handle; unsigned int base_seq; + unsigned int gc_seq; + u8 validate_state; }; extern unsigned int nf_tables_net_id; diff --git a/include/net/netlink.h b/include/net/netlink.h index b12cd957abb4..8a7cd1170e1f 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -375,12 +375,11 @@ 
struct nla_policy { #define NLA_POLICY_BITFIELD32(valid) \ { .type = NLA_BITFIELD32, .bitfield32_valid = valid } -#define __NLA_IS_UINT_TYPE(tp) \ - (tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || tp == NLA_U64) +#define __NLA_IS_UINT_TYPE(tp) \ + (tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || \ + tp == NLA_U64 || tp == NLA_BE16 || tp == NLA_BE32) #define __NLA_IS_SINT_TYPE(tp) \ (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64) -#define __NLA_IS_BEINT_TYPE(tp) \ - (tp == NLA_BE16 || tp == NLA_BE32) #define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition)) #define NLA_ENSURE_UINT_TYPE(tp) \ @@ -394,7 +393,6 @@ struct nla_policy { #define NLA_ENSURE_INT_OR_BINARY_TYPE(tp) \ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \ __NLA_IS_SINT_TYPE(tp) || \ - __NLA_IS_BEINT_TYPE(tp) || \ tp == NLA_MSECS || \ tp == NLA_BINARY) + tp) #define NLA_ENSURE_NO_VALIDATION_PTR(tp) \ @@ -402,8 +400,6 @@ struct nla_policy { tp != NLA_REJECT && \ tp != NLA_NESTED && \ tp != NLA_NESTED_ARRAY) + tp) -#define NLA_ENSURE_BEINT_TYPE(tp) \ - (__NLA_ENSURE(__NLA_IS_BEINT_TYPE(tp)) + tp) #define NLA_POLICY_RANGE(tp, _min, _max) { \ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \ diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index f00374718159..7a41c4791536 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -152,7 +152,7 @@ struct netns_ipv4 { u8 sysctl_tcp_abort_on_overflow; u8 sysctl_tcp_fack; /* obsolete */ int sysctl_tcp_max_reordering; - int sysctl_tcp_adv_win_scale; + int sysctl_tcp_adv_win_scale; /* obsolete */ u8 sysctl_tcp_dsack; u8 sysctl_tcp_app_win; u8 sysctl_tcp_frto; diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h index 8c77832d0240..cc8060c017d5 100644 --- a/include/net/netns/nftables.h +++ b/include/net/netns/nftables.h @@ -2,8 +2,6 @@ #ifndef _NETNS_NFTABLES_H_ #define _NETNS_NFTABLES_H_ -#include <linux/list.h> - struct netns_nftables { u8 gencursor; }; diff --git a/include/net/p8022.h b/include/net/p8022.h index b690ffcad66b..a29e224ac498 100644 --- a/include/net/p8022.h +++ b/include/net/p8022.h @@ -13,7 +13,4 @@ register_8022_client(unsigned char type, struct packet_type *pt, struct net_device *orig_dev)); void unregister_8022_client(struct datalink_proto *proto); - -struct datalink_proto *make_8023_client(void); -void destroy_8023_client(struct datalink_proto *dl); #endif diff --git a/include/net/page_pool.h b/include/net/page_pool.h deleted file mode 100644 index 126f9e294389..000000000000 --- a/include/net/page_pool.h +++ /dev/null @@ -1,402 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 - * - * page_pool.h - * Author: Jesper Dangaard Brouer <netoptimizer@brouer.com> - * Copyright (C) 2016 Red Hat, Inc. - */ - -/** - * DOC: page_pool allocator - * - * This page_pool allocator is optimized for the XDP mode that - * uses one-frame-per-page, but have fallbacks that act like the - * regular page allocator APIs. - * - * Basic use involve replacing alloc_pages() calls with the - * page_pool_alloc_pages() call. Drivers should likely use - * page_pool_dev_alloc_pages() replacing dev_alloc_pages(). - * - * API keeps track of in-flight pages, in-order to let API user know - * when it is safe to dealloactor page_pool object. Thus, API users - * must make sure to call page_pool_release_page() when a page is - * "leaving" the page_pool. Or call page_pool_put_page() where - * appropiate. For maintaining correct accounting. 
- * - * API user must only call page_pool_put_page() once on a page, as it - * will either recycle the page, or in case of elevated refcnt, it - * will release the DMA mapping and in-flight state accounting. We - * hope to lift this requirement in the future. - */ -#ifndef _NET_PAGE_POOL_H -#define _NET_PAGE_POOL_H - -#include <linux/mm.h> /* Needed by ptr_ring */ -#include <linux/ptr_ring.h> -#include <linux/dma-direction.h> - -#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA - * map/unmap - */ -#define PP_FLAG_DMA_SYNC_DEV BIT(1) /* If set all pages that the driver gets - * from page_pool will be - * DMA-synced-for-device according to - * the length provided by the device - * driver. - * Please note DMA-sync-for-CPU is still - * device driver responsibility - */ -#define PP_FLAG_PAGE_FRAG BIT(2) /* for page frag feature */ -#define PP_FLAG_ALL (PP_FLAG_DMA_MAP |\ - PP_FLAG_DMA_SYNC_DEV |\ - PP_FLAG_PAGE_FRAG) - -/* - * Fast allocation side cache array/stack - * - * The cache size and refill watermark is related to the network - * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX - * ring is usually refilled and the max consumed elements will be 64, - * thus a natural max size of objects needed in the cache. - * - * Keeping room for more objects, is due to XDP_DROP use-case. As - * XDP_DROP allows the opportunity to recycle objects directly into - * this array, as it shares the same softirq/NAPI protection. If - * cache is already full (or partly full) then the XDP_DROP recycles - * would have to take a slower code path. - */ -#define PP_ALLOC_CACHE_SIZE 128 -#define PP_ALLOC_CACHE_REFILL 64 -struct pp_alloc_cache { - u32 count; - struct page *cache[PP_ALLOC_CACHE_SIZE]; -}; - -struct page_pool_params { - unsigned int flags; - unsigned int order; - unsigned int pool_size; - int nid; /* Numa node id to allocate from pages from */ - struct device *dev; /* device, for DMA pre-mapping purposes */ - struct napi_struct *napi; /* Sole consumer of pages, otherwise NULL */ - enum dma_data_direction dma_dir; /* DMA mapping direction */ - unsigned int max_len; /* max DMA sync memory size */ - unsigned int offset; /* DMA addr offset */ - void (*init_callback)(struct page *page, void *arg); - void *init_arg; -}; - -#ifdef CONFIG_PAGE_POOL_STATS -struct page_pool_alloc_stats { - u64 fast; /* fast path allocations */ - u64 slow; /* slow-path order 0 allocations */ - u64 slow_high_order; /* slow-path high order allocations */ - u64 empty; /* failed refills due to empty ptr ring, forcing - * slow path allocation - */ - u64 refill; /* allocations via successful refill */ - u64 waive; /* failed refills due to numa zone mismatch */ -}; - -struct page_pool_recycle_stats { - u64 cached; /* recycling placed page in the cache. */ - u64 cache_full; /* cache was full */ - u64 ring; /* recycling placed page back into ptr ring */ - u64 ring_full; /* page was released from page-pool because - * PTR ring was full. - */ - u64 released_refcnt; /* page released because of elevated - * refcnt - */ -}; - -/* This struct wraps the above stats structs so users of the - * page_pool_get_stats API can pass a single argument when requesting the - * stats for the page pool. 
- */ -struct page_pool_stats { - struct page_pool_alloc_stats alloc_stats; - struct page_pool_recycle_stats recycle_stats; -}; - -int page_pool_ethtool_stats_get_count(void); -u8 *page_pool_ethtool_stats_get_strings(u8 *data); -u64 *page_pool_ethtool_stats_get(u64 *data, void *stats); - -/* - * Drivers that wish to harvest page pool stats and report them to users - * (perhaps via ethtool, debugfs, or another mechanism) can allocate a - * struct page_pool_stats call page_pool_get_stats to get stats for the specified pool. - */ -bool page_pool_get_stats(struct page_pool *pool, - struct page_pool_stats *stats); -#else - -static inline int page_pool_ethtool_stats_get_count(void) -{ - return 0; -} - -static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data) -{ - return data; -} - -static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats) -{ - return data; -} - -#endif - -struct page_pool { - struct page_pool_params p; - - struct delayed_work release_dw; - void (*disconnect)(void *); - unsigned long defer_start; - unsigned long defer_warn; - - u32 pages_state_hold_cnt; - unsigned int frag_offset; - struct page *frag_page; - long frag_users; - -#ifdef CONFIG_PAGE_POOL_STATS - /* these stats are incremented while in softirq context */ - struct page_pool_alloc_stats alloc_stats; -#endif - u32 xdp_mem_id; - - /* - * Data structure for allocation side - * - * Drivers allocation side usually already perform some kind - * of resource protection. Piggyback on this protection, and - * require driver to protect allocation side. - * - * For NIC drivers this means, allocate a page_pool per - * RX-queue. As the RX-queue is already protected by - * Softirq/BH scheduling and napi_schedule. NAPI schedule - * guarantee that a single napi_struct will only be scheduled - * on a single CPU (see napi_schedule). - */ - struct pp_alloc_cache alloc ____cacheline_aligned_in_smp; - - /* Data structure for storing recycled pages. - * - * Returning/freeing pages is more complicated synchronization - * wise, because free's can happen on remote CPUs, with no - * association with allocation resource. - * - * Use ptr_ring, as it separates consumer and producer - * effeciently, it a way that doesn't bounce cache-lines. - * - * TODO: Implement bulk return pages into this structure. - */ - struct ptr_ring ring; - -#ifdef CONFIG_PAGE_POOL_STATS - /* recycle stats are per-cpu to avoid locking */ - struct page_pool_recycle_stats __percpu *recycle_stats; -#endif - atomic_t pages_state_release_cnt; - - /* A page_pool is strictly tied to a single RX-queue being - * protected by NAPI, due to above pp_alloc_cache. This - * refcnt serves purpose is to simplify drivers error handling. - */ - refcount_t user_cnt; - - u64 destroy_cnt; -}; - -struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp); - -static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) -{ - gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); - - return page_pool_alloc_pages(pool, gfp); -} - -struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset, - unsigned int size, gfp_t gfp); - -static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, - unsigned int *offset, - unsigned int size) -{ - gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); - - return page_pool_alloc_frag(pool, offset, size, gfp); -} - -/* get the stored dma direction. 
A driver might decide to treat this locally and - * avoid the extra cache line from page_pool to determine the direction - */ -static -inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool) -{ - return pool->p.dma_dir; -} - -bool page_pool_return_skb_page(struct page *page, bool napi_safe); - -struct page_pool *page_pool_create(const struct page_pool_params *params); - -struct xdp_mem_info; - -#ifdef CONFIG_PAGE_POOL -void page_pool_unlink_napi(struct page_pool *pool); -void page_pool_destroy(struct page_pool *pool); -void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *), - struct xdp_mem_info *mem); -void page_pool_release_page(struct page_pool *pool, struct page *page); -void page_pool_put_page_bulk(struct page_pool *pool, void **data, - int count); -#else -static inline void page_pool_unlink_napi(struct page_pool *pool) -{ -} - -static inline void page_pool_destroy(struct page_pool *pool) -{ -} - -static inline void page_pool_use_xdp_mem(struct page_pool *pool, - void (*disconnect)(void *), - struct xdp_mem_info *mem) -{ -} -static inline void page_pool_release_page(struct page_pool *pool, - struct page *page) -{ -} - -static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data, - int count) -{ -} -#endif - -void page_pool_put_defragged_page(struct page_pool *pool, struct page *page, - unsigned int dma_sync_size, - bool allow_direct); - -/* pp_frag_count represents the number of writers who can update the page - * either by updating skb->data or via DMA mappings for the device. - * We can't rely on the page refcnt for that as we don't know who might be - * holding page references and we can't reliably destroy or sync DMA mappings - * of the fragments. - * - * When pp_frag_count reaches 0 we can either recycle the page if the page - * refcnt is 1 or return it back to the memory allocator and destroy any - * mappings we have. - */ -static inline void page_pool_fragment_page(struct page *page, long nr) -{ - atomic_long_set(&page->pp_frag_count, nr); -} - -static inline long page_pool_defrag_page(struct page *page, long nr) -{ - long ret; - - /* If nr == pp_frag_count then we have cleared all remaining - * references to the page. No need to actually overwrite it, instead - * we can leave this to be overwritten by the calling function. - * - * The main advantage to doing this is that an atomic_read is - * generally a much cheaper operation than an atomic update, - * especially when dealing with a page that may be partitioned - * into only 2 or 3 pieces. - */ - if (atomic_long_read(&page->pp_frag_count) == nr) - return 0; - - ret = atomic_long_sub_return(nr, &page->pp_frag_count); - WARN_ON(ret < 0); - return ret; -} - -static inline bool page_pool_is_last_frag(struct page_pool *pool, - struct page *page) -{ - /* If fragments aren't enabled or count is 0 we were the last user */ - return !(pool->p.flags & PP_FLAG_PAGE_FRAG) || - (page_pool_defrag_page(page, 1) == 0); -} - -static inline void page_pool_put_page(struct page_pool *pool, - struct page *page, - unsigned int dma_sync_size, - bool allow_direct) -{ - /* When page_pool isn't compiled-in, net/core/xdp.c doesn't - * allow registering MEM_TYPE_PAGE_POOL, but shield linker. 
- */ -#ifdef CONFIG_PAGE_POOL - if (!page_pool_is_last_frag(pool, page)) - return; - - page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct); -#endif -} - -/* Same as above but will try to sync the entire area pool->max_len */ -static inline void page_pool_put_full_page(struct page_pool *pool, - struct page *page, bool allow_direct) -{ - page_pool_put_page(pool, page, -1, allow_direct); -} - -/* Same as above but the caller must guarantee safe context. e.g NAPI */ -static inline void page_pool_recycle_direct(struct page_pool *pool, - struct page *page) -{ - page_pool_put_full_page(pool, page, true); -} - -#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \ - (sizeof(dma_addr_t) > sizeof(unsigned long)) - -static inline dma_addr_t page_pool_get_dma_addr(struct page *page) -{ - dma_addr_t ret = page->dma_addr; - - if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT) - ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16; - - return ret; -} - -static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr) -{ - page->dma_addr = addr; - if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT) - page->dma_addr_upper = upper_32_bits(addr); -} - -static inline bool is_page_pool_compiled_in(void) -{ -#ifdef CONFIG_PAGE_POOL - return true; -#else - return false; -#endif -} - -static inline bool page_pool_put(struct page_pool *pool) -{ - return refcount_dec_and_test(&pool->user_cnt); -} - -/* Caller must provide appropriate safe context, e.g. NAPI. */ -void page_pool_update_nid(struct page_pool *pool, int new_nid); -static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid) -{ - if (unlikely(pool->p.nid != new_nid)) - page_pool_update_nid(pool, new_nid); -} - -#endif /* _NET_PAGE_POOL_H */ diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h new file mode 100644 index 000000000000..94231533a369 --- /dev/null +++ b/include/net/page_pool/helpers.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * page_pool/helpers.h + * Author: Jesper Dangaard Brouer <netoptimizer@brouer.com> + * Copyright (C) 2016 Red Hat, Inc. + */ + +/** + * DOC: page_pool allocator + * + * The page_pool allocator is optimized for the XDP mode that + * uses one frame per page, but it can fall back on the + * regular page allocator APIs. + * + * Basic use involves replacing alloc_pages() calls with the + * page_pool_alloc_pages() call. Drivers should use + * page_pool_dev_alloc_pages() replacing dev_alloc_pages(). + * + * API keeps track of in-flight pages, in order to let API user know + * when it is safe to free a page_pool object. Thus, API users + * must call page_pool_put_page() to free the page, or attach + * the page to page_pool-aware objects like skbs marked with + * skb_mark_for_recycle(). + * + * API user must call page_pool_put_page() once on a page, as it + * will either recycle the page, or in case of refcnt > 1, it will + * release the DMA mapping and in-flight state accounting. + */ +#ifndef _NET_PAGE_POOL_HELPERS_H +#define _NET_PAGE_POOL_HELPERS_H + +#include <net/page_pool/types.h> + +#ifdef CONFIG_PAGE_POOL_STATS +int page_pool_ethtool_stats_get_count(void); +u8 *page_pool_ethtool_stats_get_strings(u8 *data); +u64 *page_pool_ethtool_stats_get(u64 *data, void *stats); + +/* + * Drivers that wish to harvest page pool stats and report them to users + * (perhaps via ethtool, debugfs, or another mechanism) can allocate a + * struct page_pool_stats and call page_pool_get_stats() to get stats for the specified pool.
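A minimal sketch of that harvesting pattern (the function name is hypothetical, and the call is only meaningful when CONFIG_PAGE_POOL_STATS is enabled):

static void example_report_pool_stats(struct page_pool *pool)
{
	struct page_pool_stats stats = { };

	/* Returns false when no stats are available for this pool. */
	if (!page_pool_get_stats(pool, &stats))
		return;

	pr_info("fast allocs %llu, cache recycles %llu\n",
		stats.alloc_stats.fast, stats.recycle_stats.cached);
}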
+ */ +bool page_pool_get_stats(struct page_pool *pool, + struct page_pool_stats *stats); +#else +static inline int page_pool_ethtool_stats_get_count(void) +{ + return 0; +} + +static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data) +{ + return data; +} + +static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats) +{ + return data; +} +#endif + +/** + * page_pool_dev_alloc_pages() - allocate a page. + * @pool: pool from which to allocate + * + * Get a page from the page allocator or page_pool caches. + */ +static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) +{ + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); + + return page_pool_alloc_pages(pool, gfp); +} + +static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, + unsigned int *offset, + unsigned int size) +{ + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); + + return page_pool_alloc_frag(pool, offset, size, gfp); +} + +/** + * page_pool_get_dma_dir() - Retrieve the stored DMA direction. + * @pool: pool from which page was allocated + * + * Get the stored dma direction. A driver might decide to store this locally + * and avoid the extra cache line from page_pool to determine the direction. + */ +static +inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool) +{ + return pool->p.dma_dir; +} + +/* pp_frag_count represents the number of writers who can update the page + * either by updating skb->data or via DMA mappings for the device. + * We can't rely on the page refcnt for that as we don't know who might be + * holding page references and we can't reliably destroy or sync DMA mappings + * of the fragments. + * + * When pp_frag_count reaches 0 we can either recycle the page if the page + * refcnt is 1 or return it back to the memory allocator and destroy any + * mappings we have. + */ +static inline void page_pool_fragment_page(struct page *page, long nr) +{ + atomic_long_set(&page->pp_frag_count, nr); +} + +static inline long page_pool_defrag_page(struct page *page, long nr) +{ + long ret; + + /* If nr == pp_frag_count then we have cleared all remaining + * references to the page. No need to actually overwrite it, instead + * we can leave this to be overwritten by the calling function. + * + * The main advantage to doing this is that an atomic_read is + * generally a much cheaper operation than an atomic update, + * especially when dealing with a page that may be partitioned + * into only 2 or 3 pieces. + */ + if (atomic_long_read(&page->pp_frag_count) == nr) + return 0; + + ret = atomic_long_sub_return(nr, &page->pp_frag_count); + WARN_ON(ret < 0); + return ret; +} + +static inline bool page_pool_is_last_frag(struct page_pool *pool, + struct page *page) +{ + /* If fragments aren't enabled or count is 0 we were the last user */ + return !(pool->p.flags & PP_FLAG_PAGE_FRAG) || + (page_pool_defrag_page(page, 1) == 0); +} + +/** + * page_pool_put_page() - release a reference to a page pool page + * @pool: pool from which page was allocated + * @page: page to release a reference on + * @dma_sync_size: how much of the page may have been touched by the device + * @allow_direct: released by the consumer, allow lockless caching + * + * The outcome of this depends on the page refcnt. If the driver bumps + * the refcnt > 1 this will unmap the page. If the page refcnt is 1 + * the allocator owns the page and will try to recycle it in one of the pool + * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device + * using dma_sync_single_range_for_device(). 
+ */ +static inline void page_pool_put_page(struct page_pool *pool, + struct page *page, + unsigned int dma_sync_size, + bool allow_direct) +{ + /* When page_pool isn't compiled-in, net/core/xdp.c doesn't + * allow registering MEM_TYPE_PAGE_POOL, but shield linker. + */ +#ifdef CONFIG_PAGE_POOL + if (!page_pool_is_last_frag(pool, page)) + return; + + page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct); +#endif +} + +/** + * page_pool_put_full_page() - release a reference on a page pool page + * @pool: pool from which page was allocated + * @page: page to release a reference on + * @allow_direct: released by the consumer, allow lockless caching + * + * Similar to page_pool_put_page(), but will DMA sync the entire memory area + * as configured in &page_pool_params.max_len. + */ +static inline void page_pool_put_full_page(struct page_pool *pool, + struct page *page, bool allow_direct) +{ + page_pool_put_page(pool, page, -1, allow_direct); +} + +/** + * page_pool_recycle_direct() - release a reference on a page pool page + * @pool: pool from which page was allocated + * @page: page to release a reference on + * + * Similar to page_pool_put_full_page() but caller must guarantee safe context + * (e.g. NAPI), since it will recycle the page directly into the pool fast cache. + */ +static inline void page_pool_recycle_direct(struct page_pool *pool, + struct page *page) +{ + page_pool_put_full_page(pool, page, true); +} + +#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \ + (sizeof(dma_addr_t) > sizeof(unsigned long)) + +/** + * page_pool_get_dma_addr() - Retrieve the stored DMA address. + * @page: page allocated from a page pool + * + * Fetch the DMA address of the page. The page pool to which the page belongs + * must have been created with PP_FLAG_DMA_MAP. + */ +static inline dma_addr_t page_pool_get_dma_addr(struct page *page) +{ + dma_addr_t ret = page->dma_addr; + + if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT) + ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16; + + return ret; +} + +static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr) +{ + page->dma_addr = addr; + if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT) + page->dma_addr_upper = upper_32_bits(addr); +} + +static inline bool page_pool_put(struct page_pool *pool) +{ + return refcount_dec_and_test(&pool->user_cnt); +} + +static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid) +{ + if (unlikely(pool->p.nid != new_nid)) + page_pool_update_nid(pool, new_nid); +} + +#endif /* _NET_PAGE_POOL_HELPERS_H */ diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h new file mode 100644 index 000000000000..887e7946a597 --- /dev/null +++ b/include/net/page_pool/types.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _NET_PAGE_POOL_TYPES_H +#define _NET_PAGE_POOL_TYPES_H + +#include <linux/dma-direction.h> +#include <linux/ptr_ring.h> + +#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA + * map/unmap + */ +#define PP_FLAG_DMA_SYNC_DEV BIT(1) /* If set all pages that the driver gets + * from page_pool will be + * DMA-synced-for-device according to + * the length provided by the device + * driver. + * Please note DMA-sync-for-CPU is still + * device driver responsibility + */ +#define PP_FLAG_PAGE_FRAG BIT(2) /* for page frag feature */ +#define PP_FLAG_ALL (PP_FLAG_DMA_MAP |\ + PP_FLAG_DMA_SYNC_DEV |\ + PP_FLAG_PAGE_FRAG) + +/* + * Fast allocation side cache array/stack + * + * The cache size and refill watermark are related to the network + * use-case.
The NAPI budget is 64 packets. After a NAPI poll the RX + * ring is usually refilled and the max consumed elements will be 64, + * thus a natural max size of objects needed in the cache. + * + * Keeping room for more objects is due to the XDP_DROP use-case: XDP_DROP + * allows the opportunity to recycle objects directly into + * this array, as it shares the same softirq/NAPI protection. If the + * cache is already full (or partly full) then the XDP_DROP recycles + * would have to take a slower code path. + */ +#define PP_ALLOC_CACHE_SIZE 128 +#define PP_ALLOC_CACHE_REFILL 64 +struct pp_alloc_cache { + u32 count; + struct page *cache[PP_ALLOC_CACHE_SIZE]; +}; + +/** + * struct page_pool_params - page pool parameters + * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_PAGE_FRAG + * @order: 2^order pages on allocation + * @pool_size: size of the ptr_ring + * @nid: NUMA node id to allocate pages from + * @dev: device, for DMA pre-mapping purposes + * @napi: NAPI which is the sole consumer of pages, otherwise NULL + * @dma_dir: DMA mapping direction + * @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV + * @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV + */ +struct page_pool_params { + unsigned int flags; + unsigned int order; + unsigned int pool_size; + int nid; + struct device *dev; + struct napi_struct *napi; + enum dma_data_direction dma_dir; + unsigned int max_len; + unsigned int offset; +/* private: used by test code only */ + void (*init_callback)(struct page *page, void *arg); + void *init_arg; +}; + +#ifdef CONFIG_PAGE_POOL_STATS +/** + * struct page_pool_alloc_stats - allocation statistics + * @fast: successful fast path allocations + * @slow: slow path order-0 allocations + * @slow_high_order: slow path high order allocations + * @empty: ptr ring is empty, so a slow path allocation was forced + * @refill: an allocation which triggered a refill of the cache + * @waive: pages obtained from the ptr ring that cannot be added to + * the cache due to a NUMA mismatch + */ +struct page_pool_alloc_stats { + u64 fast; + u64 slow; + u64 slow_high_order; + u64 empty; + u64 refill; + u64 waive; +}; + +/** + * struct page_pool_recycle_stats - recycling (freeing) statistics + * @cached: recycling placed page in the page pool cache + * @cache_full: page pool cache was full + * @ring: page placed into the ptr ring + * @ring_full: page released from page pool because the ptr ring was full + * @released_refcnt: page released (and not recycled) because refcnt > 1 + */ +struct page_pool_recycle_stats { + u64 cached; + u64 cache_full; + u64 ring; + u64 ring_full; + u64 released_refcnt; }; + +/** + * struct page_pool_stats - combined page pool use statistics + * @alloc_stats: see struct page_pool_alloc_stats + * @recycle_stats: see struct page_pool_recycle_stats + * + * Wrapper struct for combining page pool stats with different storage + * requirements.
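Pulling the documented parameters together, a minimal sketch of creating a pool for a DMA-mapped RX ring (every value below is illustrative, not a recommendation):

static struct page_pool *example_create_pool(struct device *dev,
					     struct napi_struct *napi)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.napi		= napi,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,
		.offset		= 0,
	};

	/* Returns a valid pool or an ERR_PTR() that must be checked. */
	return page_pool_create(&pp);
}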
+ */ +struct page_pool_stats { + struct page_pool_alloc_stats alloc_stats; + struct page_pool_recycle_stats recycle_stats; +}; +#endif + +struct page_pool { + struct page_pool_params p; + + long frag_users; + struct page *frag_page; + unsigned int frag_offset; + u32 pages_state_hold_cnt; + + struct delayed_work release_dw; + void (*disconnect)(void *pool); + unsigned long defer_start; + unsigned long defer_warn; + +#ifdef CONFIG_PAGE_POOL_STATS + /* these stats are incremented while in softirq context */ + struct page_pool_alloc_stats alloc_stats; +#endif + u32 xdp_mem_id; + + /* + * Data structure for allocation side + * + * Drivers' allocation side usually already performs some kind + * of resource protection. Piggyback on this protection, and + * require driver to protect allocation side. + * + * For NIC drivers this means, allocate a page_pool per + * RX-queue. As the RX-queue is already protected by + * Softirq/BH scheduling and napi_schedule. NAPI schedule + * guarantees that a single napi_struct will only be scheduled + * on a single CPU (see napi_schedule). + */ + struct pp_alloc_cache alloc ____cacheline_aligned_in_smp; + + /* Data structure for storing recycled pages. + * + * Returning/freeing pages is more complicated synchronization + * wise, because frees can happen on remote CPUs, with no + * association with allocation resource. + * + * Use ptr_ring, as it separates consumer and producer + * efficiently, in a way that doesn't bounce cache-lines. + * + * TODO: Implement bulk return pages into this structure. + */ + struct ptr_ring ring; + +#ifdef CONFIG_PAGE_POOL_STATS + /* recycle stats are per-cpu to avoid locking */ + struct page_pool_recycle_stats __percpu *recycle_stats; +#endif + atomic_t pages_state_release_cnt; + + /* A page_pool is strictly tied to a single RX-queue being + * protected by NAPI, due to above pp_alloc_cache. This + * refcnt serves to simplify drivers' error handling. + */ + refcount_t user_cnt; + + u64 destroy_cnt; +}; + +struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp); +struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset, + unsigned int size, gfp_t gfp); +struct page_pool *page_pool_create(const struct page_pool_params *params); + +struct xdp_mem_info; + +#ifdef CONFIG_PAGE_POOL +void page_pool_unlink_napi(struct page_pool *pool); +void page_pool_destroy(struct page_pool *pool); +void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *), + struct xdp_mem_info *mem); +void page_pool_put_page_bulk(struct page_pool *pool, void **data, + int count); +#else +static inline void page_pool_unlink_napi(struct page_pool *pool) +{ +} + +static inline void page_pool_destroy(struct page_pool *pool) +{ +} + +static inline void page_pool_use_xdp_mem(struct page_pool *pool, + void (*disconnect)(void *), + struct xdp_mem_info *mem) +{ +} + +static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data, + int count) +{ +} +#endif + +void page_pool_put_defragged_page(struct page_pool *pool, struct page *page, + unsigned int dma_sync_size, + bool allow_direct); + +static inline bool is_page_pool_compiled_in(void) +{ +#ifdef CONFIG_PAGE_POOL + return true; +#else + return false; +#endif +} + +/* Caller must provide appropriate safe context, e.g. NAPI.
*/ +void page_pool_update_nid(struct page_pool *pool, int new_nid); + +#endif /* _NET_PAGE_POOL_TYPES_H */ diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index a2ea45c7b53e..f308e8268651 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -138,19 +138,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block) return NULL; } -static inline -int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb, - void *cb_priv) -{ - return 0; -} - -static inline -void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb, - void *cb_priv) -{ -} - static inline int tcf_classify(struct sk_buff *skb, const struct tcf_block *block, const struct tcf_proto *tp, @@ -866,6 +853,7 @@ struct tc_htb_qopt_offload { u32 parent_classid; u16 classid; u16 qid; + u32 quantum; u64 rate; u64 ceil; u8 prio; diff --git a/include/net/route.h b/include/net/route.h index 5a5c726472bd..51a45b1887b5 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -163,12 +163,12 @@ static inline struct rtable *ip_route_output(struct net *net, __be32 daddr, } static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi4 *fl4, - struct sock *sk, + const struct sock *sk, __be32 daddr, __be32 saddr, __be16 dport, __be16 sport, __u8 proto, __u8 tos, int oif) { - flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos, + flowi4_init_output(fl4, oif, sk ? READ_ONCE(sk->sk_mark) : 0, tos, RT_SCOPE_UNIVERSE, proto, sk ? inet_sk_flowi_flags(sk) : 0, daddr, saddr, dport, sport, sock_net_uid(net, sk)); @@ -298,10 +298,10 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, { __u8 flow_flags = 0; - if (inet_sk(sk)->transparent) + if (inet_test_bit(TRANSPARENT, sk)) flow_flags |= FLOWI_FLAG_ANYSRC; - flowi4_init_output(fl4, oif, sk->sk_mark, ip_sock_rt_tos(sk), + flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk), ip_sock_rt_scope(sk), protocol, flow_flags, dst, src, dport, sport, sk->sk_uid); } @@ -309,7 +309,7 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst, __be32 src, int oif, u8 protocol, __be16 sport, __be16 dport, - struct sock *sk) + const struct sock *sk) { struct net *net = sock_net(sk); struct rtable *rt; @@ -330,7 +330,7 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst, static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable *rt, __be16 orig_sport, __be16 orig_dport, __be16 sport, __be16 dport, - struct sock *sk) + const struct sock *sk) { if (sport != orig_sport || dport != orig_dport) { fl4->fl4_dport = dport; diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index d9076a7a430c..6506221c5fe3 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -190,8 +190,8 @@ int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr * int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm, u32 portid, const struct nlmsghdr *nlh); -int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, - struct netlink_ext_ack *exterr); +int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, + struct netlink_ext_ack *exterr); struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid); #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index e92f73bb3198..f232512505f8
100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -599,6 +599,7 @@ get_default_qdisc_ops(const struct net_device *dev, int ntx) struct Qdisc_class_common { u32 classid; + unsigned int filter_cnt; struct hlist_node hnode; }; @@ -633,6 +634,31 @@ qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id) return NULL; } +static inline bool qdisc_class_in_use(const struct Qdisc_class_common *cl) +{ + return cl->filter_cnt > 0; +} + +static inline void qdisc_class_get(struct Qdisc_class_common *cl) +{ + unsigned int res; + + if (check_add_overflow(cl->filter_cnt, 1, &res)) + WARN(1, "Qdisc class overflow"); + + cl->filter_cnt = res; +} + +static inline void qdisc_class_put(struct Qdisc_class_common *cl) +{ + unsigned int res; + + if (check_sub_overflow(cl->filter_cnt, 1, &res)) + WARN(1, "Qdisc class underflow"); + + cl->filter_cnt = res; +} + static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid) { u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY; @@ -703,7 +729,7 @@ int skb_do_redirect(struct sk_buff *); static inline bool skb_at_tc_ingress(const struct sk_buff *skb) { -#ifdef CONFIG_NET_CLS_ACT +#ifdef CONFIG_NET_XGRESS return skb->tc_at_ingress; #else return false; diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 2a67100b2a17..a2310fa995f6 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -148,8 +148,6 @@ void sctp_icmp_redirect(struct sock *, struct sctp_transport *, void sctp_icmp_proto_unreachable(struct sock *sk, struct sctp_association *asoc, struct sctp_transport *t); -void sctp_backlog_migrate(struct sctp_association *assoc, - struct sock *oldsk, struct sock *newsk); int sctp_transport_hashtable_init(void); void sctp_transport_hashtable_destroy(void); int sctp_hash_transport(struct sctp_transport *t); diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index f37c7a558d6d..64c42bd56bb2 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -156,7 +156,6 @@ sctp_state_fn_t sctp_sf_do_6_2_sack; sctp_state_fn_t sctp_sf_autoclose_timer_expire; /* Prototypes for utility support functions. */ -__u8 sctp_get_chunk_type(struct sctp_chunk *chunk); const struct sctp_sm_table_entry *sctp_sm_lookup_event( struct net *net, enum sctp_event_type event_type, @@ -166,8 +165,6 @@ int sctp_chunk_iif(const struct sctp_chunk *); struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *, struct sctp_chunk *, gfp_t gfp); -__u32 sctp_generate_verification_tag(void); -void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag); /* Prototypes for chunk-building functions. 
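[Editor's note on the sch_generic.h hunk above: the new filter_cnt helpers replace open-coded ++/-- counting in classful qdiscs. A minimal sketch of how a qdisc's bind_tcf/unbind_tcf callbacks might use them — my_qdisc_find() and the my_qdisc_* names are hypothetical, only qdisc_class_get()/qdisc_class_put() come from this patch:

/* Sketch only: assumes a driver-private class lookup helper. */
static unsigned long my_qdisc_bind_tcf(struct Qdisc *sch, unsigned long parent,
					u32 classid)
{
	struct Qdisc_class_common *cl = my_qdisc_find(sch, classid); /* hypothetical */

	if (cl)
		qdisc_class_get(cl);	/* WARNs rather than wrapping on overflow */
	return (unsigned long)cl;
}

static void my_qdisc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	qdisc_class_put((struct Qdisc_class_common *)arg); /* WARNs on underflow */
}

The class-delete path would then refuse removal while qdisc_class_in_use() returns true.]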
*/ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 5c72d1864dd6..5a24d6d8522a 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1122,8 +1122,6 @@ void sctp_outq_free(struct sctp_outq*); void sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t); int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *); int sctp_outq_is_empty(const struct sctp_outq *); -void sctp_outq_restart(struct sctp_outq *); - void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, enum sctp_retransmit_reason reason); void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8); diff --git a/include/net/sock.h b/include/net/sock.h index 2eb916d1ff64..11d503417591 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1323,6 +1323,7 @@ struct proto { /* * Pressure flag: try to collapse. * Technical note: it is used by multiple contexts non atomically. + * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes. * All the __sk_mem_schedule() is of this nature: accounting * is strict, actions are advisory and have some latency. */ @@ -1339,6 +1340,7 @@ struct proto { struct kmem_cache *slab; unsigned int obj_size; + unsigned int ipv6_pinfo_offset; slab_flags_t slab_flags; unsigned int useroffset; /* Usercopy region offset */ unsigned int usersize; /* Usercopy region size */ @@ -1420,6 +1422,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk) return sk->sk_prot->memory_pressure != NULL; } +static inline bool sk_under_global_memory_pressure(const struct sock *sk) +{ + return sk->sk_prot->memory_pressure && + !!READ_ONCE(*sk->sk_prot->memory_pressure); +} + static inline bool sk_under_memory_pressure(const struct sock *sk) { if (!sk->sk_prot->memory_pressure) @@ -1429,7 +1437,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk) mem_cgroup_under_socket_pressure(sk->sk_memcg)) return true; - return !!*sk->sk_prot->memory_pressure; + return !!READ_ONCE(*sk->sk_prot->memory_pressure); } static inline long @@ -1506,7 +1514,7 @@ proto_memory_pressure(struct proto *prot) { if (!prot->memory_pressure) return false; - return !!*prot->memory_pressure; + return !!READ_ONCE(*prot->memory_pressure); } @@ -2814,20 +2822,23 @@ sk_is_refcounted(struct sock *sk) * skb_steal_sock - steal a socket from an sk_buff * @skb: sk_buff to steal the socket from * @refcounted: is set to true if the socket is reference-counted + * @prefetched: is set to true if the socket was assigned from bpf */ static inline struct sock * -skb_steal_sock(struct sk_buff *skb, bool *refcounted) +skb_steal_sock(struct sk_buff *skb, bool *refcounted, bool *prefetched) { if (skb->sk) { struct sock *sk = skb->sk; *refcounted = true; - if (skb_sk_is_prefetched(skb)) + *prefetched = skb_sk_is_prefetched(skb); + if (*prefetched) *refcounted = sk_is_refcounted(sk); skb->destructor = NULL; skb->sk = NULL; return sk; } + *prefetched = false; *refcounted = false; return NULL; } diff --git a/include/net/switchdev.h b/include/net/switchdev.h index ca0312b78294..a43062d4c734 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -201,8 +201,6 @@ struct switchdev_obj_in_state_mrp { #define SWITCHDEV_OBJ_IN_STATE_MRP(OBJ) \ container_of((OBJ), struct switchdev_obj_in_state_mrp, obj) -typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj); - struct switchdev_brport { struct net_device *dev; const void *ctx; @@ -231,6 +229,7 @@ enum 
switchdev_notifier_type { SWITCHDEV_BRPORT_OFFLOADED, SWITCHDEV_BRPORT_UNOFFLOADED, + SWITCHDEV_BRPORT_REPLAY, }; struct switchdev_notifier_info { @@ -299,6 +298,11 @@ void switchdev_bridge_port_unoffload(struct net_device *brport_dev, const void *ctx, struct notifier_block *atomic_nb, struct notifier_block *blocking_nb); +int switchdev_bridge_port_replay(struct net_device *brport_dev, + struct net_device *dev, const void *ctx, + struct notifier_block *atomic_nb, + struct notifier_block *blocking_nb, + struct netlink_ext_ack *extack); void switchdev_deferred_process(void); int switchdev_port_attr_set(struct net_device *dev, @@ -322,10 +326,6 @@ int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev, struct switchdev_notifier_info *info, struct netlink_ext_ack *extack); -void switchdev_port_fwd_mark_set(struct net_device *dev, - struct net_device *group_dev, - bool joining); - int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event, const struct switchdev_notifier_fdb_info *fdb_info, bool (*check_cb)(const struct net_device *dev), diff --git a/include/net/tcp.h b/include/net/tcp.h index 0ca972ebd3dd..91688d0dadcd 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -45,7 +45,6 @@ #include <linux/memcontrol.h> #include <linux/bpf-cgroup.h> #include <linux/siphash.h> -#include <linux/net_mm.h> extern struct inet_hashinfo tcp_hashinfo; @@ -323,7 +322,6 @@ int tcp_v4_early_demux(struct sk_buff *skb); int tcp_v4_rcv(struct sk_buff *skb); void tcp_remove_empty_skb(struct sock *sk); -int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size); int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied, @@ -350,7 +348,6 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp, bool force_schedule); -void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks); static inline void tcp_dec_quickack_mode(struct sock *sk, const unsigned int pkts) { @@ -606,7 +603,6 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, unsigned int mss_now, gfp_t gfp); void tcp_send_probe0(struct sock *); -void tcp_send_partial(struct sock *); int tcp_write_wakeup(struct sock *, int mib); void tcp_send_fin(struct sock *sk); void tcp_send_active_reset(struct sock *sk, gfp_t priority); @@ -624,7 +620,6 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb, void tcp_rearm_rto(struct sock *sk); void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); void tcp_reset(struct sock *sk, struct sk_buff *skb); -void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb); void tcp_fin(struct sock *sk); void tcp_check_space(struct sock *sk); void tcp_sack_compress_send_ack(struct sock *sk); @@ -1432,13 +1427,39 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 *window_clamp, int wscale_ok, __u8 *rcv_wscale, __u32 init_rcv_wnd); +static inline int __tcp_win_from_space(u8 scaling_ratio, int space) +{ + s64 scaled_space = (s64)space * scaling_ratio; + + return scaled_space >> TCP_RMEM_TO_WIN_SCALE; +} + static inline int tcp_win_from_space(const struct sock *sk, int space) { - int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale); + return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space); +} + +/* inverse of __tcp_win_from_space() */ +static inline int 
__tcp_space_from_win(u8 scaling_ratio, int win) +{ + u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE; - return tcp_adv_win_scale <= 0 ? - (space>>(-tcp_adv_win_scale)) : - space - (space>>tcp_adv_win_scale); + do_div(val, scaling_ratio); + return val; +} + +static inline int tcp_space_from_win(const struct sock *sk, int win) +{ + return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win); +} + +static inline void tcp_scaling_ratio_init(struct sock *sk) +{ + /* Assume a conservative default of 1200 bytes of payload per 4K page. + * This may be adjusted later in tcp_measure_rcv_mss(). + */ + tcp_sk(sk)->scaling_ratio = (1200 << TCP_RMEM_TO_WIN_SCALE) / + SKB_TRUESIZE(4096); } /* Note: caller must be prepared to deal with negative returns */ @@ -2009,7 +2030,7 @@ static inline bool inet_sk_transparent(const struct sock *sk) case TCP_NEW_SYN_RECV: return inet_rsk(inet_reqsk(sk))->no_srccheck; } - return inet_sk(sk)->transparent; + return inet_test_bit(TRANSPARENT, sk); } /* Determines whether this is a thin stream (which may suffer from @@ -2336,7 +2357,6 @@ struct sk_msg; struct sk_psock; #ifdef CONFIG_BPF_SYSCALL -struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock); int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); void tcp_bpf_clone(const struct sock *sk, struct sock *newsk); #endif /* CONFIG_BPF_SYSCALL */ diff --git a/include/net/tcx.h b/include/net/tcx.h new file mode 100644 index 000000000000..264f147953ba --- /dev/null +++ b/include/net/tcx.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2023 Isovalent */ +#ifndef __NET_TCX_H +#define __NET_TCX_H + +#include <linux/bpf.h> +#include <linux/bpf_mprog.h> + +#include <net/sch_generic.h> + +struct mini_Qdisc; + +struct tcx_entry { + struct mini_Qdisc __rcu *miniq; + struct bpf_mprog_bundle bundle; + bool miniq_active; + struct rcu_head rcu; +}; + +struct tcx_link { + struct bpf_link link; + struct net_device *dev; + u32 location; +}; + +static inline void tcx_set_ingress(struct sk_buff *skb, bool ingress) +{ +#ifdef CONFIG_NET_XGRESS + skb->tc_at_ingress = ingress; +#endif +} + +#ifdef CONFIG_NET_XGRESS +static inline struct tcx_entry *tcx_entry(struct bpf_mprog_entry *entry) +{ + struct bpf_mprog_bundle *bundle = entry->parent; + + return container_of(bundle, struct tcx_entry, bundle); +} + +static inline struct tcx_link *tcx_link(struct bpf_link *link) +{ + return container_of(link, struct tcx_link, link); +} + +static inline const struct tcx_link *tcx_link_const(const struct bpf_link *link) +{ + return tcx_link((struct bpf_link *)link); +} + +void tcx_inc(void); +void tcx_dec(void); + +static inline void tcx_entry_sync(void) +{ + /* bpf_mprog_entry got a/b swapped, therefore ensure that + * there are no inflight users on the old one anymore. 
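[Editor's note: tcx_entry_update() publishes an entry with rcu_assign_pointer() while tcx_entry_sync() waits out readers of the old one. A hedged sketch of the sequence a caller might follow after bpf_mprog swaps the a/b entries — not the literal kernel update path, error handling elided:

static void tcx_swap_and_reclaim(struct net_device *dev,
				 struct bpf_mprog_entry *nentry, bool ingress)
{
	ASSERT_RTNL();
	tcx_entry_update(dev, nentry, ingress);	/* RCU-publish the new entry */
	tcx_entry_sync();			/* no readers left on the old one */
	/* the old entry may now be torn down, e.g. via tcx_entry_free() */
}]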
+ */ + synchronize_rcu(); +} + +static inline void +tcx_entry_update(struct net_device *dev, struct bpf_mprog_entry *entry, + bool ingress) +{ + ASSERT_RTNL(); + if (ingress) + rcu_assign_pointer(dev->tcx_ingress, entry); + else + rcu_assign_pointer(dev->tcx_egress, entry); +} + +static inline struct bpf_mprog_entry * +tcx_entry_fetch(struct net_device *dev, bool ingress) +{ + ASSERT_RTNL(); + if (ingress) + return rcu_dereference_rtnl(dev->tcx_ingress); + else + return rcu_dereference_rtnl(dev->tcx_egress); +} + +static inline struct bpf_mprog_entry *tcx_entry_create(void) +{ + struct tcx_entry *tcx = kzalloc(sizeof(*tcx), GFP_KERNEL); + + if (tcx) { + bpf_mprog_bundle_init(&tcx->bundle); + return &tcx->bundle.a; + } + return NULL; +} + +static inline void tcx_entry_free(struct bpf_mprog_entry *entry) +{ + kfree_rcu(tcx_entry(entry), rcu); +} + +static inline struct bpf_mprog_entry * +tcx_entry_fetch_or_create(struct net_device *dev, bool ingress, bool *created) +{ + struct bpf_mprog_entry *entry = tcx_entry_fetch(dev, ingress); + + *created = false; + if (!entry) { + entry = tcx_entry_create(); + if (!entry) + return NULL; + *created = true; + } + return entry; +} + +static inline void tcx_skeys_inc(bool ingress) +{ + tcx_inc(); + if (ingress) + net_inc_ingress_queue(); + else + net_inc_egress_queue(); +} + +static inline void tcx_skeys_dec(bool ingress) +{ + if (ingress) + net_dec_ingress_queue(); + else + net_dec_egress_queue(); + tcx_dec(); +} + +static inline void tcx_miniq_set_active(struct bpf_mprog_entry *entry, + const bool active) +{ + ASSERT_RTNL(); + tcx_entry(entry)->miniq_active = active; +} + +static inline bool tcx_entry_is_active(struct bpf_mprog_entry *entry) +{ + ASSERT_RTNL(); + return bpf_mprog_total(entry) || tcx_entry(entry)->miniq_active; +} + +static inline enum tcx_action_base tcx_action_code(struct sk_buff *skb, + int code) +{ + switch (code) { + case TCX_PASS: + skb->tc_index = qdisc_skb_cb(skb)->tc_classid; + fallthrough; + case TCX_DROP: + case TCX_REDIRECT: + return code; + case TCX_NEXT: + default: + return TCX_NEXT; + } +} +#endif /* CONFIG_NET_XGRESS */ + +#if defined(CONFIG_NET_XGRESS) && defined(CONFIG_BPF_SYSCALL) +int tcx_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int tcx_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int tcx_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog); +void tcx_uninstall(struct net_device *dev, bool ingress); + +int tcx_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr); + +static inline void dev_tcx_uninstall(struct net_device *dev) +{ + ASSERT_RTNL(); + tcx_uninstall(dev, true); + tcx_uninstall(dev, false); +} +#else +static inline int tcx_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int tcx_link_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int tcx_prog_detach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int tcx_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EINVAL; +} + +static inline void dev_tcx_uninstall(struct net_device *dev) +{ +} +#endif /* CONFIG_NET_XGRESS && CONFIG_BPF_SYSCALL */ +#endif /* __NET_TCX_H */ diff --git a/include/net/tls.h b/include/net/tls.h index 5e71dd3df8ca..a2b44578dcb7 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -51,16 +51,6 @@ struct tls_rec; -struct tls_cipher_size_desc { - unsigned int iv; - unsigned int 
key; - unsigned int salt; - unsigned int tag; - unsigned int rec_seq; -}; - -extern const struct tls_cipher_size_desc tls_cipher_size_desc[]; - /* Maximum data size carried in a TLS record */ #define TLS_MAX_PAYLOAD_SIZE ((size_t)1 << 14) @@ -69,10 +59,6 @@ extern const struct tls_cipher_size_desc tls_cipher_size_desc[]; #define TLS_CRYPTO_INFO_READY(info) ((info)->cipher_type) -#define TLS_RECORD_TYPE_ALERT 0x15 -#define TLS_RECORD_TYPE_HANDSHAKE 0x16 -#define TLS_RECORD_TYPE_DATA 0x17 - #define TLS_AAD_SPACE_SIZE 13 #define MAX_IV_SIZE 16 diff --git a/include/net/tls_prot.h b/include/net/tls_prot.h new file mode 100644 index 000000000000..68a40756440b --- /dev/null +++ b/include/net/tls_prot.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. + * + * TLS Protocol definitions + * + * From https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml + */ + +#ifndef _TLS_PROT_H +#define _TLS_PROT_H + +/* + * TLS Record protocol: ContentType + */ +enum { + TLS_RECORD_TYPE_CHANGE_CIPHER_SPEC = 20, + TLS_RECORD_TYPE_ALERT = 21, + TLS_RECORD_TYPE_HANDSHAKE = 22, + TLS_RECORD_TYPE_DATA = 23, + TLS_RECORD_TYPE_HEARTBEAT = 24, + TLS_RECORD_TYPE_TLS12_CID = 25, + TLS_RECORD_TYPE_ACK = 26, +}; + +/* + * TLS Alert protocol: AlertLevel + */ +enum { + TLS_ALERT_LEVEL_WARNING = 1, + TLS_ALERT_LEVEL_FATAL = 2, +}; + +/* + * TLS Alert protocol: AlertDescription + */ +enum { + TLS_ALERT_DESC_CLOSE_NOTIFY = 0, + TLS_ALERT_DESC_UNEXPECTED_MESSAGE = 10, + TLS_ALERT_DESC_BAD_RECORD_MAC = 20, + TLS_ALERT_DESC_RECORD_OVERFLOW = 22, + TLS_ALERT_DESC_HANDSHAKE_FAILURE = 40, + TLS_ALERT_DESC_BAD_CERTIFICATE = 42, + TLS_ALERT_DESC_UNSUPPORTED_CERTIFICATE = 43, + TLS_ALERT_DESC_CERTIFICATE_REVOKED = 44, + TLS_ALERT_DESC_CERTIFICATE_EXPIRED = 45, + TLS_ALERT_DESC_CERTIFICATE_UNKNOWN = 46, + TLS_ALERT_DESC_ILLEGAL_PARAMETER = 47, + TLS_ALERT_DESC_UNKNOWN_CA = 48, + TLS_ALERT_DESC_ACCESS_DENIED = 49, + TLS_ALERT_DESC_DECODE_ERROR = 50, + TLS_ALERT_DESC_DECRYPT_ERROR = 51, + TLS_ALERT_DESC_TOO_MANY_CIDS_REQUESTED = 52, + TLS_ALERT_DESC_PROTOCOL_VERSION = 70, + TLS_ALERT_DESC_INSUFFICIENT_SECURITY = 71, + TLS_ALERT_DESC_INTERNAL_ERROR = 80, + TLS_ALERT_DESC_INAPPROPRIATE_FALLBACK = 86, + TLS_ALERT_DESC_USER_CANCELED = 90, + TLS_ALERT_DESC_MISSING_EXTENSION = 109, + TLS_ALERT_DESC_UNSUPPORTED_EXTENSION = 110, + TLS_ALERT_DESC_UNRECOGNIZED_NAME = 112, + TLS_ALERT_DESC_BAD_CERTIFICATE_STATUS_RESPONSE = 113, + TLS_ALERT_DESC_UNKNOWN_PSK_IDENTITY = 115, + TLS_ALERT_DESC_CERTIFICATE_REQUIRED = 116, + TLS_ALERT_DESC_NO_APPLICATION_PROTOCOL = 120, +}; + +#endif /* _TLS_PROT_H */ diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index d27b1caf3753..1a97e3f32029 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -33,8 +33,6 @@ void udplitev6_exit(void); int tcpv6_init(void); void tcpv6_exit(void); -int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); - /* this does all the common and the specific ctl work */ void ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb); diff --git a/include/net/udp.h b/include/net/udp.h index 4d13424f8f72..488a6d2babcc 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -273,9 +273,6 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags, int udp_v4_early_demux(struct sk_buff *skb); bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); -int udp_get_port(struct sock *sk, unsigned short snum, - 
int (*saddr_cmp)(const struct sock *, - const struct sock *)); int udp_err(struct sk_buff *, u32); int udp_abort(struct sock *sk, int err); int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); @@ -529,7 +526,6 @@ static inline void udp_post_segment_fix_csum(struct sk_buff *skb) #ifdef CONFIG_BPF_SYSCALL struct sk_psock; -struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock); int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); #endif diff --git a/include/net/udplite.h b/include/net/udplite.h index 299c14ce2bb9..bd33ff2b8f42 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -81,6 +81,4 @@ static inline __wsum udplite_csum(struct sk_buff *skb) } void udplite4_register(void); -int udplite_get_port(struct sock *sk, unsigned short snum, - int (*scmp)(const struct sock *, const struct sock *)); #endif /* _UDPLITE_H */ diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 1648240c9668..6a9f8a5f387c 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -556,12 +556,12 @@ static inline void vxlan_flag_attr_error(int attrtype, } static inline bool vxlan_fdb_nh_path_select(struct nexthop *nh, - int hash, + u32 hash, struct vxlan_rdst *rdst) { struct fib_nh_common *nhc; - nhc = nexthop_path_fdb_result(nh, hash); + nhc = nexthop_path_fdb_result(nh, hash >> 1); if (unlikely(!nhc)) return false; diff --git a/include/net/xdp.h b/include/net/xdp.h index d1c5381fc95f..de08c8e0d134 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -6,9 +6,10 @@ #ifndef __LINUX_NET_XDP_H__ #define __LINUX_NET_XDP_H__ -#include <linux/skbuff.h> /* skb_shared_info */ -#include <uapi/linux/netdev.h> #include <linux/bitfield.h> +#include <linux/filter.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> /* skb_shared_info */ /** * DOC: XDP RX-queue information @@ -45,8 +46,6 @@ enum xdp_mem_type { MEM_TYPE_MAX, }; -typedef u32 xdp_features_t; - /* XDP flags for ndo_xdp_xmit */ #define XDP_XMIT_FLUSH (1U << 0) /* doorbell signal consumer */ #define XDP_XMIT_FLAGS_MASK XDP_XMIT_FLUSH @@ -443,6 +442,12 @@ enum xdp_rss_hash_type { XDP_RSS_TYPE_L4_IPV6_SCTP_EX = XDP_RSS_TYPE_L4_IPV6_SCTP | XDP_RSS_L3_DYNHDR, }; +struct xdp_metadata_ops { + int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp); + int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type); +}; + #ifdef CONFIG_NET u32 bpf_xdp_metadata_kfunc_id(int id); bool bpf_dev_bound_kfunc_id(u32 btf_id); @@ -474,4 +479,20 @@ static inline void xdp_clear_features_flag(struct net_device *dev) xdp_set_features_flag(dev, 0); } +static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + /* Driver XDP hooks are invoked within a single NAPI poll cycle and thus + * under local_bh_disable(), which provides the needed RCU protection + * for accessing map entries. 
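[Editor's note: with bpf_prog_run_xdp() defined here (now folding in the bonding XDP_TX redirect), a driver's NAPI poll path calls it roughly as below; the mydrv_* naming is illustrative, not from this patch:

static u32 mydrv_run_xdp(struct bpf_prog *prog, struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);	/* NAPI context: BHs are off */

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_REDIRECT:
		return act;
	default:
		bpf_warn_invalid_xdp_action(xdp->rxq->dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
	case XDP_DROP:
		return XDP_DROP;
	}
}]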
+ */ + u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); + + if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) { + if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev)) + act = xdp_master_redirect(xdp); + } + + return act; +} #endif /* __LINUX_NET_XDP_H__ */ diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index e96a1151ec75..1617af380162 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -52,6 +52,7 @@ struct xdp_sock { struct xsk_buff_pool *pool; u16 queue_id; bool zc; + bool sg; enum { XSK_READY = 0, XSK_BOUND, @@ -67,6 +68,12 @@ struct xdp_sock { u64 rx_dropped; u64 rx_queue_full; + /* When __xsk_generic_xmit() must return before it sees the EOP descriptor for the current + * packet, the partially built skb is saved here so that packet building can resume in next + * call of __xsk_generic_xmit(). + */ + struct sk_buff *skb; + struct list_head map_list; /* Protects map_list */ spinlock_t map_list_lock; diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h index c243f906ebed..1f6fc8c7a84c 100644 --- a/include/net/xdp_sock_drv.h +++ b/include/net/xdp_sock_drv.h @@ -89,6 +89,11 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool) return xp_alloc(pool); } +static inline bool xsk_is_eop_desc(struct xdp_desc *desc) +{ + return !xp_mb_desc(desc); +} + /* Returns as many entries as possible up to max. 0 <= N <= max. */ static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max) { @@ -103,10 +108,45 @@ static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count) static inline void xsk_buff_free(struct xdp_buff *xdp) { struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); + struct list_head *xskb_list = &xskb->pool->xskb_list; + struct xdp_buff_xsk *pos, *tmp; + + if (likely(!xdp_buff_has_frags(xdp))) + goto out; + list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) { + list_del(&pos->xskb_list_node); + xp_free(pos); + } + + xdp_get_shared_info_from_buff(xdp)->nr_frags = 0; +out: xp_free(xskb); } +static inline void xsk_buff_add_frag(struct xdp_buff *xdp) +{ + struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp); + + list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list); +} + +static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first) +{ + struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp); + struct xdp_buff *ret = NULL; + struct xdp_buff_xsk *frag; + + frag = list_first_entry_or_null(&xskb->pool->xskb_list, + struct xdp_buff_xsk, xskb_list_node); + if (frag) { + list_del(&frag->xskb_list_node); + ret = &frag->xdp; + } + + return ret; +} + static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) { xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM; @@ -241,6 +281,11 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool) return NULL; } +static inline bool xsk_is_eop_desc(struct xdp_desc *desc) +{ + return false; +} + static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max) { return 0; @@ -255,6 +300,15 @@ static inline void xsk_buff_free(struct xdp_buff *xdp) { } +static inline void xsk_buff_add_frag(struct xdp_buff *xdp) +{ +} + +static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first) +{ + return NULL; +} + static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) { } diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 151ca95dd08d..363c7d510554 
100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1984,6 +1984,7 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x) if (dev->xfrmdev_ops->xdo_dev_state_free) dev->xfrmdev_ops->xdo_dev_state_free(x); xso->dev = NULL; + xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; netdev_put(dev, &xso->dev_tracker); } } diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index a8d7b8a3688a..b0bdff26fc88 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -29,6 +29,7 @@ struct xdp_buff_xsk { struct xsk_buff_pool *pool; u64 orig_addr; struct list_head free_list_node; + struct list_head xskb_list_node; }; #define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb)) @@ -54,6 +55,7 @@ struct xsk_buff_pool { struct xdp_umem *umem; struct work_struct work; struct list_head free_list; + struct list_head xskb_list; u32 heads_cnt; u16 queue_id; @@ -184,6 +186,11 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool, !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK); } +static inline bool xp_mb_desc(struct xdp_desc *desc) +{ + return desc->options & XDP_PKT_CONTD; +} + static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr) { return addr & pool->chunk_mask; diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 75b2235b99e2..b9230b6add04 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -194,6 +194,7 @@ struct scsi_device { unsigned no_start_on_add:1; /* do not issue start on add */ unsigned allow_restart:1; /* issue START_UNIT in error handler */ unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */ + unsigned no_start_on_resume:1; /* Do not issue START_STOP_UNIT on resume */ unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */ unsigned no_uld_attach:1; /* disable connecting to upper level drivers */ unsigned select_no_atn:1; diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index a8c2817335b9..1e1b40f4e664 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -1165,7 +1165,6 @@ int ocelot_port_get_mm(struct ocelot *ocelot, int port, struct ethtool_mm_state *state); int ocelot_port_mqprio(struct ocelot *ocelot, int port, struct tc_mqprio_qopt_offload *mqprio); -void ocelot_port_update_preemptible_tcs(struct ocelot *ocelot, int port); #if IS_ENABLED(CONFIG_BRIDGE_MRP) int ocelot_mrp_add(struct ocelot *ocelot, int port, diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index fc3001483e62..a5ef84944a06 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h @@ -175,6 +175,9 @@ struct tegra_mc_icc_ops { int (*get_bw)(struct icc_node *node, u32 *avg, u32 *peak); }; +struct icc_node *tegra_mc_icc_xlate(struct of_phandle_args *spec, void *data); +extern const struct tegra_mc_icc_ops tegra_mc_icc_ops; + struct tegra_mc_ops { /* * @probe: Callback to set up SoC-specific bits of the memory controller. 
This is called diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index a8206f5332e9..b2db2c2f1c57 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -38,7 +38,6 @@ struct find_free_extent_ctl; __print_symbolic(type, \ { BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \ { BTRFS_EXTENT_DATA_REF_KEY, "EXTENT_DATA_REF" }, \ - { BTRFS_EXTENT_REF_V0_KEY, "EXTENT_REF_V0" }, \ { BTRFS_SHARED_BLOCK_REF_KEY, "SHARED_BLOCK_REF" }, \ { BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" }) @@ -2482,7 +2481,7 @@ DECLARE_EVENT_CLASS(btrfs_raid56_bio, __entry->offset, __entry->opf, __entry->physical, __entry->len) ); -DEFINE_EVENT(btrfs_raid56_bio, raid56_read_partial, +DEFINE_EVENT(btrfs_raid56_bio, raid56_read, TP_PROTO(const struct btrfs_raid_bio *rbio, const struct bio *bio, const struct raid56_bio_trace_info *trace_info), @@ -2490,32 +2489,7 @@ DEFINE_EVENT(btrfs_raid56_bio, raid56_read_partial, TP_ARGS(rbio, bio, trace_info) ); -DEFINE_EVENT(btrfs_raid56_bio, raid56_write_stripe, - TP_PROTO(const struct btrfs_raid_bio *rbio, - const struct bio *bio, - const struct raid56_bio_trace_info *trace_info), - - TP_ARGS(rbio, bio, trace_info) -); - - -DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_write_stripe, - TP_PROTO(const struct btrfs_raid_bio *rbio, - const struct bio *bio, - const struct raid56_bio_trace_info *trace_info), - - TP_ARGS(rbio, bio, trace_info) -); - -DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_read, - TP_PROTO(const struct btrfs_raid_bio *rbio, - const struct bio *bio, - const struct raid56_bio_trace_info *trace_info), - - TP_ARGS(rbio, bio, trace_info) -); - -DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_read_recover, +DEFINE_EVENT(btrfs_raid56_bio, raid56_write, TP_PROTO(const struct btrfs_raid_bio *rbio, const struct bio *bio, const struct raid56_bio_trace_info *trace_info), diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h index 71dbe8bfa7db..e18684b02c3d 100644 --- a/include/trace/events/erofs.h +++ b/include/trace/events/erofs.h @@ -80,11 +80,11 @@ TRACE_EVENT(erofs_fill_inode, __entry->blkaddr, __entry->ofs) ); -TRACE_EVENT(erofs_readpage, +TRACE_EVENT(erofs_read_folio, - TP_PROTO(struct page *page, bool raw), + TP_PROTO(struct folio *folio, bool raw), - TP_ARGS(page, raw), + TP_ARGS(folio, raw), TP_STRUCT__entry( __field(dev_t, dev ) @@ -96,11 +96,11 @@ TRACE_EVENT(erofs_readpage, ), TP_fast_assign( - __entry->dev = page->mapping->host->i_sb->s_dev; - __entry->nid = EROFS_I(page->mapping->host)->nid; - __entry->dir = S_ISDIR(page->mapping->host->i_mode); - __entry->index = page->index; - __entry->uptodate = PageUptodate(page); + __entry->dev = folio->mapping->host->i_sb->s_dev; + __entry->nid = EROFS_I(folio->mapping->host)->nid; + __entry->dir = S_ISDIR(folio->mapping->host->i_mode); + __entry->index = folio->index; + __entry->uptodate = folio_test_uptodate(folio); __entry->raw = raw; ), diff --git a/include/trace/events/handshake.h b/include/trace/events/handshake.h index 8dadcab5f12a..bdd8a03cf5ba 100644 --- a/include/trace/events/handshake.h +++ b/include/trace/events/handshake.h @@ -6,7 +6,86 @@ #define _TRACE_HANDSHAKE_H #include <linux/net.h> +#include <net/tls_prot.h> #include <linux/tracepoint.h> +#include <trace/events/net_probe_common.h> + +#define TLS_RECORD_TYPE_LIST \ + record_type(CHANGE_CIPHER_SPEC) \ + record_type(ALERT) \ + record_type(HANDSHAKE) \ + record_type(DATA) \ + record_type(HEARTBEAT) \ + record_type(TLS12_CID) \ + record_type_end(ACK) + +#undef record_type +#undef record_type_end +#define 
record_type(x) TRACE_DEFINE_ENUM(TLS_RECORD_TYPE_##x); +#define record_type_end(x) TRACE_DEFINE_ENUM(TLS_RECORD_TYPE_##x); + +TLS_RECORD_TYPE_LIST + +#undef record_type +#undef record_type_end +#define record_type(x) { TLS_RECORD_TYPE_##x, #x }, +#define record_type_end(x) { TLS_RECORD_TYPE_##x, #x } + +#define show_tls_content_type(type) \ + __print_symbolic(type, TLS_RECORD_TYPE_LIST) + +TRACE_DEFINE_ENUM(TLS_ALERT_LEVEL_WARNING); +TRACE_DEFINE_ENUM(TLS_ALERT_LEVEL_FATAL); + +#define show_tls_alert_level(level) \ + __print_symbolic(level, \ + { TLS_ALERT_LEVEL_WARNING, "Warning" }, \ + { TLS_ALERT_LEVEL_FATAL, "Fatal" }) + +#define TLS_ALERT_DESCRIPTION_LIST \ + alert_description(CLOSE_NOTIFY) \ + alert_description(UNEXPECTED_MESSAGE) \ + alert_description(BAD_RECORD_MAC) \ + alert_description(RECORD_OVERFLOW) \ + alert_description(HANDSHAKE_FAILURE) \ + alert_description(BAD_CERTIFICATE) \ + alert_description(UNSUPPORTED_CERTIFICATE) \ + alert_description(CERTIFICATE_REVOKED) \ + alert_description(CERTIFICATE_EXPIRED) \ + alert_description(CERTIFICATE_UNKNOWN) \ + alert_description(ILLEGAL_PARAMETER) \ + alert_description(UNKNOWN_CA) \ + alert_description(ACCESS_DENIED) \ + alert_description(DECODE_ERROR) \ + alert_description(DECRYPT_ERROR) \ + alert_description(TOO_MANY_CIDS_REQUESTED) \ + alert_description(PROTOCOL_VERSION) \ + alert_description(INSUFFICIENT_SECURITY) \ + alert_description(INTERNAL_ERROR) \ + alert_description(INAPPROPRIATE_FALLBACK) \ + alert_description(USER_CANCELED) \ + alert_description(MISSING_EXTENSION) \ + alert_description(UNSUPPORTED_EXTENSION) \ + alert_description(UNRECOGNIZED_NAME) \ + alert_description(BAD_CERTIFICATE_STATUS_RESPONSE) \ + alert_description(UNKNOWN_PSK_IDENTITY) \ + alert_description(CERTIFICATE_REQUIRED) \ + alert_description_end(NO_APPLICATION_PROTOCOL) + +#undef alert_description +#undef alert_description_end +#define alert_description(x) TRACE_DEFINE_ENUM(TLS_ALERT_DESC_##x); +#define alert_description_end(x) TRACE_DEFINE_ENUM(TLS_ALERT_DESC_##x); + +TLS_ALERT_DESCRIPTION_LIST + +#undef alert_description +#undef alert_description_end +#define alert_description(x) { TLS_ALERT_DESC_##x, #x }, +#define alert_description_end(x) { TLS_ALERT_DESC_##x, #x } + +#define show_tls_alert_description(desc) \ + __print_symbolic(desc, TLS_ALERT_DESCRIPTION_LIST) DECLARE_EVENT_CLASS(handshake_event_class, TP_PROTO( @@ -106,6 +185,47 @@ DECLARE_EVENT_CLASS(handshake_error_class, ), \ TP_ARGS(net, req, sk, err)) +DECLARE_EVENT_CLASS(handshake_alert_class, + TP_PROTO( + const struct sock *sk, + unsigned char level, + unsigned char description + ), + TP_ARGS(sk, level, description), + TP_STRUCT__entry( + /* sockaddr_in6 is always bigger than sockaddr_in */ + __array(__u8, saddr, sizeof(struct sockaddr_in6)) + __array(__u8, daddr, sizeof(struct sockaddr_in6)) + __field(unsigned int, netns_ino) + __field(unsigned long, level) + __field(unsigned long, description) + ), + TP_fast_assign( + const struct inet_sock *inet = inet_sk(sk); + + memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); + memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); + TP_STORE_ADDR_PORTS(__entry, inet, sk); + + __entry->netns_ino = sock_net(sk)->ns.inum; + __entry->level = level; + __entry->description = description; + ), + TP_printk("src=%pISpc dest=%pISpc %s: %s", + __entry->saddr, __entry->daddr, + show_tls_alert_level(__entry->level), + show_tls_alert_description(__entry->description) + ) +); +#define DEFINE_HANDSHAKE_ALERT(name) \ + DEFINE_EVENT(handshake_alert_class, 
name, \ + TP_PROTO( \ + const struct sock *sk, \ + unsigned char level, \ + unsigned char description \ + ), \ + TP_ARGS(sk, level, description)) + /* * Request lifetime events @@ -154,6 +274,46 @@ DEFINE_HANDSHAKE_ERROR(handshake_cmd_accept_err); DEFINE_HANDSHAKE_FD_EVENT(handshake_cmd_done); DEFINE_HANDSHAKE_ERROR(handshake_cmd_done_err); +/* + * TLS Record events + */ + +TRACE_EVENT(tls_contenttype, + TP_PROTO( + const struct sock *sk, + unsigned char type + ), + TP_ARGS(sk, type), + TP_STRUCT__entry( + /* sockaddr_in6 is always bigger than sockaddr_in */ + __array(__u8, saddr, sizeof(struct sockaddr_in6)) + __array(__u8, daddr, sizeof(struct sockaddr_in6)) + __field(unsigned int, netns_ino) + __field(unsigned long, type) + ), + TP_fast_assign( + const struct inet_sock *inet = inet_sk(sk); + + memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); + memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); + TP_STORE_ADDR_PORTS(__entry, inet, sk); + + __entry->netns_ino = sock_net(sk)->ns.inum; + __entry->type = type; + ), + TP_printk("src=%pISpc dest=%pISpc %s", + __entry->saddr, __entry->daddr, + show_tls_content_type(__entry->type) + ) +); + +/* + * TLS Alert events + */ + +DEFINE_HANDSHAKE_ALERT(tls_alert_send); +DEFINE_HANDSHAKE_ALERT(tls_alert_recv); + #endif /* _TRACE_HANDSHAKE_H */ #include <trace/define_trace.h> diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h index ca534501158b..6834356b2d2a 100644 --- a/include/trace/events/page_pool.h +++ b/include/trace/events/page_pool.h @@ -9,7 +9,7 @@ #include <linux/tracepoint.h> #include <trace/events/mmflags.h> -#include <net/page_pool.h> +#include <net/page_pool/types.h> TRACE_EVENT(page_pool_release, diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h index c0248a8fa79c..e63d4a24d879 100644 --- a/include/trace/events/spi.h +++ b/include/trace/events/spi.h @@ -167,7 +167,7 @@ TRACE_EVENT(spi_message_done, ); /* - * consider a buffer valid if non-NULL and if it doesn't match the dummy buffer + * Consider a buffer valid if non-NULL and if it doesn't match the dummy buffer * that only exist to work with controllers that have SPI_CONTROLLER_MUST_TX or * SPI_CONTROLLER_MUST_RX. 
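[Editor's note: the tls_contenttype event defined above takes the socket plus the raw ContentType octet. A plausible call site in the TLS receive path would pull that octet straight out of the 5-byte record header — a sketch, not the actual tls_sw hookup:

static void report_content_type(const struct sock *sk, const u8 *rec_hdr)
{
	/* Byte 0 of a TLS record header is the ContentType (see tls_prot.h). */
	trace_tls_contenttype(sk, rec_hdr[0]);
}]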
*/ diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index bf06db8d2046..7b1ddffa3dfc 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -381,6 +381,7 @@ TRACE_EVENT(tcp_cong_state_set, __field(const void *, skaddr) __field(__u16, sport) __field(__u16, dport) + __field(__u16, family) __array(__u8, saddr, 4) __array(__u8, daddr, 4) __array(__u8, saddr_v6, 16) @@ -396,6 +397,7 @@ TRACE_EVENT(tcp_cong_state_set, __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); + __entry->family = sk->sk_family; p32 = (__be32 *) __entry->saddr; *p32 = inet->inet_saddr; @@ -409,7 +411,8 @@ TRACE_EVENT(tcp_cong_state_set, __entry->cong_state = ca_state; ), - TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u", + TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u", + show_family_name(__entry->family), __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, __entry->saddr_v6, __entry->daddr_v6, diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h index 202b3e3e67ff..f50048af5fcc 100644 --- a/include/trace/events/thp.h +++ b/include/trace/events/thp.h @@ -8,25 +8,34 @@ #include <linux/types.h> #include <linux/tracepoint.h> -TRACE_EVENT(hugepage_set_pmd, +DECLARE_EVENT_CLASS(hugepage_set, - TP_PROTO(unsigned long addr, unsigned long pmd), - TP_ARGS(addr, pmd), + TP_PROTO(unsigned long addr, unsigned long pte), + TP_ARGS(addr, pte), TP_STRUCT__entry( __field(unsigned long, addr) - __field(unsigned long, pmd) + __field(unsigned long, pte) ), TP_fast_assign( __entry->addr = addr; - __entry->pmd = pmd; + __entry->pte = pte; ), - TP_printk("Set pmd with 0x%lx with 0x%lx", __entry->addr, __entry->pmd) + TP_printk("Set page table entry with 0x%lx with 0x%lx", __entry->addr, __entry->pte) ); +DEFINE_EVENT(hugepage_set, hugepage_set_pmd, + TP_PROTO(unsigned long addr, unsigned long pmd), + TP_ARGS(addr, pmd) +); -TRACE_EVENT(hugepage_update, +DEFINE_EVENT(hugepage_set, hugepage_set_pud, + TP_PROTO(unsigned long addr, unsigned long pud), + TP_ARGS(addr, pud) +); + +DECLARE_EVENT_CLASS(hugepage_update, TP_PROTO(unsigned long addr, unsigned long pte, unsigned long clr, unsigned long set), TP_ARGS(addr, pte, clr, set), @@ -48,6 +57,16 @@ TRACE_EVENT(hugepage_update, TP_printk("hugepage update at addr 0x%lx and pte = 0x%lx clr = 0x%lx, set = 0x%lx", __entry->addr, __entry->pte, __entry->clr, __entry->set) ); +DEFINE_EVENT(hugepage_update, hugepage_update_pmd, + TP_PROTO(unsigned long addr, unsigned long pmd, unsigned long clr, unsigned long set), + TP_ARGS(addr, pmd, clr, set) +); + +DEFINE_EVENT(hugepage_update, hugepage_update_pud, + TP_PROTO(unsigned long addr, unsigned long pud, unsigned long clr, unsigned long set), + TP_ARGS(addr, pud, clr, set) +); + DECLARE_EVENT_CLASS(migration_pmd, TP_PROTO(unsigned long addr, unsigned long pmd), diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index c40fc97f9417..9adc2bdf2f94 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -9,6 +9,7 @@ #include <linux/filter.h> #include <linux/tracepoint.h> #include <linux/bpf.h> +#include <net/xdp.h> #define __XDP_ACT_MAP(FN) \ FN(ABORTED) \ @@ -404,6 +405,23 @@ TRACE_EVENT(mem_return_failed, ) ); +TRACE_EVENT(bpf_xdp_link_attach_failed, + + TP_PROTO(const char *msg), + + TP_ARGS(msg), + + TP_STRUCT__entry( + __string(msg, msg) + ), + + TP_fast_assign( + __assign_str(msg, msg); + ), + + 
TP_printk("errmsg=%s", __get_str(msg)) +); + #endif /* _TRACE_XDP_H */ #include <trace/define_trace.h> diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index fd6c1cb585db..abe087c53b4b 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -820,8 +820,11 @@ __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) #define __NR_cachestat 451 __SYSCALL(__NR_cachestat, sys_cachestat) +#define __NR_fchmodat2 452 +__SYSCALL(__NR_fchmodat2, sys_fchmodat2) + #undef __NR_syscalls -#define __NR_syscalls 452 +#define __NR_syscalls 453 /* * 32 bit systems traditionally used different diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 60a9d59beeab..8790b3962e4b 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -19,6 +19,7 @@ /* ld/ldx fields */ #define BPF_DW 0x18 /* double word (64-bit) */ +#define BPF_MEMSX 0x80 /* load with sign extension */ #define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */ #define BPF_XADD 0xc0 /* exclusive add - legacy name */ @@ -1036,6 +1037,9 @@ enum bpf_attach_type { BPF_LSM_CGROUP, BPF_STRUCT_OPS, BPF_NETFILTER, + BPF_TCX_INGRESS, + BPF_TCX_EGRESS, + BPF_TRACE_UPROBE_MULTI, __MAX_BPF_ATTACH_TYPE }; @@ -1053,10 +1057,21 @@ enum bpf_link_type { BPF_LINK_TYPE_KPROBE_MULTI = 8, BPF_LINK_TYPE_STRUCT_OPS = 9, BPF_LINK_TYPE_NETFILTER = 10, - + BPF_LINK_TYPE_TCX = 11, + BPF_LINK_TYPE_UPROBE_MULTI = 12, MAX_BPF_LINK_TYPE, }; +enum bpf_perf_event_type { + BPF_PERF_EVENT_UNSPEC = 0, + BPF_PERF_EVENT_UPROBE = 1, + BPF_PERF_EVENT_URETPROBE = 2, + BPF_PERF_EVENT_KPROBE = 3, + BPF_PERF_EVENT_KRETPROBE = 4, + BPF_PERF_EVENT_TRACEPOINT = 5, + BPF_PERF_EVENT_EVENT = 6, +}; + /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command * * NONE(default): No further bpf programs allowed in the subtree. @@ -1103,7 +1118,12 @@ enum bpf_link_type { */ #define BPF_F_ALLOW_OVERRIDE (1U << 0) #define BPF_F_ALLOW_MULTI (1U << 1) +/* Generic attachment flags. */ #define BPF_F_REPLACE (1U << 2) +#define BPF_F_BEFORE (1U << 3) +#define BPF_F_AFTER (1U << 4) +#define BPF_F_ID (1U << 5) +#define BPF_F_LINK BPF_F_LINK /* 1 << 13 */ /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the * verifier will perform strict alignment checking as if the kernel @@ -1168,7 +1188,21 @@ enum bpf_link_type { /* link_create.kprobe_multi.flags used in LINK_CREATE command for * BPF_TRACE_KPROBE_MULTI attach type to create return probe. */ -#define BPF_F_KPROBE_MULTI_RETURN (1U << 0) +enum { + BPF_F_KPROBE_MULTI_RETURN = (1U << 0) +}; + +/* link_create.uprobe_multi.flags used in LINK_CREATE command for + * BPF_TRACE_UPROBE_MULTI attach type to create return probe. + */ +enum { + BPF_F_UPROBE_MULTI_RETURN = (1U << 0) +}; + +/* link_create.netfilter.flags used in LINK_CREATE command for + * BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation. + */ +#define BPF_F_NETFILTER_IP_DEFRAG (1U << 0) /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: @@ -1434,14 +1468,19 @@ union bpf_attr { }; struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ - __u32 target_fd; /* container object to attach to */ - __u32 attach_bpf_fd; /* eBPF program to attach */ + union { + __u32 target_fd; /* target object to attach to or ... 
*/ + __u32 target_ifindex; /* target ifindex */ + }; + __u32 attach_bpf_fd; __u32 attach_type; __u32 attach_flags; - __u32 replace_bpf_fd; /* previously attached eBPF - * program to replace if - * BPF_F_REPLACE is used - */ + __u32 replace_bpf_fd; + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; }; struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ @@ -1487,16 +1526,26 @@ union bpf_attr { } info; struct { /* anonymous struct used by BPF_PROG_QUERY command */ - __u32 target_fd; /* container object to query */ + union { + __u32 target_fd; /* target object to query or ... */ + __u32 target_ifindex; /* target ifindex */ + }; __u32 attach_type; __u32 query_flags; __u32 attach_flags; __aligned_u64 prog_ids; - __u32 prog_cnt; + union { + __u32 prog_cnt; + __u32 count; + }; + __u32 :32; /* output: per-program attach_flags. * not allowed to be set during effective query. */ __aligned_u64 prog_attach_flags; + __aligned_u64 link_ids; + __aligned_u64 link_attach_flags; + __u64 revision; } query; struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ @@ -1539,13 +1588,13 @@ union bpf_attr { __u32 map_fd; /* struct_ops to attach */ }; union { - __u32 target_fd; /* object to attach to */ - __u32 target_ifindex; /* target ifindex */ + __u32 target_fd; /* target object to attach to or ... */ + __u32 target_ifindex; /* target ifindex */ }; __u32 attach_type; /* attach type */ __u32 flags; /* extra flags */ union { - __u32 target_btf_id; /* btf_id of target to attach to */ + __u32 target_btf_id; /* btf_id of target to attach to */ struct { __aligned_u64 iter_info; /* extra bpf_iter_link_info */ __u32 iter_info_len; /* iter_info length */ @@ -1579,6 +1628,22 @@ union bpf_attr { __s32 priority; __u32 flags; } netfilter; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + } tcx; + struct { + __aligned_u64 path; + __aligned_u64 offsets; + __aligned_u64 ref_ctr_offsets; + __aligned_u64 cookies; + __u32 cnt; + __u32 flags; + __u32 pid; + } uprobe_multi; }; } link_create; @@ -4159,9 +4224,6 @@ union bpf_attr { * **-EOPNOTSUPP** if the operation is not supported, for example * a call from outside of TC ingress. * - * **-ESOCKTNOSUPPORT** if the socket type is not supported - * (reuseport). - * * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags) * Description * Helper is overloaded depending on BPF program type. This @@ -5044,9 +5106,14 @@ union bpf_attr { * u64 bpf_get_func_ip(void *ctx) * Description * Get address of the traced function (for tracing and kprobe programs). + * + * When called for kprobe program attached as uprobe it returns + * probe address for both entry and return uprobe. + * * Return - * Address of the traced function. + * Address of the traced function for kprobe. * 0 for kprobes placed within the function (not at the entry). + * Address of the probe for uprobe and return uprobe. * * u64 bpf_get_attach_cookie(void *ctx) * Description @@ -6187,6 +6254,19 @@ struct bpf_sock_tuple { }; }; +/* (Simplified) user return codes for tcx prog type. + * A valid tcx program must return one of these defined values. All other + * return codes are reserved for future use. Must remain compatible with + * their TC_ACT_* counter-parts. For compatibility in behavior, unknown + * return codes are mapped to TCX_NEXT. 
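[Editor's note: on the BPF side, the return-code contract in the comment above means a tcx program returns one of the TCX_* codes (defined just below) directly. A minimal pass-through program — the SEC name follows libbpf conventions for tcx attachment and is an assumption here:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tcx/ingress")
int tcx_pass_all(struct __sk_buff *skb)
{
	return TCX_PASS;	/* any unknown code would be treated as TCX_NEXT */
}

char LICENSE[] SEC("license") = "GPL";]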
+ */ +enum tcx_action_base { + TCX_NEXT = -1, + TCX_PASS = 0, + TCX_DROP = 2, + TCX_REDIRECT = 7, +}; + struct bpf_xdp_sock { __u32 queue_id; }; @@ -6439,6 +6519,40 @@ struct bpf_link_info { __s32 priority; __u32 flags; } netfilter; + struct { + __aligned_u64 addrs; + __u32 count; /* in/out: kprobe_multi function count */ + __u32 flags; + } kprobe_multi; + struct { + __u32 type; /* enum bpf_perf_event_type */ + __u32 :32; + union { + struct { + __aligned_u64 file_name; /* in/out */ + __u32 name_len; + __u32 offset; /* offset from file_name */ + } uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */ + struct { + __aligned_u64 func_name; /* in/out */ + __u32 name_len; + __u32 offset; /* offset from func_name */ + __u64 addr; + } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */ + struct { + __aligned_u64 tp_name; /* in/out */ + __u32 name_len; + } tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */ + struct { + __u64 config; + __u32 type; + } event; /* BPF_PERF_EVENT_EVENT */ + }; + } perf_event; + struct { + __u32 ifindex; + __u32 attach_type; + } tcx; }; } __attribute__((aligned(8))); @@ -7012,6 +7126,7 @@ struct bpf_list_head { struct bpf_list_node { __u64 :64; __u64 :64; + __u64 :64; } __attribute__((aligned(8))); struct bpf_rb_root { @@ -7023,6 +7138,7 @@ struct bpf_rb_node { __u64 :64; __u64 :64; __u64 :64; + __u64 :64; } __attribute__((aligned(8))); struct bpf_refcount { diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index ab38d0f411fa..fc3c32186d7e 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -220,7 +220,11 @@ #define BTRFS_EXTENT_DATA_REF_KEY 178 -#define BTRFS_EXTENT_REF_V0_KEY 180 +/* + * Obsolete key. Defintion removed in 6.6, value may be reused in the future. + * + * #define BTRFS_EXTENT_REF_V0_KEY 180 + */ #define BTRFS_SHARED_BLOCK_REF_KEY 182 diff --git a/include/uapi/linux/cn_proc.h b/include/uapi/linux/cn_proc.h index db210625cee8..f2afb7cc4926 100644 --- a/include/uapi/linux/cn_proc.h +++ b/include/uapi/linux/cn_proc.h @@ -30,6 +30,49 @@ enum proc_cn_mcast_op { PROC_CN_MCAST_IGNORE = 2 }; +#define PROC_EVENT_ALL (PROC_EVENT_FORK | PROC_EVENT_EXEC | PROC_EVENT_UID | \ + PROC_EVENT_GID | PROC_EVENT_SID | PROC_EVENT_PTRACE | \ + PROC_EVENT_COMM | PROC_EVENT_NONZERO_EXIT | \ + PROC_EVENT_COREDUMP | PROC_EVENT_EXIT) + +/* + * If you add an entry in proc_cn_event, make sure you add it in + * PROC_EVENT_ALL above as well. 
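[Editor's note: with the new struct proc_input below, a listener can subscribe to a subset of process events instead of the full firehose. A hedged userspace sketch of filling the connector payload — the NETLINK_CONNECTOR / CN_IDX_PROC send path itself is elided:

#include <linux/cn_proc.h>

/* Subscribe to fork and exec events only. */
struct proc_input input = {
	.mcast_op   = PROC_CN_MCAST_LISTEN,
	.event_type = PROC_EVENT_FORK | PROC_EVENT_EXEC,
};
/* input is then sent as the data of a CN_IDX_PROC connector message */]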
+ */ +enum proc_cn_event { + /* Use successive bits so the enums can be used to record + * sets of events as well + */ + PROC_EVENT_NONE = 0x00000000, + PROC_EVENT_FORK = 0x00000001, + PROC_EVENT_EXEC = 0x00000002, + PROC_EVENT_UID = 0x00000004, + PROC_EVENT_GID = 0x00000040, + PROC_EVENT_SID = 0x00000080, + PROC_EVENT_PTRACE = 0x00000100, + PROC_EVENT_COMM = 0x00000200, + /* "next" should be 0x00000400 */ + /* "last" is the last process event: exit, + * while "next to last" is coredumping event + * before that is report only if process dies + * with non-zero exit status + */ + PROC_EVENT_NONZERO_EXIT = 0x20000000, + PROC_EVENT_COREDUMP = 0x40000000, + PROC_EVENT_EXIT = 0x80000000 +}; + +struct proc_input { + enum proc_cn_mcast_op mcast_op; + enum proc_cn_event event_type; +}; + +static inline enum proc_cn_event valid_event(enum proc_cn_event ev_type) +{ + ev_type &= PROC_EVENT_ALL; + return ev_type; +} + /* * From the user's point of view, the process * ID is the thread group ID and thread ID is the internal @@ -44,24 +87,7 @@ enum proc_cn_mcast_op { */ struct proc_event { - enum what { - /* Use successive bits so the enums can be used to record - * sets of events as well - */ - PROC_EVENT_NONE = 0x00000000, - PROC_EVENT_FORK = 0x00000001, - PROC_EVENT_EXEC = 0x00000002, - PROC_EVENT_UID = 0x00000004, - PROC_EVENT_GID = 0x00000040, - PROC_EVENT_SID = 0x00000080, - PROC_EVENT_PTRACE = 0x00000100, - PROC_EVENT_COMM = 0x00000200, - /* "next" should be 0x00000400 */ - /* "last" is the last process event: exit, - * while "next to last" is coredumping event */ - PROC_EVENT_COREDUMP = 0x40000000, - PROC_EVENT_EXIT = 0x80000000 - } what; + enum proc_cn_event what; __u32 cpu; __u64 __attribute__((aligned(8))) timestamp_ns; /* Number of nano seconds since system boot */ diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 3782d4219ac9..03875e078be8 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -661,6 +661,8 @@ enum devlink_resource_unit { enum devlink_port_fn_attr_cap { DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT, DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT, + DEVLINK_PORT_FN_ATTR_CAP_IPSEC_CRYPTO_BIT, + DEVLINK_PORT_FN_ATTR_CAP_IPSEC_PACKET_BIT, /* Add new caps above */ __DEVLINK_PORT_FN_ATTR_CAPS_MAX, @@ -669,6 +671,8 @@ enum devlink_port_fn_attr_cap { #define DEVLINK_PORT_FN_CAP_ROCE _BITUL(DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT) #define DEVLINK_PORT_FN_CAP_MIGRATABLE \ _BITUL(DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT) +#define DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO _BITUL(DEVLINK_PORT_FN_ATTR_CAP_IPSEC_CRYPTO_BIT) +#define DEVLINK_PORT_FN_CAP_IPSEC_PACKET _BITUL(DEVLINK_PORT_FN_ATTR_CAP_IPSEC_PACKET_BIT) enum devlink_port_function_attr { DEVLINK_PORT_FUNCTION_ATTR_UNSPEC, diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 0c8cf359ea5b..e0e159138331 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -443,7 +443,6 @@ typedef struct elf64_shdr { #define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */ #define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */ #define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */ -#define NT_RISCV_VECTOR 0x900 /* RISC-V vector registers */ #define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */ #define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */ #define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 0f6a0fe09bdb..ce3117df9cec 100644 --- 
a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -570,6 +570,7 @@ enum { IFLA_BRPORT_MCAST_N_GROUPS, IFLA_BRPORT_MCAST_MAX_GROUPS, IFLA_BRPORT_NEIGH_VLAN_SUPPRESS, + IFLA_BRPORT_BACKUP_NHID, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h index a78a8096f4ce..8d48863472b9 100644 --- a/include/uapi/linux/if_xdp.h +++ b/include/uapi/linux/if_xdp.h @@ -25,6 +25,12 @@ * application. */ #define XDP_USE_NEED_WAKEUP (1 << 3) +/* By setting this option, userspace application indicates that it can + * handle multiple descriptors per packet thus enabling AF_XDP to split + * multi-buffer XDP frames into multiple Rx descriptors. Without this set + * such frames will be dropped. + */ +#define XDP_USE_SG (1 << 4) /* Flags for xsk_umem_config flags */ #define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0) @@ -108,4 +114,11 @@ struct xdp_desc { /* UMEM descriptor is __u64 */ +/* Flag indicating that the packet continues with the buffer pointed out by the + * next frame in the ring. The end of the packet is signalled by setting this + * bit to zero. For single buffer packets, every descriptor has 'options' set + * to 0 and this maintains backward compatibility. + */ +#define XDP_PKT_CONTD (1 << 0) + #endif /* _LINUX_IF_XDP_H */ diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index ac56605fe9bc..cf592d7b630f 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -198,6 +198,7 @@ enum { DEVCONF_IOAM6_ID_WIDE, DEVCONF_NDISC_EVICT_NOCARRIER, DEVCONF_ACCEPT_UNTRACKED_NA, + DEVCONF_ACCEPT_RA_MIN_LFT, DEVCONF_MAX }; diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h index b826598d1e94..d03863da180e 100644 --- a/include/uapi/linux/mdio.h +++ b/include/uapi/linux/mdio.h @@ -82,6 +82,8 @@ #define MDIO_AN_10BT1_AN_CTRL 526 /* 10BASE-T1 AN control register */ #define MDIO_AN_10BT1_AN_STAT 527 /* 10BASE-T1 AN status register */ #define MDIO_PMA_PMD_BT1_CTRL 2100 /* BASE-T1 PMA/PMD control register */ +#define MDIO_PCS_1000BT1_CTRL 2304 /* 1000BASE-T1 PCS control register */ +#define MDIO_PCS_1000BT1_STAT 2305 /* 1000BASE-T1 PCS status register */ /* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */ #define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */ @@ -332,6 +334,8 @@ #define MDIO_PCS_10T1L_CTRL_RESET 0x8000 /* PCS reset */ /* BASE-T1 PMA/PMD extended ability register. 
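[Editor's note: consumer side of the XDP_PKT_CONTD flag added to if_xdp.h above — an AF_XDP application walking RX descriptors keeps collecting fragments until it sees a descriptor with the bit clear. A userspace sketch mirroring the kernel's xp_mb_desc() helper:

#include <stdbool.h>
#include <linux/if_xdp.h>

/* End of packet when XDP_PKT_CONTD is clear. */
static bool desc_is_eop(const struct xdp_desc *desc)
{
	return !(desc->options & XDP_PKT_CONTD);
}]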
*/ +#define MDIO_PMA_PMD_BT1_B100_ABLE 0x0001 /* 100BASE-T1 Ability */ +#define MDIO_PMA_PMD_BT1_B1000_ABLE 0x0002 /* 1000BASE-T1 Ability */ #define MDIO_PMA_PMD_BT1_B10L_ABLE 0x0004 /* 10BASE-T1L Ability */ /* BASE-T1 auto-negotiation advertisement register [15:0] */ @@ -373,7 +377,19 @@ #define MDIO_AN_10BT1_AN_STAT_LPA_EEE_T1L 0x4000 /* 10BASE-T1L LP EEE ability advertisement */ /* BASE-T1 PMA/PMD control register */ -#define MDIO_PMA_PMD_BT1_CTRL_CFG_MST 0x4000 /* MASTER-SLAVE config value */ +#define MDIO_PMA_PMD_BT1_CTRL_STRAP 0x000F /* Type selection (Strap) */ +#define MDIO_PMA_PMD_BT1_CTRL_STRAP_B1000 0x0001 /* Select 1000BASE-T1 */ +#define MDIO_PMA_PMD_BT1_CTRL_CFG_MST 0x4000 /* MASTER-SLAVE config value */ + +/* 1000BASE-T1 PCS control register */ +#define MDIO_PCS_1000BT1_CTRL_LOW_POWER 0x0800 /* Low power mode */ +#define MDIO_PCS_1000BT1_CTRL_DISABLE_TX 0x4000 /* Global PMA transmit disable */ +#define MDIO_PCS_1000BT1_CTRL_RESET 0x8000 /* Software reset value */ + +/* 1000BASE-T1 PCS status register */ +#define MDIO_PCS_1000BT1_STAT_LINK 0x0004 /* PCS Link is up */ +#define MDIO_PCS_1000BT1_STAT_FAULT 0x0080 /* There is a fault condition */ + /* EEE Supported/Advertisement/LP Advertisement registers. * diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h index 8eb0d7b758d2..bb242fdcfe6b 100644 --- a/include/uapi/linux/mount.h +++ b/include/uapi/linux/mount.h @@ -100,8 +100,9 @@ enum fsconfig_command { FSCONFIG_SET_PATH = 3, /* Set parameter, supplying an object by path */ FSCONFIG_SET_PATH_EMPTY = 4, /* Set parameter, supplying an object by (empty) path */ FSCONFIG_SET_FD = 5, /* Set parameter, supplying an object by fd */ - FSCONFIG_CMD_CREATE = 6, /* Invoke superblock creation */ + FSCONFIG_CMD_CREATE = 6, /* Create new or reuse existing superblock */ FSCONFIG_CMD_RECONFIGURE = 7, /* Invoke superblock reconfiguration */ + FSCONFIG_CMD_CREATE_EXCL = 8, /* Create new superblock, fail if reusing existing superblock */ }; /* diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h index 639524b59930..c1634b95c223 100644 --- a/include/uapi/linux/netdev.h +++ b/include/uapi/linux/netdev.h @@ -11,7 +11,7 @@ /** * enum netdev_xdp_act - * @NETDEV_XDP_ACT_BASIC: XDP feautues set supported by all drivers + * @NETDEV_XDP_ACT_BASIC: XDP features set supported by all drivers * (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX) * @NETDEV_XDP_ACT_REDIRECT: The netdev supports XDP_REDIRECT * @NETDEV_XDP_ACT_NDO_XMIT: This feature informs if netdev implements @@ -34,6 +34,7 @@ enum netdev_xdp_act { NETDEV_XDP_ACT_RX_SG = 32, NETDEV_XDP_ACT_NDO_XMIT_SG = 64, + /* private: */ NETDEV_XDP_ACT_MASK = 127, }; @@ -41,6 +42,7 @@ enum { NETDEV_A_DEV_IFINDEX = 1, NETDEV_A_DEV_PAD, NETDEV_A_DEV_XDP_FEATURES, + NETDEV_A_DEV_XDP_ZC_MAX_SEGS, __NETDEV_A_DEV_MAX, NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1) diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h index a494cf43a755..4ff328f3d339 100644 --- a/include/uapi/linux/netfilter_bridge/ebtables.h +++ b/include/uapi/linux/netfilter_bridge/ebtables.h @@ -87,7 +87,7 @@ struct ebt_entries { /* nr. 
of entries */ unsigned int nentries; /* entry list */ - char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); + char data[] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); }; /* used for the bitmask of struct ebt_entry */ @@ -129,7 +129,7 @@ struct ebt_entry_match { } u; /* size of data */ unsigned int match_size; - unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); + unsigned char data[] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); }; struct ebt_entry_watcher { @@ -142,7 +142,7 @@ struct ebt_entry_watcher { } u; /* size of data */ unsigned int watcher_size; - unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); + unsigned char data[] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); }; struct ebt_entry_target { @@ -182,13 +182,15 @@ struct ebt_entry { unsigned char sourcemsk[ETH_ALEN]; unsigned char destmac[ETH_ALEN]; unsigned char destmsk[ETH_ALEN]; - /* sizeof ebt_entry + matches */ - unsigned int watchers_offset; - /* sizeof ebt_entry + matches + watchers */ - unsigned int target_offset; - /* sizeof ebt_entry + matches + watchers + target */ - unsigned int next_offset; - unsigned char elems[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); + __struct_group(/* no tag */, offsets, /* no attrs */, + /* sizeof ebt_entry + matches */ + unsigned int watchers_offset; + /* sizeof ebt_entry + matches + watchers */ + unsigned int target_offset; + /* sizeof ebt_entry + matches + watchers + target */ + unsigned int next_offset; + ); + unsigned char elems[] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); }; static __inline__ struct ebt_entry_target * diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index e94870e77ee9..efc82c318fa2 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -965,6 +965,7 @@ struct check_pkt_len_arg { * start of the packet or at the start of the l3 header depending on the value * of l3 tunnel flag in the tun_flags field of OVS_ACTION_ATTR_ADD_MPLS * argument. + * @OVS_ACTION_ATTR_DROP: Explicit drop action. * * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all * fields within a header are modifiable, e.g. the IPv4 protocol and fragment @@ -1002,6 +1003,7 @@ enum ovs_action_attr { OVS_ACTION_ATTR_CHECK_PKT_LEN, /* Nested OVS_CHECK_PKT_LEN_ATTR_*. */ OVS_ACTION_ATTR_ADD_MPLS, /* struct ovs_action_add_mpls. */ OVS_ACTION_ATTR_DEC_TTL, /* Nested OVS_DEC_TTL_ATTR_*. */ + OVS_ACTION_ATTR_DROP, /* u32 error code. */ __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted * from userspace. 
*/ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 37675437b768..39c6a250dd1b 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -1339,7 +1339,8 @@ union perf_mem_data_src { #define PERF_MEM_LVLNUM_L2 0x02 /* L2 */ #define PERF_MEM_LVLNUM_L3 0x03 /* L3 */ #define PERF_MEM_LVLNUM_L4 0x04 /* L4 */ -/* 5-0x8 available */ +/* 5-0x7 available */ +#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */ #define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */ #define PERF_MEM_LVLNUM_IO 0x0a /* I/O */ #define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 7865f5a9885b..c7082cc60d21 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -598,6 +598,9 @@ enum { TCA_FLOWER_KEY_CFM, /* nested */ + TCA_FLOWER_KEY_SPI, /* be32 */ + TCA_FLOWER_KEY_SPI_MASK, /* be32 */ + __TCA_FLOWER_MAX, }; @@ -710,9 +713,11 @@ enum { TCA_FLOWER_KEY_CFM_OPT_UNSPEC, TCA_FLOWER_KEY_CFM_MD_LEVEL, TCA_FLOWER_KEY_CFM_OPCODE, - TCA_FLOWER_KEY_CFM_OPT_MAX, + __TCA_FLOWER_KEY_CFM_OPT_MAX, }; +#define TCA_FLOWER_KEY_CFM_OPT_MAX (__TCA_FLOWER_KEY_CFM_OPT_MAX - 1) + #define TCA_FLOWER_MASK_FLAGS_RANGE (1 << 0) /* Range-based match */ /* Match-all classifier */ diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 00f6ff0aff1f..3f85ae578056 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -603,6 +603,7 @@ enum { TCA_NETEM_JITTER64, TCA_NETEM_SLOT, TCA_NETEM_SLOT_DIST, + TCA_NETEM_PRNG_SEED, __TCA_NETEM_MAX, }; diff --git a/include/uapi/linux/psp-dbc.h b/include/uapi/linux/psp-dbc.h new file mode 100644 index 000000000000..b3845a9ff5fd --- /dev/null +++ b/include/uapi/linux/psp-dbc.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Userspace interface for AMD Dynamic Boost Control (DBC) + * + * Copyright (C) 2023 Advanced Micro Devices, Inc. + * + * Author: Mario Limonciello <mario.limonciello@amd.com> + */ + +#ifndef __PSP_DBC_USER_H__ +#define __PSP_DBC_USER_H__ + +#include <linux/types.h> + +/** + * DOC: AMD Dynamic Boost Control (DBC) interface + */ + +#define DBC_NONCE_SIZE 16 +#define DBC_SIG_SIZE 32 +#define DBC_UID_SIZE 16 + +/** + * struct dbc_user_nonce - Nonce exchange structure (input/output). + * @auth_needed: Whether the PSP should authenticate this request (input). + * 0: no authentication, PSP will return single-use nonce. + * 1: authentication, PSP will return multi-use nonce. + * @nonce: 16 byte value used for future authentication (output). + * @signature: Optional 32 byte signature created by software using a + * previous nonce (input). + */ +struct dbc_user_nonce { + __u32 auth_needed; + __u8 nonce[DBC_NONCE_SIZE]; + __u8 signature[DBC_SIG_SIZE]; +} __packed; + +/** + * struct dbc_user_setuid - UID exchange structure (input). + * @uid: 16 byte value representing software identity + * @signature: 32 byte signature created by software using a previous nonce + */ +struct dbc_user_setuid { + __u8 uid[DBC_UID_SIZE]; + __u8 signature[DBC_SIG_SIZE]; +} __packed; + +/** + * struct dbc_user_param - Parameter exchange structure (input/output). + * @msg_index: Message indicating what parameter to set or get (input) + * @param: 4 byte parameter, units are message specific (input/output) + * @signature: 32 byte signature.
+ * - When sending a message, this is to be created by software + * using a previous nonce (input) + * - For interpreting results, this signature is updated by the + * PSP to allow software to validate the authenticity of the + * results. + */ +struct dbc_user_param { + __u32 msg_index; + __u32 param; + __u8 signature[DBC_SIG_SIZE]; +} __packed; + +/** + * Dynamic Boost Control (DBC) IOC + * + * possible return codes for all DBC IOCTLs: + * 0: success + * -EINVAL: invalid input or parameter + * -E2BIG: excess data passed + * -EFAULT: failed to copy to/from userspace + * -EBUSY: mailbox in recovery or in use + * -ENODEV: driver not bound with PSP device + * -EACCES: request isn't authorized + * -ETIMEDOUT: request timed out + * -EAGAIN: invalid request for state machine + * -ENOENT: not implemented + * -ENFILE: overflow + * -EPERM: invalid signature + * -EIO: unknown error + */ +#define DBC_IOC_TYPE 'D' + +/** + * DBCIOCNONCE - Fetch a nonce from the PSP for authenticating commands. + * If a nonce is fetched without authentication, it can only + * be utilized for one command. + * If a nonce is fetched with authentication, it can be used + * for multiple requests. + */ +#define DBCIOCNONCE _IOWR(DBC_IOC_TYPE, 0x1, struct dbc_user_nonce) + +/** + * DBCIOCUID - Set the user ID (UID) of a calling process. + * The user ID is 16 bytes long. It must be programmed using a + * 32 byte signature built using the nonce fetched from + * DBCIOCNONCE. + * The UID can only be set once until the system is rebooted. + */ +#define DBCIOCUID _IOW(DBC_IOC_TYPE, 0x2, struct dbc_user_setuid) + +/** + * DBCIOCPARAM - Set or get a parameter from the PSP. + * This request will only work after DBCIOCUID has successfully + * set the UID of the calling process. + * Whether the parameter is set or get is controlled by the + * message ID in the request. + * This command must be sent using a 32 byte signature built + * using the nonce fetched from DBCIOCNONCE. + * When the command succeeds, the 32 byte signature will be + * updated by the PSP for software to authenticate the results.
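+ *
+ * Illustrative call sketch (the descriptor dbc_fd, the includes, and
+ * the signing step are application-side assumptions, not defined by
+ * this header):
+ *
+ * #include <stdio.h>
+ * #include <sys/ioctl.h>
+ * #include <linux/psp-dbc.h>
+ *
+ * struct dbc_user_param p = {
+ * .msg_index = PARAM_GET_FMAX_CAP,
+ * };
+ * (sign the request into p.signature with the nonce from DBCIOCNONCE)
+ * if (ioctl(dbc_fd, DBCIOCPARAM, &p) == 0)
+ * printf("fmax cap: %u MHz\n", p.param);
+ * (verify the refreshed p.signature before trusting p.param)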
+ */ +#define DBCIOCPARAM _IOWR(DBC_IOC_TYPE, 0x3, struct dbc_user_param) + +/** + * enum dbc_cmd_msg - Messages utilized by DBCIOCPARAM + * @PARAM_GET_FMAX_CAP: Get frequency cap (MHz) + * @PARAM_SET_FMAX_CAP: Set frequency cap (MHz) + * @PARAM_GET_PWR_CAP: Get socket power cap (mW) + * @PARAM_SET_PWR_CAP: Set socket power cap (mW) + * @PARAM_GET_GFX_MODE: Get graphics mode (0/1) + * @PARAM_SET_GFX_MODE: Set graphics mode (0/1) + * @PARAM_GET_CURR_TEMP: Get current temperature (degrees C) + * @PARAM_GET_FMAX_MAX: Get maximum allowed value for frequency (MHz) + * @PARAM_GET_FMAX_MIN: Get minimum allowed value for frequency (MHz) + * @PARAM_GET_SOC_PWR_MAX: Get maximum allowed value for SoC power (mW) + * @PARAM_GET_SOC_PWR_MIN: Get minimum allowed value for SoC power (mW) + * @PARAM_GET_SOC_PWR_CUR: Get current value for SoC power (mW) + */ +enum dbc_cmd_msg { + PARAM_GET_FMAX_CAP = 0x3, + PARAM_SET_FMAX_CAP = 0x4, + PARAM_GET_PWR_CAP = 0x5, + PARAM_SET_PWR_CAP = 0x6, + PARAM_GET_GFX_MODE = 0x7, + PARAM_SET_GFX_MODE = 0x8, + PARAM_GET_CURR_TEMP = 0x9, + PARAM_GET_FMAX_MAX = 0xA, + PARAM_GET_FMAX_MIN = 0xB, + PARAM_GET_SOC_PWR_MAX = 0xC, + PARAM_GET_SOC_PWR_MIN = 0xD, + PARAM_GET_SOC_PWR_CUR = 0xE, +}; + +#endif /* __PSP_DBC_USER_H__ */ diff --git a/include/uapi/linux/quota.h b/include/uapi/linux/quota.h index f17c9636a859..52090105b828 100644 --- a/include/uapi/linux/quota.h +++ b/include/uapi/linux/quota.h @@ -77,6 +77,7 @@ #define QFMT_VFS_V0 2 #define QFMT_OCFS2 3 #define QFMT_VFS_V1 4 +#define QFMT_SHMEM 5 /* Size of block in which space limits are passed through the quota * interface */ diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h index 0fdc6ef02b94..dbfc9b37fcae 100644 --- a/include/uapi/linux/seccomp.h +++ b/include/uapi/linux/seccomp.h @@ -115,6 +115,8 @@ struct seccomp_notif_resp { __u32 flags; }; +#define SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP (1UL << 0) + /* valid flags for seccomp_notif_addfd */ #define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */ #define SECCOMP_ADDFD_FLAG_SEND (1UL << 1) /* Addfd and return it, atomically */ @@ -150,4 +152,6 @@ struct seccomp_notif_addfd { #define SECCOMP_IOCTL_NOTIF_ADDFD SECCOMP_IOW(3, \ struct seccomp_notif_addfd) +#define SECCOMP_IOCTL_NOTIF_SET_FLAGS SECCOMP_IOW(4, __u64) + #endif /* _UAPI_LINUX_SECCOMP_H */ diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h index bb4dacca31e7..837fcd4b0abc 100644 --- a/include/uapi/linux/smc.h +++ b/include/uapi/linux/smc.h @@ -107,6 +107,8 @@ enum { enum { SMC_NLA_LGR_R_V2_UNSPEC, SMC_NLA_LGR_R_V2_DIRECT, /* u8 */ + SMC_NLA_LGR_R_V2_MAX_CONNS, /* u8 */ + SMC_NLA_LGR_R_V2_MAX_LINKS, /* u8 */ __SMC_NLA_LGR_R_V2_MAX, SMC_NLA_LGR_R_V2_MAX = __SMC_NLA_LGR_R_V2_MAX - 1 }; diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h index 7837ba4fe728..7c3fc3980881 100644 --- a/include/uapi/linux/stddef.h +++ b/include/uapi/linux/stddef.h @@ -45,3 +45,7 @@ TYPE NAME[]; \ } #endif + +#ifndef __counted_by +#define __counted_by(m) +#endif diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 66dd4cd277bd..62151706c5a3 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -39,7 +39,8 @@ UFFD_FEATURE_MINOR_SHMEM | \ UFFD_FEATURE_EXACT_ADDRESS | \ UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \ - UFFD_FEATURE_WP_UNPOPULATED) + UFFD_FEATURE_WP_UNPOPULATED | \ + UFFD_FEATURE_POISON) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -49,12 +50,14 @@
(__u64)1 << _UFFDIO_COPY | \ (__u64)1 << _UFFDIO_ZEROPAGE | \ (__u64)1 << _UFFDIO_WRITEPROTECT | \ - (__u64)1 << _UFFDIO_CONTINUE) + (__u64)1 << _UFFDIO_CONTINUE | \ + (__u64)1 << _UFFDIO_POISON) #define UFFD_API_RANGE_IOCTLS_BASIC \ ((__u64)1 << _UFFDIO_WAKE | \ (__u64)1 << _UFFDIO_COPY | \ + (__u64)1 << _UFFDIO_WRITEPROTECT | \ (__u64)1 << _UFFDIO_CONTINUE | \ - (__u64)1 << _UFFDIO_WRITEPROTECT) + (__u64)1 << _UFFDIO_POISON) /* * Valid ioctl command number range with this API is from 0x00 to @@ -71,6 +74,7 @@ #define _UFFDIO_ZEROPAGE (0x04) #define _UFFDIO_WRITEPROTECT (0x06) #define _UFFDIO_CONTINUE (0x07) +#define _UFFDIO_POISON (0x08) #define _UFFDIO_API (0x3F) /* userfaultfd ioctl ids */ @@ -91,6 +95,8 @@ struct uffdio_writeprotect) #define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \ struct uffdio_continue) +#define UFFDIO_POISON _IOWR(UFFDIO, _UFFDIO_POISON, \ + struct uffdio_poison) /* read() structure */ struct uffd_msg { @@ -225,6 +231,7 @@ struct uffdio_api { #define UFFD_FEATURE_EXACT_ADDRESS (1<<11) #define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12) #define UFFD_FEATURE_WP_UNPOPULATED (1<<13) +#define UFFD_FEATURE_POISON (1<<14) __u64 features; __u64 ioctls; @@ -321,6 +328,18 @@ struct uffdio_continue { __s64 mapped; }; +struct uffdio_poison { + struct uffdio_range range; +#define UFFDIO_POISON_MODE_DONTWAKE ((__u64)1<<0) + __u64 mode; + + /* + * Fields below here are written by the ioctl and must be at the end: + * the copy_from_user will not read past here. + */ + __s64 updated; +}; + /* * Flags for the userfaultfd(2) system call itself. */ diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index 12c1c9699935..cc65ef0f3c3e 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -56,6 +56,7 @@ #define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow * Steering */ #define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */ +#define VIRTIO_NET_F_VQ_NOTF_COAL 52 /* Device supports virtqueue notification coalescing */ #define VIRTIO_NET_F_NOTF_COAL 53 /* Device supports notifications coalescing */ #define VIRTIO_NET_F_GUEST_USO4 54 /* Guest can handle USOv4 in. */ #define VIRTIO_NET_F_GUEST_USO6 55 /* Guest can handle USOv6 in. 
*/ @@ -391,5 +392,18 @@ struct virtio_net_ctrl_coal_rx { }; #define VIRTIO_NET_CTRL_NOTF_COAL_RX_SET 1 +#define VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET 2 +#define VIRTIO_NET_CTRL_NOTF_COAL_VQ_GET 3 + +struct virtio_net_ctrl_coal { + __le32 max_packets; + __le32 max_usecs; +}; + +struct virtio_net_ctrl_coal_vq { + __le16 vqn; + __le16 reserved; + struct virtio_net_ctrl_coal coal; +}; #endif /* _UAPI_LINUX_VIRTIO_NET_H */ diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h index d2029556083e..375718ba4ab6 100644 --- a/include/uapi/xen/privcmd.h +++ b/include/uapi/xen/privcmd.h @@ -98,6 +98,18 @@ struct privcmd_mmap_resource { __u64 addr; }; +/* For privcmd_irqfd::flags */ +#define PRIVCMD_IRQFD_FLAG_DEASSIGN (1 << 0) + +struct privcmd_irqfd { + void __user *dm_op; + __u32 size; /* Size of the structure pointed to by dm_op */ + __u32 fd; + __u32 flags; + domid_t dom; + __u8 pad[2]; +}; + /* * @cmd: IOCTL_PRIVCMD_HYPERCALL * @arg: &privcmd_hypercall_t @@ -125,5 +137,7 @@ struct privcmd_mmap_resource { _IOC(_IOC_NONE, 'P', 6, sizeof(domid_t)) #define IOCTL_PRIVCMD_MMAP_RESOURCE \ _IOC(_IOC_NONE, 'P', 7, sizeof(struct privcmd_mmap_resource)) +#define IOCTL_PRIVCMD_IRQFD \ + _IOC(_IOC_NONE, 'P', 8, sizeof(struct privcmd_irqfd)) #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */ diff --git a/include/video/kyro.h b/include/video/kyro.h index b958c2e9c915..418eef6c5523 100644 --- a/include/video/kyro.h +++ b/include/video/kyro.h @@ -38,18 +38,6 @@ struct kyrofb_info { int wc_cookie; }; -extern int kyro_dev_init(void); -extern void kyro_dev_reset(void); - -extern unsigned char *kyro_dev_physical_fb_ptr(void); -extern unsigned char *kyro_dev_virtual_fb_ptr(void); -extern void *kyro_dev_physical_regs_ptr(void); -extern void *kyro_dev_virtual_regs_ptr(void); -extern unsigned int kyro_dev_fb_size(void); -extern unsigned int kyro_dev_regs_size(void); - -extern u32 kyro_dev_overlay_offset(void); - /* * benedict.gaster@superh.com * Added the following IOCTLS for the creation of overlay services... diff --git a/include/xen/events.h b/include/xen/events.h index 95970a2f7695..95d5e28de324 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -75,7 +75,6 @@ void evtchn_put(evtchn_port_t evtchn); void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); void rebind_evtchn_irq(evtchn_port_t evtchn, int irq); -int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu); static inline void notify_remote_via_evtchn(evtchn_port_t port) {